id (int64, 11 to 59.9k) | original (stringlengths 33 to 150k) | modified (stringlengths 37 to 150k)
---|---|---|
12,963 |
def test_query_products_query_with_filter_ids(
staff_api_client, product, query_products_with_filter
):
product_global_id = graphene.Node.to_global_id("Product", product.id)
variables = {"filter": {"ids": [product_global_id]}}
response = staff_api_client.post_graphql(
query_products_with_filter, variables, check_no_permissions=False
)
content = get_graphql_content(response)
products_data = content["data"]["products"]["edges"]
assert len(products_data) == 1
assert products_data[0]["node"]["id"] == product_global_id
|
def test_query_products_query_with_filter_ids(
staff_api_client, product, query_products_with_filter
):
product_global_id = graphene.Node.to_global_id("Product", product.id)
variables = {"filter": {"ids": [product_global_id]}}
response = staff_api_client.post_graphql(
query_products_with_filter, variables
)
content = get_graphql_content(response)
products_data = content["data"]["products"]["edges"]
assert len(products_data) == 1
assert products_data[0]["node"]["id"] == product_global_id
|
3,608 |
def run_static():
"""Runs the static tests.
Returns a statuscode of 0 if everything ran correctly.
Otherwise, it will return statuscode 1
"""
success = True
success &= do_process(
[
sys.executable,
path.join(current_directory, "tools", "static_word_checks.py"),
"--replace",
]
)
success &= do_process(
[
sys.executable,
path.join(current_directory, "tools", "check_documentation.py"),
]
)
success &= do_process(["black", "."], shell=True)
success &= do_process(["flake8", "--exclude=.eggs,build,docs"])
success &= do_process(["pydocstyle", "praw"])
# success &= do_process(["pylint", "--rcfile=.pylintrc", "praw"])
tmp_dir = mkdtemp()
try:
success &= do_process(["sphinx-build", "-W", "--keep-going", "docs", tmp_dir])
finally:
rmtree(tmp_dir)
return success
|
def run_static():
"""Runs the static tests.
Returns a statuscode of 0 if everything ran correctly.
Otherwise, it will return statuscode 1
"""
success = True
success &= do_process(
[
sys.executable,
path.join(current_directory, "tools", "static_word_checks.py"),
"--replace",
]
)
success &= do_process(
[
sys.executable,
path.join(current_directory, "tools", "check_documentation.py"),
]
)
success &= do_process(["black", "."])
success &= do_process(["flake8", "--exclude=.eggs,build,docs"])
success &= do_process(["pydocstyle", "praw"])
# success &= do_process(["pylint", "--rcfile=.pylintrc", "praw"])
tmp_dir = mkdtemp()
try:
success &= do_process(["sphinx-build", "-W", "--keep-going", "docs", tmp_dir])
finally:
rmtree(tmp_dir)
return success
|
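The two cells in the row above differ only in how the `black` check is invoked: the original passes `shell=True` alongside an argument list, the modified one drops it. As a hedged aside (not part of the dataset; `run_tool` below is a hypothetical stand-in for the row's `do_process` helper), this is the plain `subprocess` idiom that an argument list without `shell=True` corresponds to:

```python
# Minimal sketch: pass the command as an argument list and skip shell=True.
# With shell=True a list argument is interpreted differently across platforms,
# so executing the list directly is the safer default.
import subprocess

def run_tool(args):
    """Run a command given as an argument list; return True on exit code 0."""
    completed = subprocess.run(args)  # no shell=True: args go straight to the program
    return completed.returncode == 0

if __name__ == "__main__":
    print("succeeded:", run_tool(["python", "--version"]))
```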
47,203 |
def get_checkpoint_from_architecture(architecture):
try:
module = importlib.import_module(architecture.__module__)
except Exception:
logger.error(f"Ignoring architecture {architecture}")
return
if hasattr(module, "_CHECKPOINT_FOR_DOC"):
return module._CHECKPOINT_FOR_DOC
else:
logger.warning(f"Can't retrieve checkpoint from {architecture.__name__}")
|
def get_checkpoint_from_architecture(architecture):
try:
module = importlib.import_module(architecture.__module__)
except ImportError:
logger.error(f"Ignoring architecture {architecture}")
return
if hasattr(module, "_CHECKPOINT_FOR_DOC"):
return module._CHECKPOINT_FOR_DOC
else:
logger.warning(f"Can't retrieve checkpoint from {architecture.__name__}")
|
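In the row above, the only change is the exception clause around `importlib.import_module`: `except Exception` in the original versus `except ImportError` in the modified cell. A minimal, self-contained sketch of that narrowing (the `load_module` helper and logger setup here are illustrative assumptions, not code from the row):

```python
# Catching ImportError keeps unrelated failures (e.g. AttributeError bugs) visible,
# while a broad "except Exception" would silently swallow them as well.
import importlib
import logging

logger = logging.getLogger(__name__)

def load_module(name):
    """Return the imported module, or None if it cannot be imported."""
    try:
        return importlib.import_module(name)
    except ImportError:
        logger.error("Ignoring module %s: not importable", name)
        return None

print(load_module("json"))            # <module 'json' ...>
print(load_module("no_such_module"))  # None, with an error logged
```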
47,980 |
def build_argparser():
parser = ArgumentParser()
general = parser.add_argument_group('General')
general.add_argument('-i', '--input', required=True,
help='Required. An input to process. The input must be a single image, '
'a folder of images, video file or camera id.')
general.add_argument('--loop', default=False, action='store_true',
help='Optional. Enable reading the input in a loop.')
general.add_argument('-o', '--output',
help='Optional. Name of output to save.')
general.add_argument('-limit', '--output_limit', default=1000, type=int,
help='Optional. Number of frames to store in output. '
'If 0 is set, all frames are stored.')
general.add_argument('--output_resolution', default=None, type=resolution,
help='Optional. Specify the maximum output window resolution '
'in (width x height) format. Example: 1280x720. '
'Input frame size used by default.')
general.add_argument('--no_show', action='store_true',
help="Optional. Don't show output.")
general.add_argument('-cw', '--crop_width', default=0, type=int,
help='Optional. Crop the input stream to this width. '
'Both -cw and -ch parameters should be specified '
'to use crop.')
general.add_argument('-ch', '--crop_height', default=0, type=int,
help='Optional. Crop the input stream to this height. '
'Both -cw and -ch parameters should be specified '
'to use crop.')
general.add_argument('--match_algo', default='HUNGARIAN', choices=('HUNGARIAN', 'MIN_DIST'),
help='Optional. Algorithm for face matching. Default: HUNGARIAN.')
general.add_argument('-u', '--utilization_monitors', default='', type=str,
help='Optional. List of monitors to show initially.')
gallery = parser.add_argument_group('Faces database')
gallery.add_argument('-fg', type=Path, required=True,
help='Required. Path to the face images directory.')
gallery.add_argument('--run_detector', action='store_true',
help='Optional. Use Face Detection model to find faces '
'on the face images, otherwise use full images.')
gallery.add_argument('--allow_grow', action='store_true',
help='Optional. Allow to grow faces gallery and to dump on disk. '
'Available only if --no_show option is off.')
models = parser.add_argument_group('Models')
models.add_argument('-m_fd', type=Path, required=True,
help='Required. Path to an .xml file with Face Detection model.')
models.add_argument('-m_lm', type=Path, required=True,
help='Required. Path to an .xml file with Facial Landmarks Detection model.')
models.add_argument('-m_reid', type=Path, required=True,
help='Required. Path to an .xml file with Face Reidentification model.')
models.add_argument('-fd_iw', '--fd_input_width', default=0, type=int,
help='Optional. Specify the input width of detection model. '
'Both -fd_iw and -fd_ih parameters should be specified '
'for reshape.')
models.add_argument('-fd_ih', '--fd_input_height', default=0, type=int,
help='Optional. Specify the input height of detection model. '
'Both -fd_iw and -fd_ih parameters should be specified '
'for reshape.')
infer = parser.add_argument_group('Inference options')
infer.add_argument('-d_fd', default='CPU', choices=DEVICE_KINDS,
help='Optional. Target device for Face Detection model. '
'Default value is CPU.')
infer.add_argument('-d_lm', default='CPU', choices=DEVICE_KINDS,
help='Optional. Target device for Facial Landmarks Detection '
'model. Default value is CPU.')
infer.add_argument('-d_reid', default='CPU', choices=DEVICE_KINDS,
help='Optional. Target device for Face Reidentification '
'model. Default value is CPU.')
infer.add_argument('-l', '--cpu_lib', metavar="PATH", default='',
help='Optional. For MKLDNN (CPU)-targeted custom layers, '
'if any. Path to a shared library with custom '
'layers implementations.')
infer.add_argument('-c', '--gpu_lib', metavar="PATH", default='',
help='Optional. For clDNN (GPU)-targeted custom layers, '
'if any. Path to the XML file with descriptions '
'of the kernels.')
infer.add_argument('-v', '--verbose', action='store_true',
help='Optional. Be more verbose.')
infer.add_argument('-pc', '--perf_stats', action='store_true',
help='Optional. Output detailed per-layer performance stats.')
infer.add_argument('-t_fd', metavar='[0..1]', type=float, default=0.6,
help='Optional. Probability threshold for face detections.')
infer.add_argument('-t_id', metavar='[0..1]', type=float, default=0.3,
help='Optional. Cosine distance threshold between two vectors '
'for face identification.')
infer.add_argument('-exp_r_fd', metavar='NUMBER', type=float, default=1.15,
help='Optional. Scaling ratio for bboxes passed to face recognition.')
return parser
|
def build_argparser():
parser = ArgumentParser()
general = parser.add_argument_group('General')
general.add_argument('-i', '--input', required=True,
help='Required. An input to process. The input must be a single image, '
'a folder of images, video file or camera id.')
general.add_argument('--loop', default=False, action='store_true',
help='Optional. Enable reading the input in a loop.')
general.add_argument('-o', '--output',
help='Optional. Name of output to save.')
general.add_argument('-limit', '--output_limit', default=1000, type=int,
help='Optional. Number of frames to store in output. '
'If 0 is set, all frames are stored.')
general.add_argument('--output_resolution', default=None, type=resolution,
help='Optional. Specify the maximum output window resolution '
'in (width x height) format. Example: 1280x720. '
'Input frame size used by default.')
general.add_argument('--no_show', action='store_true',
help="Optional. Don't show output.")
general.add_argument('-cw', '--crop_width', default=0, type=int,
help='Optional. Crop the input stream to this width. '
'Both -cw and -ch parameters should be specified '
'to use crop.')
general.add_argument('-ch', '--crop_height', default=0, type=int,
help='Optional. Crop the input stream to this height. '
'Both -cw and -ch parameters should be specified '
'to use crop.')
general.add_argument('--match_algo', default='HUNGARIAN', choices=('HUNGARIAN', 'MIN_DIST'),
help='Optional. Algorithm for face matching. Default: HUNGARIAN.')
general.add_argument('-u', '--utilization_monitors', default='', type=str,
help='Optional. List of monitors to show initially.')
gallery = parser.add_argument_group('Faces database')
gallery.add_argument('-fg', type=Path, required=True,
help='Required. Path to the face images directory.')
gallery.add_argument('--run_detector', action='store_true',
help='Optional. Use Face Detection model to find faces '
'on the face images, otherwise use full images.')
gallery.add_argument('--allow_grow', action='store_true',
help='Optional. Allow to grow faces gallery and to dump on disk. '
'Available only if --no_show option is off.')
models = parser.add_argument_group('Models')
models.add_argument('-m_fd', type=Path, required=True,
help='Required. Path to an .xml file with Face Detection model.')
models.add_argument('-m_lm', type=Path, required=True,
help='Required. Path to an .xml file with Facial Landmarks Detection model.')
models.add_argument('-m_reid', type=Path, required=True,
help='Required. Path to an .xml file with Face Reidentification model.')
models.add_argument('-fd_iw', '--fd_input_width', default=0, type=int,
help='Optional. Specify the input width of detection model. '
'Both -fd_iw and -fd_ih parameters should be specified '
'for reshape.')
models.add_argument('-fd_ih', '--fd_input_height', default=0, type=int,
help='Optional. Specify the input height of detection model. '
'Both -fd_iw and -fd_ih parameters should be specified '
'for reshape.')
infer = parser.add_argument_group('Inference options')
infer.add_argument('-d_fd', default='CPU', choices=DEVICE_KINDS,
help='Optional. Target device for Face Detection model. '
'Default value is CPU.')
infer.add_argument('-d_lm', default='CPU', choices=DEVICE_KINDS,
help='Optional. Target device for Facial Landmarks Detection '
'model. Default value is CPU.')
infer.add_argument('-d_reid', default='CPU', choices=DEVICE_KINDS,
help='Optional. Target device for Face Reidentification '
'model. Default value is CPU.')
infer.add_argument('-l', '--cpu_lib', metavar="PATH", default='',
help='Optional. For MKLDNN (CPU)-targeted custom layers, '
'if any. Path to a shared library with custom '
'layers implementations.')
infer.add_argument('-c', '--gpu_lib', metavar="PATH", default='',
help='Optional. For clDNN (GPU)-targeted custom layers, '
'if any. Path to the XML file with descriptions '
'of the kernels.')
infer.add_argument('-v', '--verbose', action='store_true',
help='Optional. Be more verbose.')
infer.add_argument('-pc', '--perf_stats', action='store_true',
help='Optional. Output detailed per-layer performance stats.')
infer.add_argument('-t_fd', metavar='[0..1]', type=float, default=0.6,
help='Optional. Probability threshold for face detections.')
infer.add_argument('-t_id', metavar='[0..1]', type=float, default=0.3,
help='Optional. Cosine distance threshold between two vectors '
'for face identification.')
infer.add_argument('-exp_r_fd', metavar='NUMBER', type=float, default=1.15,
help='Optional. Scaling ratio for bboxes passed to face recognition.')
return parser
|
23,568 |
def initialize_scheduler():
"""
Start the scheduled background tasks. Re-schedule if interval settings changed.
"""
with SCHED_LOCK:
# Check if scheduler should be started
start_jobs = not len(SCHED.get_jobs())
# Update check
github_minutes = CONFIG.CHECK_GITHUB_INTERVAL if CONFIG.CHECK_GITHUB_INTERVAL and CONFIG.CHECK_GITHUB else 0
pms_update_notify_hours = CONFIG.PMS_UPDATE_NOTIFY_INTERVAL if 1 <= CONFIG.PMS_UPDATE_NOTIFY_INTERVAL <= 999 else 24
schedule_job(versioncheck.check_update, 'Check GitHub for updates',
hours=0, minutes=github_minutes, seconds=0, args=(bool(CONFIG.PLEXPY_AUTO_UPDATE), True))
backup_hours = CONFIG.BACKUP_INTERVAL if 1 <= CONFIG.BACKUP_INTERVAL <= 24 else 6
schedule_job(database.make_backup, 'Backup Tautulli database',
hours=backup_hours, minutes=0, seconds=0, args=(True, True))
schedule_job(config.make_backup, 'Backup Tautulli config',
hours=backup_hours, minutes=0, seconds=0, args=(True, True))
if WS_CONNECTED and CONFIG.PMS_IP and CONFIG.PMS_TOKEN:
schedule_job(plextv.get_server_resources, 'Refresh Plex server URLs',
hours=12 * (not bool(CONFIG.PMS_URL_MANUAL)), minutes=0, seconds=0)
schedule_job(activity_pinger.check_server_access, 'Check for Plex remote access',
hours=0, minutes=0, seconds=60 * bool(CONFIG.MONITOR_REMOTE_ACCESS))
schedule_job(activity_pinger.check_server_updates, 'Check for Plex updates',
hours=pms_update_notify_hours * bool(CONFIG.MONITOR_PMS_UPDATES), minutes=0, seconds=0)
# Refresh the users list and libraries list
user_hours = CONFIG.REFRESH_USERS_INTERVAL if 1 <= CONFIG.REFRESH_USERS_INTERVAL <= 24 else 12
library_hours = CONFIG.REFRESH_LIBRARIES_INTERVAL if 1 <= CONFIG.REFRESH_LIBRARIES_INTERVAL <= 24 else 12
schedule_job(users.refresh_users, 'Refresh users list',
hours=user_hours, minutes=0, seconds=0)
schedule_job(libraries.refresh_libraries, 'Refresh libraries list',
hours=library_hours, minutes=0, seconds=0)
schedule_job(activity_pinger.connect_server, 'Check for server response',
hours=0, minutes=0, seconds=0)
schedule_job(web_socket.send_ping, 'Websocket ping',
hours=0, minutes=0, seconds=10 * bool(CONFIG.WEBSOCKET_MONITOR_PING_PONG))
else:
# Cancel all jobs
schedule_job(plextv.get_server_resources, 'Refresh Plex server URLs',
hours=0, minutes=0, seconds=0)
schedule_job(activity_pinger.check_server_access, 'Check for Plex remote access',
hours=0, minutes=0, seconds=0)
schedule_job(activity_pinger.check_server_updates, 'Check for Plex updates',
hours=0, minutes=0, seconds=0)
schedule_job(users.refresh_users, 'Refresh users list',
hours=0, minutes=0, seconds=0)
schedule_job(libraries.refresh_libraries, 'Refresh libraries list',
hours=0, minutes=0, seconds=0)
# Schedule job to reconnect server
schedule_job(activity_pinger.connect_server, 'Check for server response',
hours=0, minutes=0, seconds=60, args=(False,))
schedule_job(web_socket.send_ping, 'Websocket ping',
hours=0, minutes=0, seconds=0)
# Start scheduler
if start_jobs and len(SCHED.get_jobs()):
try:
SCHED.start()
except Exception as e:
logger.error(e)
|
def initialize_scheduler():
"""
Start the scheduled background tasks. Re-schedule if interval settings changed.
"""
with SCHED_LOCK:
# Check if scheduler should be started
start_jobs = not len(SCHED.get_jobs())
# Update check
github_minutes = CONFIG.CHECK_GITHUB_INTERVAL if CONFIG.CHECK_GITHUB_INTERVAL and CONFIG.CHECK_GITHUB else 0
pms_update_notify_hours = CONFIG.PMS_UPDATE_NOTIFY_INTERVAL if 1 <= CONFIG.PMS_UPDATE_NOTIFY_INTERVAL <= 999 else 24
schedule_job(versioncheck.check_update, 'Check GitHub for updates',
hours=0, minutes=github_minutes, seconds=0, args=(bool(CONFIG.PLEXPY_AUTO_UPDATE), True))
backup_hours = CONFIG.BACKUP_INTERVAL if 1 <= CONFIG.BACKUP_INTERVAL <= 24 else 6
schedule_job(database.make_backup, 'Backup Tautulli database',
hours=backup_hours, minutes=0, seconds=0, args=(True, True))
schedule_job(config.make_backup, 'Backup Tautulli config',
hours=backup_hours, minutes=0, seconds=0, args=(True, True))
if WS_CONNECTED and CONFIG.PMS_IP and CONFIG.PMS_TOKEN:
schedule_job(plextv.get_server_resources, 'Refresh Plex server URLs',
hours=12 * (not bool(CONFIG.PMS_URL_MANUAL)), minutes=0, seconds=0)
schedule_job(activity_pinger.check_server_access, 'Check for Plex remote access',
hours=0, minutes=0, seconds=60 * bool(CONFIG.MONITOR_REMOTE_ACCESS))
schedule_job(activity_pinger.check_server_updates, 'Check for Plex updates',
hours=pms_update_check_hours * bool(CONFIG.MONITOR_PMS_UPDATES), minutes=0, seconds=0)
# Refresh the users list and libraries list
user_hours = CONFIG.REFRESH_USERS_INTERVAL if 1 <= CONFIG.REFRESH_USERS_INTERVAL <= 24 else 12
library_hours = CONFIG.REFRESH_LIBRARIES_INTERVAL if 1 <= CONFIG.REFRESH_LIBRARIES_INTERVAL <= 24 else 12
schedule_job(users.refresh_users, 'Refresh users list',
hours=user_hours, minutes=0, seconds=0)
schedule_job(libraries.refresh_libraries, 'Refresh libraries list',
hours=library_hours, minutes=0, seconds=0)
schedule_job(activity_pinger.connect_server, 'Check for server response',
hours=0, minutes=0, seconds=0)
schedule_job(web_socket.send_ping, 'Websocket ping',
hours=0, minutes=0, seconds=10 * bool(CONFIG.WEBSOCKET_MONITOR_PING_PONG))
else:
# Cancel all jobs
schedule_job(plextv.get_server_resources, 'Refresh Plex server URLs',
hours=0, minutes=0, seconds=0)
schedule_job(activity_pinger.check_server_access, 'Check for Plex remote access',
hours=0, minutes=0, seconds=0)
schedule_job(activity_pinger.check_server_updates, 'Check for Plex updates',
hours=0, minutes=0, seconds=0)
schedule_job(users.refresh_users, 'Refresh users list',
hours=0, minutes=0, seconds=0)
schedule_job(libraries.refresh_libraries, 'Refresh libraries list',
hours=0, minutes=0, seconds=0)
# Schedule job to reconnect server
schedule_job(activity_pinger.connect_server, 'Check for server response',
hours=0, minutes=0, seconds=60, args=(False,))
schedule_job(web_socket.send_ping, 'Websocket ping',
hours=0, minutes=0, seconds=0)
# Start scheduler
if start_jobs and len(SCHED.get_jobs()):
try:
SCHED.start()
except Exception as e:
logger.error(e)
|
39,458 |
def make_r2d(argv=None):
if argv is None:
argv = sys.argv[1:]
# version must be checked before parse, as repo/cmd are required and
# will spit out an error if allowed to be parsed first.
if '--version' in argv:
print(__version__)
sys.exit(0)
args = get_argparser().parse_args(argv)
r2d = Repo2Docker()
if args.debug:
r2d.log_level = logging.DEBUG
r2d.load_config_file(args.config)
if args.appendix:
r2d.appendix = args.appendix
r2d.repo = args.repo
r2d.ref = args.ref
# user wants to mount a local directory into the container for
# editing
if args.editable:
# the user has to point at a directory, not just a path for us
# to be able to mount it. We might have content providers that can
# provide content from a local `something.zip` file, which we
# couldn't mount in editable mode
if os.path.isdir(args.repo):
r2d.volumes[os.path.abspath(args.repo)] = '.'
else:
r2d.log.error('Cannot mount "{}" in editable mode '
'as it is not a directory'.format(args.repo),
extra=dict(phase='failed'))
sys.exit(1)
if args.image_name:
r2d.output_image_spec = args.image_name
else:
# we will pick a name after fetching the repository
r2d.output_image_spec = ""
r2d.json_logs = args.json_logs
r2d.dry_run = not args.build
if r2d.dry_run:
# Can't push nor run if we aren't building
args.run = False
args.push = False
r2d.run = args.run
r2d.push = args.push
# check against r2d.run and not args.run as r2d.run is false on
# --no-build. Also r2d.volumes and not args.volumes since --editable
# modified r2d.volumes
if r2d.volumes and not r2d.run:
# Can't mount if we aren't running
print('To Mount volumes with -v, you also need to run the '
'container')
sys.exit(1)
for v in args.volumes:
src, dest = v.split(':')
r2d.volumes[src] = dest
r2d.run_cmd = args.cmd
if args.all_ports and not r2d.run:
print('To publish user defined port mappings, the container must '
'also be run')
sys.exit(1)
if args.ports and not r2d.run:
print('To publish user defined port mappings, the container must '
'also be run')
sys.exit(1)
if args.ports and not r2d.run_cmd:
print('To publish user defined port mapping, user must specify '
'the command to run in the container')
sys.exit(1)
r2d.ports = validate_and_generate_port_mapping(args.ports)
r2d.all_ports = args.all_ports
if args.user_id:
r2d.user_id = args.user_id
if args.user_name:
r2d.user_name = args.user_name
if args.build_memory_limit:
# if the string only contains numerals we assume it should be an int
# and specifies a size inn bytes
if args.build_memory_limit.isnumeric():
r2d.build_memory_limit = int(args.build_memory_limit)
else:
r2d.build_memory_limit = args.build_memory_limit
if args.environment and not r2d.run:
print('To specify environment variables, you also need to run '
'the container')
sys.exit(1)
if args.subdir:
r2d.subdir = args.subdir
if args.cache_from:
r2d.cache_from = args.cache_from
r2d.environment = args.environment
# if the source exists locally we don't want to delete it at the end
# FIXME: Find a better way to figure out if repo is 'local'. Push this into ContentProvider?
if os.path.exists(args.repo):
r2d.cleanup_checkout = False
else:
r2d.cleanup_checkout = args.clean
if args.target_repo_dir:
r2d.target_repo_dir = args.target_repo_dir
return r2d
|
def make_r2d(argv=None):
if argv is None:
argv = sys.argv[1:]
# version must be checked before parse, as repo/cmd are required and
# will spit out an error if allowed to be parsed first.
if '--version' in argv:
print(__version__)
sys.exit(0)
args = get_argparser().parse_args(argv)
r2d = Repo2Docker()
if args.debug:
r2d.log_level = logging.DEBUG
r2d.load_config_file(args.config)
if args.appendix:
r2d.appendix = args.appendix
r2d.repo = args.repo
r2d.ref = args.ref
# user wants to mount a local directory into the container for
# editing
if args.editable:
# the user has to point at a directory, not just a path for us
# to be able to mount it. We might have content providers that can
# provide content from a local `something.zip` file, which we
# couldn't mount in editable mode
if os.path.isdir(args.repo):
r2d.volumes[os.path.abspath(args.repo)] = '.'
else:
r2d.log.error('Cannot mount "{}" in editable mode '
'as it is not a directory'.format(args.repo),
extra=dict(phase='failed'))
sys.exit(1)
if args.image_name:
r2d.output_image_spec = args.image_name
else:
# we will pick a name after fetching the repository
r2d.output_image_spec = ""
r2d.json_logs = args.json_logs
r2d.dry_run = not args.build
if r2d.dry_run:
# Can't push nor run if we aren't building
args.run = False
args.push = False
r2d.run = args.run
r2d.push = args.push
# check against r2d.run and not args.run as r2d.run is false on
# --no-build. Also r2d.volumes and not args.volumes since --editable
# modified r2d.volumes
if r2d.volumes and not r2d.run:
# Can't mount if we aren't running
print('To Mount volumes with -v, you also need to run the '
'container')
sys.exit(1)
for v in args.volumes:
src, dest = v.split(':')
r2d.volumes[src] = dest
r2d.run_cmd = args.cmd
if args.all_ports and not r2d.run:
print('To publish user defined port mappings, the container must '
'also be run')
sys.exit(1)
if args.ports and not r2d.run:
print('To publish user defined port mappings, the container must '
'also be run')
sys.exit(1)
if args.ports and not r2d.run_cmd:
print('To publish user defined port mapping, user must specify '
'the command to run in the container')
sys.exit(1)
r2d.ports = validate_and_generate_port_mapping(args.ports)
r2d.all_ports = args.all_ports
if args.user_id:
r2d.user_id = args.user_id
if args.user_name:
r2d.user_name = args.user_name
if args.build_memory_limit:
# if the string only contains numerals we assume it should be an int
# and specifies a size in bytes
if args.build_memory_limit.isnumeric():
r2d.build_memory_limit = int(args.build_memory_limit)
else:
r2d.build_memory_limit = args.build_memory_limit
if args.environment and not r2d.run:
print('To specify environment variables, you also need to run '
'the container')
sys.exit(1)
if args.subdir:
r2d.subdir = args.subdir
if args.cache_from:
r2d.cache_from = args.cache_from
r2d.environment = args.environment
# if the source exists locally we don't want to delete it at the end
# FIXME: Find a better way to figure out if repo is 'local'. Push this into ContentProvider?
if os.path.exists(args.repo):
r2d.cleanup_checkout = False
else:
r2d.cleanup_checkout = args.clean
if args.target_repo_dir:
r2d.target_repo_dir = args.target_repo_dir
return r2d
|
48,911 |
def main():
print(version.BANNER)
args = parse_args()
init_logger(args)
if args.target.upper() == "LOCAL" :
if args.xmlfile is not None:
# Only given decrypt XML file
if os.path.exists(args.xmlfile):
g = GetGPPasswords(None, None)
logging.debug("Opening %s XML file for reading ..." % args.xmlfile)
f = open(args.xmlfile,'r')
rawdata = ''.join(f.readlines())
f.close()
results = g.parse_xmlfile_content(args.xmlfile, rawdata)
g.show(results)
else:
print('[!] File does not exists or is not readable.')
else:
domain, username, password, address, lmhash, nthash = parse_target(args)
try:
smbClient= init_smb_session(args, domain, username, password, address, lmhash, nthash)
g = GetGPPasswords(smbClient, args.share)
g.list_shares()
g.find_cpasswords(args.base_dir)
except Exception as e:
if logging.getLogger().level == logging.DEBUG:
traceback.print_exc()
logging.error(str(e))
|
def main():
print(version.BANNER)
args = parse_args()
init_logger(args)
if args.target.upper() == "LOCAL" :
if args.xmlfile is not None:
# Only given decrypt XML file
if os.path.exists(args.xmlfile):
g = GetGPPasswords(None, None)
logging.debug("Opening %s XML file for reading ..." % args.xmlfile)
f = open(args.xmlfile,'r')
rawdata = ''.join(f.readlines())
f.close()
results = g.parse_xmlfile_content(args.xmlfile, rawdata)
g.show(results)
else:
print('[!] File does not exists or is not readable.')
else:
domain, username, password, address, lmhash, nthash = parse_target(args)
try:
smbClient= init_smb_session(args, domain, username, password, address, lmhash, nthash)
g = GetGPPasswords(smbClient, args.share)
g.list_shares()
g.find_cpasswords(args.base_dir)
except Exception as e:
if logging.getLogger().level == logging.DEBUG:
traceback.print_exc()
logging.error(str(e))
|
56,342 |
def cross_chan_correlation(
st1, streams, shift_len=0.0, allow_individual_trace_shifts=True,
xcorr_func='fftw', concurrency="concurrent", cores=1, **kwargs):
"""
Calculate cross-channel correlation.
Determine the cross-channel correlation between two streams of
multichannel seismic data.
:type st1: obspy.core.stream.Stream
:param st1: Stream one
:type streams: list
:param streams: Streams to compare to.
:type shift_len: float
:param shift_len:
Seconds to shift the streams by (total value for negative and positive
direction together)
:type allow_individual_trace_shifts: bool
:param allow_individual_trace_shifts:
Controls whether templates are shifted by shift_len in relation to the
picks as a whole, or whether each trace can be shifted individually.
Defaults to True.
:type xcorr_func: str, callable
:param xcorr_func:
The method for performing correlations. Accepts either a string or
callable. See :func:`eqcorrscan.utils.correlate.register_array_xcorr`
for more details
:type concurrency: str
:param concurrency: Concurrency for xcorr-func.
:type cores: int
:param cores: Number of threads to parallel over
:returns:
cross channel correlation, float - normalized by number of channels.
locations of maximums
:rtype: numpy.ndarray, numpy.ndarray
.. Note::
If no matching channels were found then the coherance and index for
that stream will be nan.
"""
# Cut all channels in stream-list to be the correct length (shorter than
# st1 if stack = False by shift_len).
allow_individual_trace_shifts =\
allow_individual_trace_shifts and shift_len > 0
n_streams = len(streams)
df = st1[0].stats.sampling_rate
end_trim = int((shift_len * df) / 2)
_streams = []
if end_trim > 0:
for stream in streams:
_stream = stream.copy() # Do not work on the users data
for tr in _stream:
tr.data = tr.data[end_trim: -end_trim]
if tr.stats.sampling_rate != df:
raise NotImplementedError("Sampling rates differ")
_streams.append(_stream)
streams = _streams
else:
# _prep_data_for_correlation works in place on data.
# We need to copy it first.
streams = [stream.copy() for stream in streams]
# Check which channels are in st1 and match those in the stream_list
st1, prep_streams, stream_indexes = _prep_data_for_correlation(
stream=st1.copy(), templates=streams,
template_names=list(range(len(streams))), force_stream_epoch=False)
# Run the correlations
multichannel_normxcorr = get_stream_xcorr(xcorr_func, concurrency)
[cccsums, no_chans, _] = multichannel_normxcorr(
templates=prep_streams, stream=st1, cores=cores, stack=False, **kwargs)
# Find maximas, sum and divide by no_chans
if allow_individual_trace_shifts:
coherances = cccsums.max(axis=-1).sum(axis=-1) / no_chans
else:
cccsums = cccsums.sum(axis=1)
coherances = cccsums.max(axis=-1) / no_chans
# Subtract half length of correlogram and convert positions to seconds
positions = (cccsums.argmax(axis=-1) - end_trim) / df
# This section re-orders the coherences to correspond to the order of the
# input streams
_coherances = np.empty(n_streams)
if allow_individual_trace_shifts:
n_max_traces = max([len(st) for st in streams])
n_shifts_per_stream = positions.shape[1]
_positions = np.empty([positions.shape[0], n_max_traces])
else:
# _positions = np.empty_like(positions)
_positions = np.empty([positions.shape[0], 1])
n_shifts_per_stream = 1
_coherances.fill(np.nan)
_positions.fill(np.nan)
for coh_ind, stream_ind in enumerate(stream_indexes):
_coherances[stream_ind] = coherances[coh_ind]
_positions[stream_ind, :n_shifts_per_stream] = positions[coh_ind]
if not allow_individual_trace_shifts: # remove empty third axis from array
_positions = _positions[:, ]
return _coherances, _positions
|
def cross_chan_correlation(
st1, streams, shift_len=0.0, allow_individual_trace_shifts=True,
xcorr_func='fftw', concurrency="concurrent", cores=1, **kwargs):
"""
Calculate cross-channel correlation.
Determine the cross-channel correlation between two streams of
multichannel seismic data.
:type st1: obspy.core.stream.Stream
:param st1: Stream one
:type streams: list
:param streams: Streams to compare to.
:type shift_len: float
:param shift_len:
Seconds to shift the streams by (total value for negative and positive
direction together)
:type allow_individual_trace_shifts: bool
:param allow_individual_trace_shifts:
Controls whether templates are shifted by shift_len in relation to the
picks as a whole, or whether each trace can be shifted individually.
Defaults to True.
:type xcorr_func: str, callable
:param xcorr_func:
The method for performing correlations. Accepts either a string or
callable. See :func:`eqcorrscan.utils.correlate.register_array_xcorr`
for more details
:type concurrency: str
:param concurrency: Concurrency for xcorr-func.
:type cores: int
:param cores: Number of threads to parallel over
:returns:
cross channel correlation, float - normalized by number of channels.
locations of maximums
:rtype: numpy.ndarray, numpy.ndarray
.. Note::
If no matching channels were found then the coherance and index for
that stream will be nan.
"""
# Cut all channels in stream-list to be the correct length (shorter than
# st1 if stack = False by shift_len).
allow_individual_trace_shifts =\
allow_individual_trace_shifts and shift_len > 0
n_streams = len(streams)
df = st1[0].stats.sampling_rate
end_trim = int((shift_len * df) / 2)
_streams = []
if end_trim > 0:
for stream in streams:
_stream = stream.copy() # Do not work on the users data
for tr in _stream:
tr.data = tr.data[end_trim: -end_trim]
if tr.stats.sampling_rate != df:
raise NotImplementedError("Sampling rates differ")
_streams.append(_stream)
streams = _streams
else:
# _prep_data_for_correlation works in place on data.
# We need to copy it first.
streams = [stream.copy() for stream in streams]
# Check which channels are in st1 and match those in the stream_list
st1, prep_streams, stream_indexes = _prep_data_for_correlation(
stream=st1.copy(), templates=streams,
template_names=list(range(len(streams))), force_stream_epoch=False)
# Run the correlations
multichannel_normxcorr = get_stream_xcorr(xcorr_func, concurrency)
[cccsums, no_chans, _] = multichannel_normxcorr(
templates=prep_streams, stream=st_preped, cores=cores, stack=False,
**kwargs)
# Find maximas, sum and divide by no_chans
if allow_individual_trace_shifts:
coherances = cccsums.max(axis=-1).sum(axis=-1) / no_chans
else:
cccsums = cccsums.sum(axis=1)
coherances = cccsums.max(axis=-1) / no_chans
# Subtract half length of correlogram and convert positions to seconds
positions = (cccsums.argmax(axis=-1) - end_trim) / df
# This section re-orders the coherences to correspond to the order of the
# input streams
_coherances = np.empty(n_streams)
if allow_individual_trace_shifts:
n_max_traces = max([len(st) for st in streams])
n_shifts_per_stream = positions.shape[1]
_positions = np.empty([positions.shape[0], n_max_traces])
else:
# _positions = np.empty_like(positions)
_positions = np.empty([positions.shape[0], 1])
n_shifts_per_stream = 1
_coherances.fill(np.nan)
_positions.fill(np.nan)
for coh_ind, stream_ind in enumerate(stream_indexes):
_coherances[stream_ind] = coherances[coh_ind]
_positions[stream_ind, :n_shifts_per_stream] = positions[coh_ind]
if not allow_individual_trace_shifts: # remove empty third axis from array
_positions = _positions[:, ]
return _coherances, _positions
|
45,770 |
def try_get_seed(seed=None, device='cuda'):
"""Try to get the seed.
If the seed is not set, the seed will be automatically randomized,
and then broadcast to all processes.
Args:
seed (int, Optional): The seed. Default to None.
device (str): The device where the seed will be put on.
Default to 'cuda'.
Returns:
int: Seed to be used.
"""
if seed is not None:
return seed
# When the seed is not set, unknown behavior may occur.
# Please refer to https://github.com/open-mmlab/mmdetection/issues/6339
rank, world_size = get_dist_info()
seed = np.random.randint(2**31)
if world_size == 1:
return seed
if rank == 0:
random_num = torch.tensor(seed, dtype=torch.int32, device=device)
else:
random_num = torch.tensor(0, dtype=torch.int32, device=device)
dist.broadcast(random_num, src=0)
return random_num.item()
|
def try_get_seed(seed=None, device='cuda'):
"""Try to get the seed.
If the seed is not set, the seed will be automatically randomized,
and then broadcast to all processes.
Args:
seed (int, Optional): The seed. Default to None.
device (str): The device where the seed will be put on.
Default to 'cuda'.
Returns:
int: Seed to be used.
"""
if seed is not None:
return seed
# Make sure all ranks share the same random seed to prevent some potential bugs.
# Please refer to https://github.com/open-mmlab/mmdetection/issues/6339
rank, world_size = get_dist_info()
seed = np.random.randint(2**31)
if world_size == 1:
return seed
if rank == 0:
random_num = torch.tensor(seed, dtype=torch.int32, device=device)
else:
random_num = torch.tensor(0, dtype=torch.int32, device=device)
dist.broadcast(random_num, src=0)
return random_num.item()
|
45,268 |
def train(
params: Dict,
dtrain: ModinDMatrix,
*args,
evals=(),
nthread: Optional[int] = cpu_count(),
evenly_data_distribution: Optional[bool] = True,
**kwargs,
):
"""
Train XGBoost model.
Parameters
----------
params : dict
Booster params.
dtrain : ModinDMatrix
Data to be trained.
nthread: int
Number of threads for using in each node. By default it is equal to
number of threads on master node.
evenly_data_distribution: boolean, default True
Whether make evenly distribution of partitions between nodes or not.
In case `False` minimal datatransfer between nodes will be provided
but the data may not be evenly distributed.
\\*\\*kwargs:
Other parameters are the same as `xgboost.train` except for
`evals_result`, which is returned as part of function return value
instead of argument.
Returns
-------
dict
A dictionary containing trained booster and evaluation history.
`history` field is the same as `eval_result` from `xgboost.train`.
.. code-block:: python
{'booster': xgboost.Booster,
'history': {'train': {'logloss': ['0.48253', '0.35953']},
'eval': {'logloss': ['0.480385', '0.357756']}}}
"""
LOGGER.info("Training started")
s = time.time()
X, y = dtrain
assert len(X) == len(y)
X_row_parts = unwrap_row_partitions(X, bind_ip=not evenly_data_distribution)
y_row_parts = unwrap_row_partitions(y, bind_ip=not evenly_data_distribution)
assert len(X_row_parts) == len(y_row_parts), "Unaligned train data"
# Create remote actors
actors = create_actors(nthread=nthread)
add_as_eval_method = None
if len(evals):
for (eval_data, method) in evals:
if id(eval_data) == id(dtrain):
add_as_eval_method = method
evals.remove((eval_data, method))
evals_unwrapped = [
(
(
unwrap_row_partitions(eval_X, bind_ip=not evenly_data_distribution),
unwrap_row_partitions(eval_y, bind_ip=not evenly_data_distribution),
eval_method,
)
)
for ((eval_X, eval_y), eval_method) in evals
]
for (
eval_X_row_parts,
eval_y_row_parts,
eval_method,
) in evals_unwrapped:
# Split data across workers
_split_data_across_actors(
actors,
lambda actor, *Xy: actor.add_eval_data.remote(
*Xy, eval_method=eval_method
),
eval_X_row_parts,
eval_y_row_parts,
evenly_data_distribution=evenly_data_distribution,
)
# Split data across workers
_split_data_across_actors(
actors,
lambda actor, *Xy: actor.set_train_data.remote(
*Xy, add_as_eval_method=add_as_eval_method
),
X_row_parts,
y_row_parts,
evenly_data_distribution=evenly_data_distribution,
)
LOGGER.info(f"Data preparation time: {time.time() - s} s")
s = time.time()
# Start Rabit tracker
env = _start_rabit_tracker(len(actors))
rabit_args = [("%s=%s" % item).encode() for item in env.items()]
# Train
fut = [actor.train.remote(rabit_args, params, *args, **kwargs) for actor in actors]
# All results should be the same because of Rabit tracking. So we just
# return the first one.
result = ray.get(fut[0])
LOGGER.info(f"Training time: {time.time() - s} s")
LOGGER.info("Training finished")
return result
|
def train(
params: Dict,
dtrain: ModinDMatrix,
*args,
evals=(),
nthread: Optional[int] = cpu_count(),
evenly_data_distribution: Optional[bool] = True,
**kwargs,
):
"""
Train XGBoost model.
Parameters
----------
params : dict
Booster params.
dtrain : ModinDMatrix
Data to be trained.
nthread: int
Number of threads for using in each node. By default it is equal to
number of threads on master node.
evenly_data_distribution: boolean, default True
Whether make evenly distribution of partitions between nodes or not.
In case `False` minimal datatransfer between nodes will be provided
but the data may not be evenly distributed.
\\*\\*kwargs:
Other parameters are the same as `xgboost.train` except for
`evals_result`, which is returned as part of function return value
instead of argument.
Returns
-------
dict
A dictionary containing trained booster and evaluation history.
`history` field is the same as `eval_result` from `xgboost.train`.
.. code-block:: python
{'booster': xgboost.Booster,
'history': {'train': {'logloss': ['0.48253', '0.35953']},
'eval': {'logloss': ['0.480385', '0.357756']}}}
"""
LOGGER.info("Training started")
s = time.time()
X, y = dtrain
assert len(X) == len(y)
X_row_parts = unwrap_row_partitions(X, bind_ip=not evenly_data_distribution)
y_row_parts = unwrap_row_partitions(y, bind_ip=not evenly_data_distribution)
assert len(X_row_parts) == len(y_row_parts), "Unaligned train data"
# Create remote actors
actors = create_actors(nthread=nthread)
add_as_eval_method = None
if len(evals):
for (eval_data, method) in evals:
if eval_data is dtrain:
add_as_eval_method = method
evals.remove((eval_data, method))
evals_unwrapped = [
(
(
unwrap_row_partitions(eval_X, bind_ip=not evenly_data_distribution),
unwrap_row_partitions(eval_y, bind_ip=not evenly_data_distribution),
eval_method,
)
)
for ((eval_X, eval_y), eval_method) in evals
]
for (
eval_X_row_parts,
eval_y_row_parts,
eval_method,
) in evals_unwrapped:
# Split data across workers
_split_data_across_actors(
actors,
lambda actor, *Xy: actor.add_eval_data.remote(
*Xy, eval_method=eval_method
),
eval_X_row_parts,
eval_y_row_parts,
evenly_data_distribution=evenly_data_distribution,
)
# Split data across workers
_split_data_across_actors(
actors,
lambda actor, *Xy: actor.set_train_data.remote(
*Xy, add_as_eval_method=add_as_eval_method
),
X_row_parts,
y_row_parts,
evenly_data_distribution=evenly_data_distribution,
)
LOGGER.info(f"Data preparation time: {time.time() - s} s")
s = time.time()
# Start Rabit tracker
env = _start_rabit_tracker(len(actors))
rabit_args = [("%s=%s" % item).encode() for item in env.items()]
# Train
fut = [actor.train.remote(rabit_args, params, *args, **kwargs) for actor in actors]
# All results should be the same because of Rabit tracking. So we just
# return the first one.
result = ray.get(fut[0])
LOGGER.info(f"Training time: {time.time() - s} s")
LOGGER.info("Training finished")
return result
|
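The row above swaps `id(eval_data) == id(dtrain)` for `eval_data is dtrain` when checking whether an evaluation set is the training set itself. A small stand-alone sketch of that identity-check idiom (the sample data below is invented for illustration and has nothing to do with the row's Modin/XGBoost objects):

```python
# "a is b" asks whether two names refer to the same object; it is the idiomatic
# spelling of the comparison that "id(a) == id(b)" performs explicitly.
dtrain = ([1, 2, 3], [0, 1, 0])
evals = [(dtrain, "train"), (([4, 5], [1, 1]), "eval")]

for eval_data, method in evals:
    same_object = eval_data is dtrain          # identity check
    same_by_id = id(eval_data) == id(dtrain)   # equivalent, but less idiomatic
    print(method, same_object, same_by_id)
# train True True
# eval False False
```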
41,396 |
def make_labels(dest_folder, zoom, country, classes, ml_type, bounding_box, sparse, **kwargs):
"""Create label data from OSM QA tiles for specified classes
Perform the following operations:
- If necessary, re-tile OSM QA Tiles to the specified zoom level
- Iterate over all tiles within the bounding box and produce a label for each
- Save the label file as labels.npz
- Create an output for previewing the labels (GeoJSON or PNG depending upon ml_type)
Parameters
------------
dest_folder: str
Folder to save labels and example tiles into
zoom: int
The zoom level to create tiles at
country: str
The OSM QA Tile extract to download. The value should be a country string matching a value found in
`label_maker/countries.txt`
classes: list
A list of classes for machine learning training. Each class is defined as a dict
with two required properties:
- name: class name
- filter: A Mapbox GL Filter.
See the README for more details
ml_type: str
Defines the type of machine learning. One of "classification", "object-detection", or "segmentation"
bounding_box: list
The bounding box to create images from. This should be given in the form: `[xmin, ymin, xmax, ymax]`
as longitude and latitude values between `[-180, 180]` and `[-90, 90]` respectively
sparse: boolean
Limit the total background tiles to write based on `background_ratio` kwarg.
geojson: str
File name for optional geojson label input
**kwargs: dict
Other properties from CLI config passed as keywords to other utility functions
"""
mbtiles_file = op.join(dest_folder, '{}.mbtiles'.format(country))
mbtiles_file_zoomed = op.join(dest_folder, '{}-z{!s}.mbtiles'.format(country, zoom))
if not op.exists(mbtiles_file_zoomed):
filtered_geo = kwargs.get('geojson') or op.join(dest_folder, '{}.geojson'.format(country))
fast_parse = []
if not op.exists(filtered_geo):
fast_parse = ['-P']
print('Retiling QA Tiles to zoom level {} (takes a bit)'.format(zoom))
ps = Popen(['tippecanoe-decode', '-c', '-f', mbtiles_file], stdout=PIPE)
stream_filter_fpath = op.join(op.dirname(label_maker.__file__), 'stream_filter.py')
run(['python', stream_filter_fpath, json.dumps(bounding_box)],
stdin=ps.stdout, stdout=open(filtered_geo, 'w'))
ps.wait()
run(['tippecanoe', '--no-feature-limit', '--no-tile-size-limit'] + fast_parse +
['-l', 'osm', '-f', '-z', str(zoom), '-Z', str(zoom), '-o',
mbtiles_file_zoomed, filtered_geo])
# Call tilereduce
print('Determining labels for each tile')
mbtiles_to_reduce = mbtiles_file_zoomed
tilereduce(dict(zoom=zoom, source=mbtiles_to_reduce, bbox=bounding_box,
args=dict(ml_type=ml_type, classes=classes)),
_mapper, _callback, _done)
# Add empty labels to any tiles which didn't have data
empty_label = _create_empty_label(ml_type, classes)
for tile in tiles(*bounding_box, [zoom]):
index = '-'.join([str(i) for i in tile])
global tile_results
if tile_results.get(index) is None:
tile_results[index] = empty_label
# Print a summary of the labels
_tile_results_summary(ml_type, classes)
# If the --sparse flag is provided, limit the total background tiles to write
if sparse:
pos_examples, neg_examples = [], []
for k in tile_results.keys():
# if we don't match any class, this is a negative example
if not sum([class_match(ml_type, tile_results[k], i + 1) for i, c in enumerate(classes)]):
neg_examples.append(k)
else:
pos_examples.append(k)
# Choose random subset of negative examples
n_neg_ex = int(kwargs['background_ratio'] * len(pos_examples))
neg_examples = np.random.choice(neg_examples, n_neg_ex, replace=False).tolist()
tile_results = {k: tile_results.get(k) for k in pos_examples + neg_examples}
print('Using sparse mode; subselected {} background tiles'.format(n_neg_ex))
# write out labels as numpy arrays
labels_file = op.join(dest_folder, 'labels.npz')
print('Writing out labels to {}'.format(labels_file))
np.savez(labels_file, **tile_results)
# write out labels as GeoJSON or PNG
if ml_type == 'classification':
features = []
for tile, label in tile_results.items():
feat = feature(Tile(*[int(t) for t in tile.split('-')]))
features.append(Feature(geometry=feat['geometry'],
properties=dict(label=label.tolist())))
json.dump(fc(features), open(op.join(dest_folder, 'classification.geojson'), 'w'))
elif ml_type == 'object-detection':
label_folder = op.join(dest_folder, 'labels')
if not op.isdir(label_folder):
makedirs(label_folder)
for tile, label in tile_results.items():
# if we have at least one bounding box label
if bool(label.shape[0]):
label_file = '{}.png'.format(tile)
img = Image.new('RGB', (256, 256))
draw = ImageDraw.Draw(img)
for box in label:
draw.rectangle(((box[0], box[1]), (box[2], box[3])), outline=class_color(box[4]))
print('Writing {}'.format(label_file))
img.save(op.join(label_folder, label_file))
elif ml_type == 'segmentation':
label_folder = op.join(dest_folder, 'labels')
if not op.isdir(label_folder):
makedirs(label_folder)
for tile, label in tile_results.items():
# if we have any class pixels
if np.sum(label):
label_file = '{}.png'.format(tile)
visible_label = np.array([class_color(l) for l in np.nditer(label)]).reshape(256, 256, 3)
img = Image.fromarray(visible_label.astype(np.uint8))
print('Writing {}'.format(label_file))
img.save(op.join(label_folder, label_file))
|
def make_labels(dest_folder, zoom, country, classes, ml_type, bounding_box, sparse, **kwargs):
"""Create label data from OSM QA tiles for specified classes
Perform the following operations:
- If necessary, re-tile OSM QA Tiles to the specified zoom level
- Iterate over all tiles within the bounding box and produce a label for each
- Save the label file as labels.npz
- Create an output for previewing the labels (GeoJSON or PNG depending upon ml_type)
Parameters
------------
dest_folder: str
Folder to save labels and example tiles into
zoom: int
The zoom level to create tiles at
country: str
The OSM QA Tile extract to download. The value should be a country string matching a value found in
`label_maker/countries.txt`
classes: list
A list of classes for machine learning training. Each class is defined as a dict
with two required properties:
- name: class name
- filter: A Mapbox GL Filter.
See the README for more details
ml_type: str
Defines the type of machine learning. One of "classification", "object-detection", or "segmentation"
bounding_box: list
The bounding box to create images from. This should be given in the form: `[xmin, ymin, xmax, ymax]`
as longitude and latitude values between `[-180, 180]` and `[-90, 90]` respectively
sparse: boolean
Limit the total background tiles to write based on `background_ratio` kwarg.
geojson: str
Filepath to optional geojson label input
**kwargs: dict
Other properties from CLI config passed as keywords to other utility functions
"""
mbtiles_file = op.join(dest_folder, '{}.mbtiles'.format(country))
mbtiles_file_zoomed = op.join(dest_folder, '{}-z{!s}.mbtiles'.format(country, zoom))
if not op.exists(mbtiles_file_zoomed):
filtered_geo = kwargs.get('geojson') or op.join(dest_folder, '{}.geojson'.format(country))
fast_parse = []
if not op.exists(filtered_geo):
fast_parse = ['-P']
print('Retiling QA Tiles to zoom level {} (takes a bit)'.format(zoom))
ps = Popen(['tippecanoe-decode', '-c', '-f', mbtiles_file], stdout=PIPE)
stream_filter_fpath = op.join(op.dirname(label_maker.__file__), 'stream_filter.py')
run(['python', stream_filter_fpath, json.dumps(bounding_box)],
stdin=ps.stdout, stdout=open(filtered_geo, 'w'))
ps.wait()
run(['tippecanoe', '--no-feature-limit', '--no-tile-size-limit'] + fast_parse +
['-l', 'osm', '-f', '-z', str(zoom), '-Z', str(zoom), '-o',
mbtiles_file_zoomed, filtered_geo])
# Call tilereduce
print('Determining labels for each tile')
mbtiles_to_reduce = mbtiles_file_zoomed
tilereduce(dict(zoom=zoom, source=mbtiles_to_reduce, bbox=bounding_box,
args=dict(ml_type=ml_type, classes=classes)),
_mapper, _callback, _done)
# Add empty labels to any tiles which didn't have data
empty_label = _create_empty_label(ml_type, classes)
for tile in tiles(*bounding_box, [zoom]):
index = '-'.join([str(i) for i in tile])
global tile_results
if tile_results.get(index) is None:
tile_results[index] = empty_label
# Print a summary of the labels
_tile_results_summary(ml_type, classes)
# If the --sparse flag is provided, limit the total background tiles to write
if sparse:
pos_examples, neg_examples = [], []
for k in tile_results.keys():
# if we don't match any class, this is a negative example
if not sum([class_match(ml_type, tile_results[k], i + 1) for i, c in enumerate(classes)]):
neg_examples.append(k)
else:
pos_examples.append(k)
# Choose random subset of negative examples
n_neg_ex = int(kwargs['background_ratio'] * len(pos_examples))
neg_examples = np.random.choice(neg_examples, n_neg_ex, replace=False).tolist()
tile_results = {k: tile_results.get(k) for k in pos_examples + neg_examples}
print('Using sparse mode; subselected {} background tiles'.format(n_neg_ex))
# write out labels as numpy arrays
labels_file = op.join(dest_folder, 'labels.npz')
print('Writing out labels to {}'.format(labels_file))
np.savez(labels_file, **tile_results)
# write out labels as GeoJSON or PNG
if ml_type == 'classification':
features = []
for tile, label in tile_results.items():
feat = feature(Tile(*[int(t) for t in tile.split('-')]))
features.append(Feature(geometry=feat['geometry'],
properties=dict(label=label.tolist())))
json.dump(fc(features), open(op.join(dest_folder, 'classification.geojson'), 'w'))
elif ml_type == 'object-detection':
label_folder = op.join(dest_folder, 'labels')
if not op.isdir(label_folder):
makedirs(label_folder)
for tile, label in tile_results.items():
# if we have at least one bounding box label
if bool(label.shape[0]):
label_file = '{}.png'.format(tile)
img = Image.new('RGB', (256, 256))
draw = ImageDraw.Draw(img)
for box in label:
draw.rectangle(((box[0], box[1]), (box[2], box[3])), outline=class_color(box[4]))
print('Writing {}'.format(label_file))
img.save(op.join(label_folder, label_file))
elif ml_type == 'segmentation':
label_folder = op.join(dest_folder, 'labels')
if not op.isdir(label_folder):
makedirs(label_folder)
for tile, label in tile_results.items():
# if we have any class pixels
if np.sum(label):
label_file = '{}.png'.format(tile)
visible_label = np.array([class_color(l) for l in np.nditer(label)]).reshape(256, 256, 3)
img = Image.fromarray(visible_label.astype(np.uint8))
print('Writing {}'.format(label_file))
img.save(op.join(label_folder, label_file))
|
42,633 |
def test_market_request():
"""Test that we can query bisq for market prices"""
price = get_bisq_market_price(A_BSQ)
assert price != Price(ZERO)
# Test that error is correctly rised when there is no market
with pytest.raises(RemoteError):
get_bisq_market_price(A_3CRV)
|
def test_market_request():
"""Test that we can query bisq for market prices"""
price = get_bisq_market_price(A_BSQ)
assert price != Price(ZERO)
# Test that error is correctly raised when there is no market
with pytest.raises(RemoteError):
get_bisq_market_price(A_3CRV)
|
6,350 |
def containerlike_class_generator():
methods = set([
"__contains__",
"__getitem__",
"__iter__",
"__len__",
"__reversed__",
"count",
"get",
"index",
"items",
"keys",
"values",
])
# Deliberately starting from 0
for r in range(0, len(methods) + 1):
for selected_methods in sorted(
map(sorted, itertools.combinations(methods, r))):
class ContainerlikeClass(object):
def __init__(self, iterable):
self.__internal_dict__ = dict(iterable)
@classmethod
def name(cls):
return "ContainerlikeClass:{}".format(
":".join(selected_methods))
# for method in always_define.union(selected_methods):
for method in selected_methods:
func = method_factory(method)
setattr(ContainerlikeClass, method, func)
yield ContainerlikeClass
|
def generate_containerlike_class():
methods = set([
"__contains__",
"__getitem__",
"__iter__",
"__len__",
"__reversed__",
"count",
"get",
"index",
"items",
"keys",
"values",
])
# Deliberately starting from 0
for r in range(0, len(methods) + 1):
for selected_methods in sorted(
map(sorted, itertools.combinations(methods, r))):
class ContainerlikeClass(object):
def __init__(self, iterable):
self.__internal_dict__ = dict(iterable)
@classmethod
def name(cls):
return "ContainerlikeClass:{}".format(
":".join(selected_methods))
# for method in always_define.union(selected_methods):
for method in selected_methods:
func = method_factory(method)
setattr(ContainerlikeClass, method, func)
yield ContainerlikeClass
|
15,135 |
def test_deprecated_with_no_optionals(caplog, schema):
"""
Test deprecation behaves correctly when optional params are None.
Expected behavior:
- Outputs the appropriate deprecation warning if key is detected
- Processes schema without changing any values
- No warning or difference in output if key is not provided
"""
deprecated_schema = vol.All(cv.deprecated("mars"), schema)
test_data = {"mars": True}
output = deprecated_schema(test_data.copy())
assert len(caplog.records) == 1
assert caplog.records[0].name in [
__name__,
"homeassistant.helpers.config_validation",
]
assert (
"The 'mars' option is deprecated, " "please remove it from your configuration"
) in caplog.text
assert test_data == output
caplog.clear()
assert len(caplog.records) == 0
test_data = {"venus": True}
output = deprecated_schema(test_data.copy())
assert len(caplog.records) == 0
assert test_data == output
|
def test_deprecated_with_no_optionals(caplog, schema):
"""
Test deprecation behaves correctly when optional params are None.
Expected behavior:
- Outputs the appropriate deprecation warning if key is detected
- Processes schema without changing any values
- No warning or difference in output if key is not provided
"""
deprecated_schema = vol.All(cv.deprecated("mars"), schema)
test_data = {"mars": True}
output = deprecated_schema(test_data.copy())
assert len(caplog.records) == 1
assert caplog.records[0].name in [
__name__,
"homeassistant.helpers.config_validation",
]
assert (
"The 'mars' option is deprecated, please remove it from your configuration"
) in caplog.text
assert test_data == output
caplog.clear()
assert len(caplog.records) == 0
test_data = {"venus": True}
output = deprecated_schema(test_data.copy())
assert len(caplog.records) == 0
assert test_data == output
|
28,979 |
def verify_proof(proof, rootHash, name):
previous_computed_hash = None
reverse_computed_name = ''
verified_value = False
for i, node in enumerate(proof['nodes'][::-1]):
found_child_in_chain = False
to_hash = b''
previous_child_character = None
for child in node['children']:
if child['character'] < 0 or child['character'] > 255:
raise InvalidProofError("child character not int between 0 and 255")
if previous_child_character:
if previous_child_character >= child['character']:
raise InvalidProofError("children not in increasing order")
previous_child_character = child['character']
to_hash += six.int2byte(child['character'])
if 'nodeHash' in child:
if len(child['nodeHash']) != 64:
raise InvalidProofError("invalid child nodeHash")
to_hash += binascii.unhexlify(child['nodeHash'])[::-1]
else:
if previous_computed_hash is None:
raise InvalidProofError("previous computed hash is None")
if found_child_in_chain is True:
raise InvalidProofError("already found the next child in the chain")
found_child_in_chain = True
reverse_computed_name += chr(child['character'])
to_hash += previous_computed_hash
if not found_child_in_chain:
if i != 0:
raise InvalidProofError("did not find the alleged child")
if i == 0 and 'txhash' in proof and 'nOut' in proof and 'last takeover height' in proof:
if len(proof['txhash']) != 64:
raise InvalidProofError("txhash was invalid: {}".format(proof['txhash']))
if not isinstance(proof['nOut'], (int,)):
raise InvalidProofError("nOut was invalid: {}".format(proof['nOut']))
if not isinstance(proof['last takeover height'], (int,)):
raise InvalidProofError(
'last takeover height was invalid: {}'.format(proof['last takeover height']))
to_hash += get_hash_for_outpoint(
binascii.unhexlify(proof['txhash'])[::-1],
proof['nOut'],
proof['last takeover height']
)
verified_value = True
elif 'valueHash' in node:
if len(node['valueHash']) != 64:
raise InvalidProofError("valueHash was invalid")
to_hash += binascii.unhexlify(node['valueHash'])[::-1]
previous_computed_hash = double_sha256(to_hash)
if previous_computed_hash != binascii.unhexlify(rootHash)[::-1]:
raise InvalidProofError("computed hash does not match roothash")
if 'txhash' in proof and 'nOut' in proof:
if not verified_value:
raise InvalidProofError("mismatch between proof claim and outcome")
if 'txhash' in proof and 'nOut' in proof:
if name != reverse_computed_name[::-1]:
raise InvalidProofError("name did not match proof")
if not name.startswith(reverse_computed_name[::-1]):
raise InvalidProofError("name fragment does not match proof")
return True
|
def verify_proof(proof, rootHash, name):
previous_computed_hash = None
reverse_computed_name = ''
verified_value = False
for i, node in enumerate(proof['nodes'][::-1]):
found_child_in_chain = False
to_hash = b''
previous_child_character = None
for child in node['children']:
if child['character'] < 0 or child['character'] > 255:
raise InvalidProofError("child character not int between 0 and 255")
if previous_child_character:
if previous_child_character >= child['character']:
raise InvalidProofError("children not in increasing order")
previous_child_character = child['character']
to_hash += six.int2byte(child['character'])
if 'nodeHash' in child:
if len(child['nodeHash']) != 64:
raise InvalidProofError("invalid child nodeHash")
to_hash += binascii.unhexlify(child['nodeHash'])[::-1]
else:
if previous_computed_hash is None:
raise InvalidProofError("previous computed hash is None")
if found_child_in_chain is True:
raise InvalidProofError("already found the next child in the chain")
found_child_in_chain = True
reverse_computed_name += chr(child['character'])
to_hash += previous_computed_hash
if not found_child_in_chain:
if i != 0:
raise InvalidProofError("did not find the alleged child")
if i == 0 and 'txhash' in proof and 'nOut' in proof and 'last takeover height' in proof:
if len(proof['txhash']) != 64:
raise InvalidProofError("txhash was invalid: {}".format(proof['txhash']))
if not isinstance(proof['nOut'], int):
raise InvalidProofError("nOut was invalid: {}".format(proof['nOut']))
if not isinstance(proof['last takeover height'], (int,)):
raise InvalidProofError(
'last takeover height was invalid: {}'.format(proof['last takeover height']))
to_hash += get_hash_for_outpoint(
binascii.unhexlify(proof['txhash'])[::-1],
proof['nOut'],
proof['last takeover height']
)
verified_value = True
elif 'valueHash' in node:
if len(node['valueHash']) != 64:
raise InvalidProofError("valueHash was invalid")
to_hash += binascii.unhexlify(node['valueHash'])[::-1]
previous_computed_hash = double_sha256(to_hash)
if previous_computed_hash != binascii.unhexlify(rootHash)[::-1]:
raise InvalidProofError("computed hash does not match roothash")
if 'txhash' in proof and 'nOut' in proof:
if not verified_value:
raise InvalidProofError("mismatch between proof claim and outcome")
if 'txhash' in proof and 'nOut' in proof:
if name != reverse_computed_name[::-1]:
raise InvalidProofError("name did not match proof")
if not name.startswith(reverse_computed_name[::-1]):
raise InvalidProofError("name fragment does not match proof")
return True
|
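The verify_proof routine above relies on a double_sha256 helper that is not shown here. Assuming it is the conventional construction the name suggests (two rounds of SHA-256 over the serialized node), a minimal self-contained sketch would be:

import hashlib

def double_sha256(data: bytes) -> bytes:
    # Assumption: two rounds of SHA-256, as commonly used for trie/node hashes;
    # this is an illustrative helper, not the project's own implementation.
    return hashlib.sha256(hashlib.sha256(data).digest()).digest()

# Tiny usage example: hash an empty payload.
print(double_sha256(b"").hex())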
3,411 |
def _get_metrics_filter_ids(metric_names: Sequence[str]) -> Set[int]:
"""
Returns a set of metric_ids that map to input metric names and raises an exception if
metric cannot be resolved in the indexer
"""
if not metric_names:
return set()
metric_ids = set()
metric_names_deque = deque(metric_names)
while metric_names_deque:
name = metric_names_deque.popleft()
if name not in DERIVED_METRICS:
metric_ids.add(indexer.resolve(name))
else:
derived_metric_obj = DERIVED_METRICS[name]
try:
metric_ids |= derived_metric_obj.generate_metric_ids()
except NotSupportedOverCompositeEntityException:
single_entity_constituents = set(
list(
derived_metric_obj.naively_generate_singular_entity_constituents().values()
).pop()
)
metric_names_deque.extend(single_entity_constituents)
if None in metric_ids:
# We are looking for tags that appear in all given metrics.
# A tag cannot appear in a metric if the metric is not even indexed.
raise MetricDoesNotExistInIndexer()
return metric_ids
|
def _get_metrics_filter_ids(metric_names: Sequence[str]) -> Set[int]:
"""
Returns a set of metric_ids that map to input metric names and raises an exception if
metric cannot be resolved in the indexer
"""
if not metric_names:
return set()
metric_ids = set()
metric_names_deque = deque(metric_names)
while metric_names_deque:
name = metric_names_deque.popleft()
if name not in DERIVED_METRICS:
metric_ids.add(indexer.resolve(name))
else:
derived_metric_obj = DERIVED_METRICS[name]
try:
metric_ids |= derived_metric_obj.generate_metric_ids()
except NotSupportedOverCompositeEntityException:
single_entity_constituents = derived_metric_obj.naively_generate_singular_entity_constituents()
metric_names_deque.extend(single_entity_constituents)
if None in metric_ids:
# We are looking for tags that appear in all given metrics.
# A tag cannot appear in a metric if the metric is not even indexed.
raise MetricDoesNotExistInIndexer()
return metric_ids
|
12,387 |
def _mount_source(instance):
container_path = '/usr/lib/python3/dist-packages/cloudinit'
format_variables = {
'name': instance.name,
'cloudinit_path': cloudinit.__path__[0],
'container_path': container_path,
}
log.info(
'Mounting source {cloudinit_path} directly onto LXD container/vm '
'named {name} at {container_path}'.format(**format_variables))
command = (
'lxc config device add {name} host-cloud-init disk '
'source={cloudinit_path} '
'path={container_path}'
).format(**format_variables)
subp(command.split())
|
def _mount_source(pycloudlib_instance):
container_path = '/usr/lib/python3/dist-packages/cloudinit'
format_variables = {
'name': instance.name,
'cloudinit_path': cloudinit.__path__[0],
'container_path': container_path,
}
log.info(
'Mounting source {cloudinit_path} directly onto LXD container/vm '
'named {name} at {container_path}'.format(**format_variables))
command = (
'lxc config device add {name} host-cloud-init disk '
'source={cloudinit_path} '
'path={container_path}'
).format(**format_variables)
subp(command.split())
|
5,840 |
def linprog(c, A_ub=None, b_ub=None, A_eq=None, b_eq=None,
bounds=None, method='highs', callback=None,
options=None, x0=None, integrality=None):
r"""
Linear programming: minimize a linear objective function subject to linear
equality and inequality constraints.
Linear programming solves problems of the following form:
.. math::
\min_x \ & c^T x \\
\mbox{such that} \ & A_{ub} x \leq b_{ub},\\
& A_{eq} x = b_{eq},\\
& l \leq x \leq u ,
where :math:`x` is a vector of decision variables; :math:`c`,
:math:`b_{ub}`, :math:`b_{eq}`, :math:`l`, and :math:`u` are vectors; and
:math:`A_{ub}` and :math:`A_{eq}` are matrices.
Alternatively, that's:
minimize::
c @ x
such that::
A_ub @ x <= b_ub
A_eq @ x == b_eq
lb <= x <= ub
Note that by default ``lb = 0`` and ``ub = None`` unless specified with
``bounds``.
Parameters
----------
c : 1-D array
The coefficients of the linear objective function to be minimized.
A_ub : 2-D array, optional
The inequality constraint matrix. Each row of ``A_ub`` specifies the
coefficients of a linear inequality constraint on ``x``.
b_ub : 1-D array, optional
The inequality constraint vector. Each element represents an
upper bound on the corresponding value of ``A_ub @ x``.
A_eq : 2-D array, optional
The equality constraint matrix. Each row of ``A_eq`` specifies the
coefficients of a linear equality constraint on ``x``.
b_eq : 1-D array, optional
The equality constraint vector. Each element of ``A_eq @ x`` must equal
the corresponding element of ``b_eq``.
bounds : sequence, optional
A sequence of ``(min, max)`` pairs for each element in ``x``, defining
the minimum and maximum values of that decision variable. Use ``None``
to indicate that there is no bound. By default, bounds are
``(0, None)`` (all decision variables are non-negative).
If a single tuple ``(min, max)`` is provided, then ``min`` and
``max`` will serve as bounds for all decision variables.
method : str, optional
The algorithm used to solve the standard form problem.
:ref:`'highs' <optimize.linprog-highs>` (default),
:ref:`'highs-ds' <optimize.linprog-highs-ds>`,
:ref:`'highs-ipm' <optimize.linprog-highs-ipm>`,
:ref:`'interior-point' <optimize.linprog-interior-point>` (legacy),
:ref:`'revised simplex' <optimize.linprog-revised_simplex>` (legacy),
and
:ref:`'simplex' <optimize.linprog-simplex>` (legacy) are supported.
The legacy methods are depreciated and will be removed in SciPy 1.11.0.
callback : callable, optional
If a callback function is provided, it will be called at least once per
iteration of the algorithm. The callback function must accept a single
`scipy.optimize.OptimizeResult` consisting of the following fields:
x : 1-D array
The current solution vector.
fun : float
The current value of the objective function ``c @ x``.
success : bool
``True`` when the algorithm has completed successfully.
slack : 1-D array
The (nominally positive) values of the slack,
``b_ub - A_ub @ x``.
con : 1-D array
The (nominally zero) residuals of the equality constraints,
``b_eq - A_eq @ x``.
phase : int
The phase of the algorithm being executed.
status : int
An integer representing the status of the algorithm.
``0`` : Optimization proceeding nominally.
``1`` : Iteration limit reached.
``2`` : Problem appears to be infeasible.
``3`` : Problem appears to be unbounded.
``4`` : Numerical difficulties encountered.
nit : int
The current iteration number.
message : str
A string descriptor of the algorithm status.
Callback functions are not currently supported by the HiGHS methods.
options : dict, optional
A dictionary of solver options. All methods accept the following
options:
maxiter : int
Maximum number of iterations to perform.
Default: see method-specific documentation.
disp : bool
Set to ``True`` to print convergence messages.
Default: ``False``.
presolve : bool
Set to ``False`` to disable automatic presolve.
Default: ``True``.
All methods except the HiGHS solvers also accept:
tol : float
A tolerance which determines when a residual is "close enough" to
zero to be considered exactly zero.
autoscale : bool
Set to ``True`` to automatically perform equilibration.
Consider using this option if the numerical values in the
constraints are separated by several orders of magnitude.
Default: ``False``.
rr : bool
Set to ``False`` to disable automatic redundancy removal.
Default: ``True``.
rr_method : string
Method used to identify and remove redundant rows from the
equality constraint matrix after presolve. For problems with
dense input, the available methods for redundancy removal are:
"SVD":
Repeatedly performs singular value decomposition on
the matrix, detecting redundant rows based on nonzeros
in the left singular vectors that correspond with
zero singular values. May be fast when the matrix is
nearly full rank.
"pivot":
Uses the algorithm presented in [5]_ to identify
redundant rows.
"ID":
Uses a randomized interpolative decomposition.
Identifies columns of the matrix transpose not used in
a full-rank interpolative decomposition of the matrix.
None:
Uses "svd" if the matrix is nearly full rank, that is,
the difference between the matrix rank and the number
of rows is less than five. If not, uses "pivot". The
behavior of this default is subject to change without
prior notice.
Default: None.
For problems with sparse input, this option is ignored, and the
pivot-based algorithm presented in [5]_ is used.
For method-specific options, see
:func:`show_options('linprog') <show_options>`.
x0 : 1-D array, optional
Guess values of the decision variables, which will be refined by
the optimization algorithm. This argument is currently used only by the
'revised simplex' method, and can only be used if `x0` represents a
basic feasible solution.
integrality : 1-D array, optional
Indicates the type of integrality constraint on each decision variable.
``0`` : Continuous variable; no integrality constraint.
``1`` : Integer variable; decision variable must be an integer
within `bounds`.
``2`` : Semi-continuous variable; decision variable must be within
`bounds` or take value ``0``.
``3`` : Semi-integer variable; decision variable must be an integer
within `bounds` or take value ``0``.
By default, all variables are continuous. This argument is currently
used only by the ``'highs'`` method and ignored otherwise.
Returns
-------
res : OptimizeResult
A :class:`scipy.optimize.OptimizeResult` consisting of the fields:
x : 1-D array
The values of the decision variables that minimizes the
objective function while satisfying the constraints.
fun : float
The optimal value of the objective function ``c @ x``.
slack : 1-D array
The (nominally positive) values of the slack variables,
``b_ub - A_ub @ x``.
con : 1-D array
The (nominally zero) residuals of the equality constraints,
``b_eq - A_eq @ x``.
success : bool
``True`` when the algorithm succeeds in finding an optimal
solution.
status : int
An integer representing the exit status of the algorithm.
``0`` : Optimization terminated successfully.
``1`` : Iteration limit reached.
``2`` : Problem appears to be infeasible.
``3`` : Problem appears to be unbounded.
``4`` : Numerical difficulties encountered.
nit : int
The total number of iterations performed in all phases.
message : str
A string descriptor of the exit status of the algorithm.
See Also
--------
show_options : Additional options accepted by the solvers.
Notes
-----
This section describes the available solvers that can be selected by the
'method' parameter.
`'highs-ds'` and
`'highs-ipm'` are interfaces to the
HiGHS simplex and interior-point method solvers [13]_, respectively.
`'highs'` (default) chooses between
the two automatically. These are the fastest linear
programming solvers in SciPy, especially for large, sparse problems;
which of these two is faster is problem-dependent.
The other solvers (`'interior-point'`, `'revised simplex'`, and
`'simplex'`) are legacy methods and will be removed in SciPy 1.11.0.
Method *highs-ds* is a wrapper of the C++ high performance dual
revised simplex implementation (HSOL) [13]_, [14]_. Method *highs-ipm*
is a wrapper of a C++ implementation of an **i**\ nterior-\ **p**\ oint
**m**\ ethod [13]_; it features a crossover routine, so it is as accurate
as a simplex solver. Method *highs* chooses between the two automatically.
For new code involving `linprog`, we recommend explicitly choosing one of
these three method values.
.. versionadded:: 1.6.0
Method *interior-point* uses the primal-dual path following algorithm
as outlined in [4]_. This algorithm supports sparse constraint matrices and
is typically faster than the simplex methods, especially for large, sparse
problems. Note, however, that the solution returned may be slightly less
accurate than those of the simplex methods and will not, in general,
correspond with a vertex of the polytope defined by the constraints.
.. versionadded:: 1.0.0
Method *revised simplex* uses the revised simplex method as described in
[9]_, except that a factorization [11]_ of the basis matrix, rather than
its inverse, is efficiently maintained and used to solve the linear systems
at each iteration of the algorithm.
.. versionadded:: 1.3.0
Method *simplex* uses a traditional, full-tableau implementation of
Dantzig's simplex algorithm [1]_, [2]_ (*not* the
Nelder-Mead simplex). This algorithm is included for backwards
compatibility and educational purposes.
.. versionadded:: 0.15.0
Before applying *interior-point*, *revised simplex*, or *simplex*,
a presolve procedure based on [8]_ attempts
to identify trivial infeasibilities, trivial unboundedness, and potential
problem simplifications. Specifically, it checks for:
- rows of zeros in ``A_eq`` or ``A_ub``, representing trivial constraints;
- columns of zeros in ``A_eq`` `and` ``A_ub``, representing unconstrained
variables;
- column singletons in ``A_eq``, representing fixed variables; and
- column singletons in ``A_ub``, representing simple bounds.
If presolve reveals that the problem is unbounded (e.g. an unconstrained
and unbounded variable has negative cost) or infeasible (e.g., a row of
zeros in ``A_eq`` corresponds with a nonzero in ``b_eq``), the solver
terminates with the appropriate status code. Note that presolve terminates
as soon as any sign of unboundedness is detected; consequently, a problem
may be reported as unbounded when in reality the problem is infeasible
(but infeasibility has not been detected yet). Therefore, if it is
important to know whether the problem is actually infeasible, solve the
problem again with option ``presolve=False``.
If neither infeasibility nor unboundedness are detected in a single pass
of the presolve, bounds are tightened where possible and fixed
variables are removed from the problem. Then, linearly dependent rows
of the ``A_eq`` matrix are removed, (unless they represent an
infeasibility) to avoid numerical difficulties in the primary solve
routine. Note that rows that are nearly linearly dependent (within a
prescribed tolerance) may also be removed, which can change the optimal
solution in rare cases. If this is a concern, eliminate redundancy from
your problem formulation and run with option ``rr=False`` or
``presolve=False``.
Several potential improvements can be made here: additional presolve
checks outlined in [8]_ should be implemented, the presolve routine should
be run multiple times (until no further simplifications can be made), and
more of the efficiency improvements from [5]_ should be implemented in the
redundancy removal routines.
After presolve, the problem is transformed to standard form by converting
the (tightened) simple bounds to upper bound constraints, introducing
non-negative slack variables for inequality constraints, and expressing
unbounded variables as the difference between two non-negative variables.
Optionally, the problem is automatically scaled via equilibration [12]_.
The selected algorithm solves the standard form problem, and a
postprocessing routine converts the result to a solution to the original
problem.
References
----------
.. [1] Dantzig, George B., Linear programming and extensions. Rand
Corporation Research Study Princeton Univ. Press, Princeton, NJ,
1963
.. [2] Hillier, S.H. and Lieberman, G.J. (1995), "Introduction to
Mathematical Programming", McGraw-Hill, Chapter 4.
.. [3] Bland, Robert G. New finite pivoting rules for the simplex method.
Mathematics of Operations Research (2), 1977: pp. 103-107.
.. [4] Andersen, Erling D., and Knud D. Andersen. "The MOSEK interior point
optimizer for linear programming: an implementation of the
homogeneous algorithm." High performance optimization. Springer US,
2000. 197-232.
.. [5] Andersen, Erling D. "Finding all linearly dependent rows in
large-scale linear programming." Optimization Methods and Software
6.3 (1995): 219-227.
.. [6] Freund, Robert M. "Primal-Dual Interior-Point Methods for Linear
Programming based on Newton's Method." Unpublished Course Notes,
March 2004. Available 2/25/2017 at
https://ocw.mit.edu/courses/sloan-school-of-management/15-084j-nonlinear-programming-spring-2004/lecture-notes/lec14_int_pt_mthd.pdf
.. [7] Fourer, Robert. "Solving Linear Programs by Interior-Point Methods."
Unpublished Course Notes, August 26, 2005. Available 2/25/2017 at
http://www.4er.org/CourseNotes/Book%20B/B-III.pdf
.. [8] Andersen, Erling D., and Knud D. Andersen. "Presolving in linear
programming." Mathematical Programming 71.2 (1995): 221-245.
.. [9] Bertsimas, Dimitris, and J. Tsitsiklis. "Introduction to linear
programming." Athena Scientific 1 (1997): 997.
.. [10] Andersen, Erling D., et al. Implementation of interior point
methods for large scale linear programming. HEC/Universite de
Geneve, 1996.
.. [11] Bartels, Richard H. "A stabilization of the simplex method."
Journal in Numerische Mathematik 16.5 (1971): 414-434.
.. [12] Tomlin, J. A. "On scaling linear programming problems."
Mathematical Programming Study 4 (1975): 146-166.
.. [13] Huangfu, Q., Galabova, I., Feldmeier, M., and Hall, J. A. J.
"HiGHS - high performance software for linear optimization."
Accessed 4/16/2020 at https://www.maths.ed.ac.uk/hall/HiGHS/#guide
.. [14] Huangfu, Q. and Hall, J. A. J. "Parallelizing the dual revised
simplex method." Mathematical Programming Computation, 10 (1),
119-142, 2018. DOI: 10.1007/s12532-017-0130-5
Examples
--------
Consider the following problem:
.. math::
\min_{x_0, x_1} \ -x_0 + 4x_1 & \\
\mbox{such that} \ -3x_0 + x_1 & \leq 6,\\
-x_0 - 2x_1 & \geq -4,\\
x_1 & \geq -3.
The problem is not presented in the form accepted by `linprog`. This is
easily remedied by converting the "greater than" inequality
constraint to a "less than" inequality constraint by
multiplying both sides by a factor of :math:`-1`. Note also that the last
constraint is really the simple bound :math:`-3 \leq x_1 \leq \infty`.
Finally, since there are no bounds on :math:`x_0`, we must explicitly
specify the bounds :math:`-\infty \leq x_0 \leq \infty`, as the
    default is for variables to be non-negative. After collecting coefficients
into arrays and tuples, the input for this problem is:
>>> from scipy.optimize import linprog
>>> c = [-1, 4]
>>> A = [[-3, 1], [1, 2]]
>>> b = [6, 4]
>>> x0_bounds = (None, None)
>>> x1_bounds = (-3, None)
>>> res = linprog(c, A_ub=A, b_ub=b, bounds=[x0_bounds, x1_bounds])
>>> res.fun
-22.0
>>> res.x
array([10., -3.])
>>> res.message
'Optimization terminated successfully. (HiGHS Status 7: Optimal)'
"""
meth = method.lower()
methods = {"highs", "highs-ds", "highs-ipm",
"simplex", "revised simplex", "interior-point"}
if meth not in methods:
raise ValueError(f"Unknown solver '{method}'")
if x0 is not None and meth != "revised simplex":
warning_message = "x0 is used only when method is 'revised simplex'. "
warn(warning_message, OptimizeWarning)
if integrality and not meth == "highs":
integrality = None
warning_message = ("Only `method='highs'` supports integer "
"constraints. Ignoring `integrality`.")
warn(warning_message, OptimizeWarning)
lp = _LPProblem(c, A_ub, b_ub, A_eq, b_eq, bounds, x0, integrality)
lp, solver_options = _parse_linprog(lp, options, meth)
tol = solver_options.get('tol', 1e-9)
# Give unmodified problem to HiGHS
if meth.startswith('highs'):
if callback is not None:
raise NotImplementedError("HiGHS solvers do not support the "
"callback interface.")
highs_solvers = {'highs-ipm': 'ipm', 'highs-ds': 'simplex',
'highs': None}
sol = _linprog_highs(lp, solver=highs_solvers[meth],
**solver_options)
sol['status'], sol['message'] = (
_check_result(sol['x'], sol['fun'], sol['status'], sol['slack'],
sol['con'], lp.bounds, tol, sol['message']))
sol['success'] = sol['status'] == 0
return OptimizeResult(sol)
warn(f"`method='{meth}'` is deprecated and will be removed in SciPy "
"1.11.0. Please use one of the HiGHS solvers (e.g. "
"`method='highs'`) in new code.", DeprecationWarning, stacklevel=2)
iteration = 0
complete = False # will become True if solved in presolve
undo = []
# Keep the original arrays to calculate slack/residuals for original
# problem.
lp_o = deepcopy(lp)
# Solve trivial problem, eliminate variables, tighten bounds, etc.
rr_method = solver_options.pop('rr_method', None) # need to pop these;
rr = solver_options.pop('rr', True) # they're not passed to methods
c0 = 0 # we might get a constant term in the objective
if solver_options.pop('presolve', True):
(lp, c0, x, undo, complete, status, message) = _presolve(lp, rr,
rr_method,
tol)
C, b_scale = 1, 1 # for trivial unscaling if autoscale is not used
postsolve_args = (lp_o._replace(bounds=lp.bounds), undo, C, b_scale)
if not complete:
A, b, c, c0, x0 = _get_Abc(lp, c0)
if solver_options.pop('autoscale', False):
A, b, c, x0, C, b_scale = _autoscale(A, b, c, x0)
postsolve_args = postsolve_args[:-2] + (C, b_scale)
if meth == 'simplex':
x, status, message, iteration = _linprog_simplex(
c, c0=c0, A=A, b=b, callback=callback,
postsolve_args=postsolve_args, **solver_options)
elif meth == 'interior-point':
x, status, message, iteration = _linprog_ip(
c, c0=c0, A=A, b=b, callback=callback,
postsolve_args=postsolve_args, **solver_options)
elif meth == 'revised simplex':
x, status, message, iteration = _linprog_rs(
c, c0=c0, A=A, b=b, x0=x0, callback=callback,
postsolve_args=postsolve_args, **solver_options)
# Eliminate artificial variables, re-introduce presolved variables, etc.
disp = solver_options.get('disp', False)
x, fun, slack, con = _postsolve(x, postsolve_args, complete)
status, message = _check_result(x, fun, status, slack, con, lp_o.bounds, tol, message)
if disp:
_display_summary(message, status, fun, iteration)
sol = {
'x': x,
'fun': fun,
'slack': slack,
'con': con,
'status': status,
'message': message,
'nit': iteration,
'success': status == 0}
return OptimizeResult(sol)
|
def linprog(c, A_ub=None, b_ub=None, A_eq=None, b_eq=None,
bounds=None, method='highs', callback=None,
options=None, x0=None, integrality=None):
r"""
Linear programming: minimize a linear objective function subject to linear
equality and inequality constraints.
Linear programming solves problems of the following form:
.. math::
\min_x \ & c^T x \\
\mbox{such that} \ & A_{ub} x \leq b_{ub},\\
& A_{eq} x = b_{eq},\\
& l \leq x \leq u ,
where :math:`x` is a vector of decision variables; :math:`c`,
:math:`b_{ub}`, :math:`b_{eq}`, :math:`l`, and :math:`u` are vectors; and
:math:`A_{ub}` and :math:`A_{eq}` are matrices.
Alternatively, that's:
minimize::
c @ x
such that::
A_ub @ x <= b_ub
A_eq @ x == b_eq
lb <= x <= ub
Note that by default ``lb = 0`` and ``ub = None`` unless specified with
``bounds``.
Parameters
----------
c : 1-D array
The coefficients of the linear objective function to be minimized.
A_ub : 2-D array, optional
The inequality constraint matrix. Each row of ``A_ub`` specifies the
coefficients of a linear inequality constraint on ``x``.
b_ub : 1-D array, optional
The inequality constraint vector. Each element represents an
upper bound on the corresponding value of ``A_ub @ x``.
A_eq : 2-D array, optional
The equality constraint matrix. Each row of ``A_eq`` specifies the
coefficients of a linear equality constraint on ``x``.
b_eq : 1-D array, optional
The equality constraint vector. Each element of ``A_eq @ x`` must equal
the corresponding element of ``b_eq``.
bounds : sequence, optional
A sequence of ``(min, max)`` pairs for each element in ``x``, defining
the minimum and maximum values of that decision variable. Use ``None``
to indicate that there is no bound. By default, bounds are
``(0, None)`` (all decision variables are non-negative).
If a single tuple ``(min, max)`` is provided, then ``min`` and
``max`` will serve as bounds for all decision variables.
method : str, optional
The algorithm used to solve the standard form problem.
:ref:`'highs' <optimize.linprog-highs>` (default),
:ref:`'highs-ds' <optimize.linprog-highs-ds>`,
:ref:`'highs-ipm' <optimize.linprog-highs-ipm>`,
:ref:`'interior-point' <optimize.linprog-interior-point>` (legacy),
:ref:`'revised simplex' <optimize.linprog-revised_simplex>` (legacy),
and
:ref:`'simplex' <optimize.linprog-simplex>` (legacy) are supported.
The legacy methods are deprecated and will be removed in SciPy 1.11.0.
callback : callable, optional
If a callback function is provided, it will be called at least once per
iteration of the algorithm. The callback function must accept a single
`scipy.optimize.OptimizeResult` consisting of the following fields:
x : 1-D array
The current solution vector.
fun : float
The current value of the objective function ``c @ x``.
success : bool
``True`` when the algorithm has completed successfully.
slack : 1-D array
The (nominally positive) values of the slack,
``b_ub - A_ub @ x``.
con : 1-D array
The (nominally zero) residuals of the equality constraints,
``b_eq - A_eq @ x``.
phase : int
The phase of the algorithm being executed.
status : int
An integer representing the status of the algorithm.
``0`` : Optimization proceeding nominally.
``1`` : Iteration limit reached.
``2`` : Problem appears to be infeasible.
``3`` : Problem appears to be unbounded.
``4`` : Numerical difficulties encountered.
nit : int
The current iteration number.
message : str
A string descriptor of the algorithm status.
Callback functions are not currently supported by the HiGHS methods.
options : dict, optional
A dictionary of solver options. All methods accept the following
options:
maxiter : int
Maximum number of iterations to perform.
Default: see method-specific documentation.
disp : bool
Set to ``True`` to print convergence messages.
Default: ``False``.
presolve : bool
Set to ``False`` to disable automatic presolve.
Default: ``True``.
All methods except the HiGHS solvers also accept:
tol : float
A tolerance which determines when a residual is "close enough" to
zero to be considered exactly zero.
autoscale : bool
Set to ``True`` to automatically perform equilibration.
Consider using this option if the numerical values in the
constraints are separated by several orders of magnitude.
Default: ``False``.
rr : bool
Set to ``False`` to disable automatic redundancy removal.
Default: ``True``.
rr_method : string
Method used to identify and remove redundant rows from the
equality constraint matrix after presolve. For problems with
dense input, the available methods for redundancy removal are:
"SVD":
Repeatedly performs singular value decomposition on
the matrix, detecting redundant rows based on nonzeros
in the left singular vectors that correspond with
zero singular values. May be fast when the matrix is
nearly full rank.
"pivot":
Uses the algorithm presented in [5]_ to identify
redundant rows.
"ID":
Uses a randomized interpolative decomposition.
Identifies columns of the matrix transpose not used in
a full-rank interpolative decomposition of the matrix.
None:
Uses "svd" if the matrix is nearly full rank, that is,
the difference between the matrix rank and the number
of rows is less than five. If not, uses "pivot". The
behavior of this default is subject to change without
prior notice.
Default: None.
For problems with sparse input, this option is ignored, and the
pivot-based algorithm presented in [5]_ is used.
For method-specific options, see
:func:`show_options('linprog') <show_options>`.
x0 : 1-D array, optional
Guess values of the decision variables, which will be refined by
the optimization algorithm. This argument is currently used only by the
'revised simplex' method, and can only be used if `x0` represents a
basic feasible solution.
integrality : 1-D array, optional
Indicates the type of integrality constraint on each decision variable.
``0`` : Continuous variable; no integrality constraint.
``1`` : Integer variable; decision variable must be an integer
within `bounds`.
``2`` : Semi-continuous variable; decision variable must be within
`bounds` or take value ``0``.
``3`` : Semi-integer variable; decision variable must be an integer
within `bounds` or take value ``0``.
By default, all variables are continuous. This argument is currently
used only by the ``'highs'`` method and ignored otherwise.
Returns
-------
res : OptimizeResult
A :class:`scipy.optimize.OptimizeResult` consisting of the fields:
x : 1-D array
The values of the decision variables that minimizes the
objective function while satisfying the constraints.
fun : float
The optimal value of the objective function ``c @ x``.
slack : 1-D array
The (nominally positive) values of the slack variables,
``b_ub - A_ub @ x``.
con : 1-D array
The (nominally zero) residuals of the equality constraints,
``b_eq - A_eq @ x``.
success : bool
``True`` when the algorithm succeeds in finding an optimal
solution.
status : int
An integer representing the exit status of the algorithm.
``0`` : Optimization terminated successfully.
``1`` : Iteration limit reached.
``2`` : Problem appears to be infeasible.
``3`` : Problem appears to be unbounded.
``4`` : Numerical difficulties encountered.
nit : int
The total number of iterations performed in all phases.
message : str
A string descriptor of the exit status of the algorithm.
See Also
--------
show_options : Additional options accepted by the solvers.
Notes
-----
This section describes the available solvers that can be selected by the
'method' parameter.
`'highs-ds'` and
`'highs-ipm'` are interfaces to the
HiGHS simplex and interior-point method solvers [13]_, respectively.
`'highs'` (default) chooses between
the two automatically. These are the fastest linear
programming solvers in SciPy, especially for large, sparse problems;
which of these two is faster is problem-dependent.
The other solvers (`'interior-point'`, `'revised simplex'`, and
`'simplex'`) are legacy methods and will be removed in SciPy 1.11.0.
Method *highs-ds* is a wrapper of the C++ high performance dual
revised simplex implementation (HSOL) [13]_, [14]_. Method *highs-ipm*
is a wrapper of a C++ implementation of an **i**\ nterior-\ **p**\ oint
**m**\ ethod [13]_; it features a crossover routine, so it is as accurate
as a simplex solver. Method *highs* chooses between the two automatically.
For new code involving `linprog`, we recommend explicitly choosing one of
these three method values.
.. versionadded:: 1.6.0
Method *interior-point* uses the primal-dual path following algorithm
as outlined in [4]_. This algorithm supports sparse constraint matrices and
is typically faster than the simplex methods, especially for large, sparse
problems. Note, however, that the solution returned may be slightly less
accurate than those of the simplex methods and will not, in general,
correspond with a vertex of the polytope defined by the constraints.
.. versionadded:: 1.0.0
Method *revised simplex* uses the revised simplex method as described in
[9]_, except that a factorization [11]_ of the basis matrix, rather than
its inverse, is efficiently maintained and used to solve the linear systems
at each iteration of the algorithm.
.. versionadded:: 1.3.0
Method *simplex* uses a traditional, full-tableau implementation of
Dantzig's simplex algorithm [1]_, [2]_ (*not* the
Nelder-Mead simplex). This algorithm is included for backwards
compatibility and educational purposes.
.. versionadded:: 0.15.0
Before applying *interior-point*, *revised simplex*, or *simplex*,
a presolve procedure based on [8]_ attempts
to identify trivial infeasibilities, trivial unboundedness, and potential
problem simplifications. Specifically, it checks for:
- rows of zeros in ``A_eq`` or ``A_ub``, representing trivial constraints;
- columns of zeros in ``A_eq`` `and` ``A_ub``, representing unconstrained
variables;
- column singletons in ``A_eq``, representing fixed variables; and
- column singletons in ``A_ub``, representing simple bounds.
If presolve reveals that the problem is unbounded (e.g. an unconstrained
and unbounded variable has negative cost) or infeasible (e.g., a row of
zeros in ``A_eq`` corresponds with a nonzero in ``b_eq``), the solver
terminates with the appropriate status code. Note that presolve terminates
as soon as any sign of unboundedness is detected; consequently, a problem
may be reported as unbounded when in reality the problem is infeasible
(but infeasibility has not been detected yet). Therefore, if it is
important to know whether the problem is actually infeasible, solve the
problem again with option ``presolve=False``.
If neither infeasibility nor unboundedness are detected in a single pass
of the presolve, bounds are tightened where possible and fixed
variables are removed from the problem. Then, linearly dependent rows
of the ``A_eq`` matrix are removed, (unless they represent an
infeasibility) to avoid numerical difficulties in the primary solve
routine. Note that rows that are nearly linearly dependent (within a
prescribed tolerance) may also be removed, which can change the optimal
solution in rare cases. If this is a concern, eliminate redundancy from
your problem formulation and run with option ``rr=False`` or
``presolve=False``.
Several potential improvements can be made here: additional presolve
checks outlined in [8]_ should be implemented, the presolve routine should
be run multiple times (until no further simplifications can be made), and
more of the efficiency improvements from [5]_ should be implemented in the
redundancy removal routines.
After presolve, the problem is transformed to standard form by converting
the (tightened) simple bounds to upper bound constraints, introducing
non-negative slack variables for inequality constraints, and expressing
unbounded variables as the difference between two non-negative variables.
Optionally, the problem is automatically scaled via equilibration [12]_.
The selected algorithm solves the standard form problem, and a
postprocessing routine converts the result to a solution to the original
problem.
References
----------
.. [1] Dantzig, George B., Linear programming and extensions. Rand
Corporation Research Study Princeton Univ. Press, Princeton, NJ,
1963
.. [2] Hillier, S.H. and Lieberman, G.J. (1995), "Introduction to
Mathematical Programming", McGraw-Hill, Chapter 4.
.. [3] Bland, Robert G. New finite pivoting rules for the simplex method.
Mathematics of Operations Research (2), 1977: pp. 103-107.
.. [4] Andersen, Erling D., and Knud D. Andersen. "The MOSEK interior point
optimizer for linear programming: an implementation of the
homogeneous algorithm." High performance optimization. Springer US,
2000. 197-232.
.. [5] Andersen, Erling D. "Finding all linearly dependent rows in
large-scale linear programming." Optimization Methods and Software
6.3 (1995): 219-227.
.. [6] Freund, Robert M. "Primal-Dual Interior-Point Methods for Linear
Programming based on Newton's Method." Unpublished Course Notes,
March 2004. Available 2/25/2017 at
https://ocw.mit.edu/courses/sloan-school-of-management/15-084j-nonlinear-programming-spring-2004/lecture-notes/lec14_int_pt_mthd.pdf
.. [7] Fourer, Robert. "Solving Linear Programs by Interior-Point Methods."
Unpublished Course Notes, August 26, 2005. Available 2/25/2017 at
http://www.4er.org/CourseNotes/Book%20B/B-III.pdf
.. [8] Andersen, Erling D., and Knud D. Andersen. "Presolving in linear
programming." Mathematical Programming 71.2 (1995): 221-245.
.. [9] Bertsimas, Dimitris, and J. Tsitsiklis. "Introduction to linear
programming." Athena Scientific 1 (1997): 997.
.. [10] Andersen, Erling D., et al. Implementation of interior point
methods for large scale linear programming. HEC/Universite de
Geneve, 1996.
.. [11] Bartels, Richard H. "A stabilization of the simplex method."
Journal in Numerische Mathematik 16.5 (1971): 414-434.
.. [12] Tomlin, J. A. "On scaling linear programming problems."
Mathematical Programming Study 4 (1975): 146-166.
.. [13] Huangfu, Q., Galabova, I., Feldmeier, M., and Hall, J. A. J.
"HiGHS - high performance software for linear optimization."
Accessed 4/16/2020 at https://www.maths.ed.ac.uk/hall/HiGHS/#guide
.. [14] Huangfu, Q. and Hall, J. A. J. "Parallelizing the dual revised
simplex method." Mathematical Programming Computation, 10 (1),
119-142, 2018. DOI: 10.1007/s12532-017-0130-5
Examples
--------
Consider the following problem:
.. math::
\min_{x_0, x_1} \ -x_0 + 4x_1 & \\
\mbox{such that} \ -3x_0 + x_1 & \leq 6,\\
-x_0 - 2x_1 & \geq -4,\\
x_1 & \geq -3.
The problem is not presented in the form accepted by `linprog`. This is
easily remedied by converting the "greater than" inequality
constraint to a "less than" inequality constraint by
multiplying both sides by a factor of :math:`-1`. Note also that the last
constraint is really the simple bound :math:`-3 \leq x_1 \leq \infty`.
Finally, since there are no bounds on :math:`x_0`, we must explicitly
specify the bounds :math:`-\infty \leq x_0 \leq \infty`, as the
    default is for variables to be non-negative. After collecting coefficients
into arrays and tuples, the input for this problem is:
>>> from scipy.optimize import linprog
>>> c = [-1, 4]
>>> A = [[-3, 1], [1, 2]]
>>> b = [6, 4]
>>> x0_bounds = (None, None)
>>> x1_bounds = (-3, None)
>>> res = linprog(c, A_ub=A, b_ub=b, bounds=[x0_bounds, x1_bounds])
>>> res.fun
-22.0
>>> res.x
array([10., -3.])
>>> res.message
'Optimization terminated successfully. (HiGHS Status 7: Optimal)'
"""
meth = method.lower()
methods = {"highs", "highs-ds", "highs-ipm",
"simplex", "revised simplex", "interior-point"}
if meth not in methods:
raise ValueError(f"Unknown solver '{method}'")
if x0 is not None and meth != "revised simplex":
warning_message = "x0 is used only when method is 'revised simplex'. "
warn(warning_message, OptimizeWarning)
if integrality and not meth == "highs":
integrality = None
warning_message = ("Only `method='highs'` supports integer "
"constraints. Ignoring `integrality`.")
warn(warning_message, OptimizeWarning)
lp = _LPProblem(c, A_ub, b_ub, A_eq, b_eq, bounds, x0, integrality)
lp, solver_options = _parse_linprog(lp, options, meth)
tol = solver_options.get('tol', 1e-9)
# Give unmodified problem to HiGHS
if meth.startswith('highs'):
if callback is not None:
raise NotImplementedError("HiGHS solvers do not support the "
"callback interface.")
highs_solvers = {'highs-ipm': 'ipm', 'highs-ds': 'simplex',
'highs': None}
sol = _linprog_highs(lp, solver=highs_solvers[meth],
**solver_options)
sol['status'], sol['message'] = (
_check_result(sol['x'], sol['fun'], sol['status'], sol['slack'],
sol['con'], lp.bounds, tol, sol['message']))
sol['success'] = sol['status'] == 0
return OptimizeResult(sol)
warn(f"`method='{meth}'` is deprecated and will be removed in SciPy "
"1.11.0. Please use one of the HiGHS solvers (e.g. "
"`method='highs'`) in new code.", DeprecationWarning, stacklevel=2)
iteration = 0
complete = False # will become True if solved in presolve
undo = []
# Keep the original arrays to calculate slack/residuals for original
# problem.
lp_o = deepcopy(lp)
# Solve trivial problem, eliminate variables, tighten bounds, etc.
rr_method = solver_options.pop('rr_method', None) # need to pop these;
rr = solver_options.pop('rr', True) # they're not passed to methods
c0 = 0 # we might get a constant term in the objective
if solver_options.pop('presolve', True):
(lp, c0, x, undo, complete, status, message) = _presolve(lp, rr,
rr_method,
tol)
C, b_scale = 1, 1 # for trivial unscaling if autoscale is not used
postsolve_args = (lp_o._replace(bounds=lp.bounds), undo, C, b_scale)
if not complete:
A, b, c, c0, x0 = _get_Abc(lp, c0)
if solver_options.pop('autoscale', False):
A, b, c, x0, C, b_scale = _autoscale(A, b, c, x0)
postsolve_args = postsolve_args[:-2] + (C, b_scale)
if meth == 'simplex':
x, status, message, iteration = _linprog_simplex(
c, c0=c0, A=A, b=b, callback=callback,
postsolve_args=postsolve_args, **solver_options)
elif meth == 'interior-point':
x, status, message, iteration = _linprog_ip(
c, c0=c0, A=A, b=b, callback=callback,
postsolve_args=postsolve_args, **solver_options)
elif meth == 'revised simplex':
x, status, message, iteration = _linprog_rs(
c, c0=c0, A=A, b=b, x0=x0, callback=callback,
postsolve_args=postsolve_args, **solver_options)
# Eliminate artificial variables, re-introduce presolved variables, etc.
disp = solver_options.get('disp', False)
x, fun, slack, con = _postsolve(x, postsolve_args, complete)
status, message = _check_result(x, fun, status, slack, con, lp_o.bounds, tol, message)
if disp:
_display_summary(message, status, fun, iteration)
sol = {
'x': x,
'fun': fun,
'slack': slack,
'con': con,
'status': status,
'message': message,
'nit': iteration,
'success': status == 0}
return OptimizeResult(sol)
|
1,801 |
def _encode_categories(data, categorical_indices, bin_categories,
missing_values_bin_idx, binned):
"""Encode categories.
Missing values and unknown values are mapped to the missing bin.
Parameters
----------
data : ndarray of shape (n_samples, n_features)
        data to be encoded.
categorical_indices : list of int
columns in ``data`` that are categorical.
bin_categories : list of arrays
        categories learned during training that correspond to
``categorical_indices``.
missing_values_bin_idx : uint8
The index of the bin where missing values are mapped.
binned : ndarray, shape (n_samples, n_features)
Output array
"""
# TODO: This whole function can most likely be made faster with cython
# and prange
for i, f_idx in enumerate(categorical_indices):
col_data = data[:, f_idx]
col_bin_cats = bin_categories[i]
binned[:, f_idx] = np.searchsorted(col_bin_cats, col_data)
# missing values
missing = np.isnan(col_data)
if missing.any():
binned[missing, f_idx] = missing_values_bin_idx
# unknown categories
        # TODO: calling unique a lot of times, maybe this can be improved.
unique_col_data = np.unique(col_data)
diff = np.setdiff1d(unique_col_data, col_bin_cats, assume_unique=True)
if diff.size:
invalid_mask = ~np.in1d(col_data, col_bin_cats)
binned[invalid_mask, f_idx] = missing_values_bin_idx
|
def _encode_categories(data, categorical_indices, bin_categories,
missing_values_bin_idx, binned):
"""Encode categories.
Missing values and unknown values are mapped to the missing bin.
Parameters
----------
data : ndarray of shape (n_samples, n_features)
        data to be encoded.
categorical_indices : list of int
columns in ``data`` that are categorical.
bin_categories : list of arrays
        categories learned during training that correspond to
``categorical_indices``. The arrays should be sorted and are of size n_categories.
missing_values_bin_idx : uint8
The index of the bin where missing values are mapped.
binned : ndarray, shape (n_samples, n_features)
Output array
"""
# TODO: This whole function can most likely be made faster with cython
# and prange
for i, f_idx in enumerate(categorical_indices):
col_data = data[:, f_idx]
col_bin_cats = bin_categories[i]
binned[:, f_idx] = np.searchsorted(col_bin_cats, col_data)
# missing values
missing = np.isnan(col_data)
if missing.any():
binned[missing, f_idx] = missing_values_bin_idx
# unknown categories
        # TODO: calling unique a lot of times, maybe this can be improved.
unique_col_data = np.unique(col_data)
diff = np.setdiff1d(unique_col_data, col_bin_cats, assume_unique=True)
if diff.size:
invalid_mask = ~np.in1d(col_data, col_bin_cats)
binned[invalid_mask, f_idx] = missing_values_bin_idx
|
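As a standalone illustration of the searchsorted-based encoding described in the docstring above, the following sketch uses hypothetical category values and bin indices (none of these numbers come from the library):

import numpy as np

# Hypothetical sorted categories learned at fit time for one column.
bin_categories = np.array([2.0, 5.0, 9.0])
missing_values_bin_idx = np.uint8(3)

# One column containing a known category, an unknown category (7.0) and a NaN.
col_data = np.array([5.0, 7.0, np.nan, 2.0])

binned = np.searchsorted(bin_categories, col_data).astype(np.uint8)
# Missing values map to the missing bin ...
binned[np.isnan(col_data)] = missing_values_bin_idx
# ... and so do categories that were never seen during training.
binned[~np.isin(col_data, bin_categories)] = missing_values_bin_idx

print(binned)  # [1 3 3 0]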
45,662 |
def layout():
return html.Div(id='oncoprint-body', children=[
dash_bio.OncoPrint(
id='oncoprint-chart',
height=550,
data=[]
),
html.Div(id='oncoprint-control-tabs', children=[
dcc.Tabs(
id='oncoprint-tabs',
children=[
dcc.Tab(
label='About',
value='what-is',
children=html.Div(className='oncoprint-tab', children=[
html.H4(
"What is OncoPrint?"
),
html.P(
"""
The OncoPrint component is used to view multiple genetic
alteration events through an interactive and zoomable
heatmap. It is a React/Dash port of the popular
oncoPrint() function from the BioConductor R
package. Under the hood, the rendering is done using
Plotly.js built upon D3. Plotly's interactivity allows
the user to bind clicks and hovers to genetic events,
allowing the user to create complex bioinformatic apps
or workflows that rely on crossfiltering.
"""
),
html.P(
"""
Read more about the component here:
https://github.com/plotly/react-oncoprint
"""
)
])
),
dcc.Tab(
label='Data',
value='data',
children=html.Div(className='oncoprint-tab', children=[
html.Div([
html.Div(
className='oncoprint-option-name',
children='Select dataset'
),
dcc.Dropdown(
id='oncoprint-dropdown',
className='oncoprint-select',
options=[
{
'label': '{}.json'.format(ds),
'value': ds
}
for ds in DATASETS
],
value='cBioPortalData',
),
]),
html.Hr(
className='oncoprint-separator'
),
html.Div([
html.H4('Hover, click, or event data'),
html.Div(
id='oncoprint-events'
),
])
])
),
dcc.Tab(
label='View',
value='view',
children=html.Div(className='oncoprint-tab', children=[
html.H4('Layout'),
html.Div(
children=[
html.Div(
className='oncoprint-option-name',
children='Overview'
),
daq.ToggleSwitch(
id='oncoprint-show-overview',
label=['hide', 'show'],
color='#009DFF',
size=35,
value=True
),
],
),
html.Div(
children=[
html.Div(
className='oncoprint-option-name',
children='Legend'
),
daq.ToggleSwitch(
id='oncoprint-show-legend',
label=['hide', 'show'],
color='#009DFF',
size=35,
value=True
),
],
),
html.Div(
children=[
html.Div(
className='oncoprint-option-name',
children='Padding'
),
dcc.Slider(
className='oncoprint-slider',
id='oncoprint-padding-input',
value=0.05,
min=0,
max=0.1,
step=0.01,
marks={
'0': '0',
'0.02': '0.02',
'0.04': '0.04',
'0.06': '0.06',
'0.08': '0.08',
'0.1': '0.1',
},
),
html.Br(),
html.Div(
'Adjust the padding (as percentage) '
'between two tracks.'
),
],
),
html.Hr(className='oncoprint-separator'),
html.Div([
html.H4('Colors'),
html.Div(
children=[
html.Div(
className='oncoprint-option-name',
children='Track color'
),
html.P(
'Change the default background '
'color for the tracks.'
),
daq.ColorPicker(
id='oncoprint-tracks-color',
value={'hex': '#AAAAAA'}
),
],
),
html.Hr(className='oncoprint-separator'),
html.H6("Mutation colors"),
html.P(
"Select a mutation type and a color "
"to customize its look."
),
html.Div(children=[
html.Div(
children=[
html.Div(
className='oncoprint-option-name',
children='Mutation type'
),
dcc.Dropdown(
id='oncoprint-colorscale-mutation-dropdown',
options=[
{'label': mut_type, 'value': mut_type}
for mut_type in COLORSCALE_MUTATIONS_OPT
],
value=COLORSCALE_MUTATIONS_OPT[0],
),
],
),
html.Div(
children=[
html.Div(
className='oncoprint-option-name',
children='Mutation color'
),
daq.ColorPicker(
id='oncoprint-mutation-color',
value={'hex': COLORSCALE_COLORS_OPT[0]}
)
],
),
])
])
])
)
]
)
]),
dcc.Store(id='oncoprint-store'),
]),
|
def layout():
return html.Div(id='oncoprint-body', children=[
dash_bio.OncoPrint(
id='oncoprint-chart',
height=550,
data=[]
),
html.Div(id='oncoprint-control-tabs', children=[
dcc.Tabs(
id='oncoprint-tabs',
children=[
dcc.Tab(
label='About',
value='what-is',
children=html.Div(className='oncoprint-tab', children=[
html.H4(
"What is OncoPrint?"
),
html.P(
"""
The OncoPrint component is used to view multiple genetic
alteration events through an interactive and zoomable
heatmap. It is a React/Dash port of the popular
oncoPrint() function from the BioConductor R
package. Under the hood, the rendering is done using
Plotly.js built upon D3. Plotly's interactivity allows
the user to bind clicks and hovers to genetic events,
allowing the user to create complex bioinformatic apps
or workflows that rely on crossfiltering.
"""
),
html.P(
"""
Read more about the component here:
https://github.com/plotly/react-oncoprint
"""
)
])
),
dcc.Tab(
label='Data',
value='data',
children=html.Div(className='oncoprint-tab', children=[
html.Div([
html.Div(
className='oncoprint-option-name',
children='Select dataset'
),
dcc.Dropdown(
id='oncoprint-dropdown',
className='oncoprint-select',
options=[
{
'label': '{}.json'.format(ds),
'value': ds
}
for ds in DATASETS
],
value='cBioPortalData',
),
]),
html.Hr(
className='oncoprint-separator'
),
html.Div([
html.H4('Hover, click, or event data'),
html.Div(
id='oncoprint-events'
),
])
])
),
dcc.Tab(
label='View',
value='view',
children=html.Div(className='oncoprint-tab', children=[
html.H4('Layout'),
html.Div(
children=[
html.Div(
className='oncoprint-option-name',
children='Overview'
),
daq.ToggleSwitch(
id='oncoprint-show-overview',
label=['hide', 'show'],
color='#009DFF',
size=35,
value=True
),
],
),
html.Div(
children=[
html.Div(
className='oncoprint-option-name',
children='Legend'
),
daq.ToggleSwitch(
id='oncoprint-show-legend',
label=['hide', 'show'],
color='#009DFF',
size=35,
value=True
),
],
),
html.Div(
children=[
html.Div(
className='oncoprint-option-name',
children='Padding'
),
dcc.Slider(
className='oncoprint-slider',
id='oncoprint-padding-input',
value=0.05,
min=0,
max=0.1,
step=0.01,
marks={
'0': '0',
'0.02': '0.02',
'0.04': '0.04',
'0.06': '0.06',
'0.08': '0.08',
'0.1': '0.1',
},
),
html.Br(),
html.Div(
'Adjust the padding (as percentage) '
'between two tracks.'
),
],
),
html.Hr(className='oncoprint-separator'),
html.Div([
html.H4('Colors'),
html.Div(
children=[
html.Div(
className='oncoprint-option-name',
children='Track color'
),
html.P(
'Change default background '
'color for the tracks.'
),
daq.ColorPicker(
id='oncoprint-tracks-color',
value={'hex': '#AAAAAA'}
),
],
),
html.Hr(className='oncoprint-separator'),
html.H6("Mutation colors"),
html.P(
"Select a mutation type and a color "
"to customize its look."
),
html.Div(children=[
html.Div(
children=[
html.Div(
className='oncoprint-option-name',
children='Mutation type'
),
dcc.Dropdown(
id='oncoprint-colorscale-mutation-dropdown',
options=[
{'label': mut_type, 'value': mut_type}
for mut_type in COLORSCALE_MUTATIONS_OPT
],
value=COLORSCALE_MUTATIONS_OPT[0],
),
],
),
html.Div(
children=[
html.Div(
className='oncoprint-option-name',
children='Mutation color'
),
daq.ColorPicker(
id='oncoprint-mutation-color',
value={'hex': COLORSCALE_COLORS_OPT[0]}
)
],
),
])
])
])
)
]
)
]),
dcc.Store(id='oncoprint-store'),
]),
|
39,377 |
def merge(
datasets,
merge_points=True,
main_has_priority=True,
progress_bar=False
):
"""Merge several datasets.
.. note::
The behavior of this filter varies from the
:func:`PolyDataFilters.boolean_union` filter. This filter
does not attempt to create a manifold mesh and will include
internal surfaces when two meshes overlap.
datasets : sequence of :class:`pyvista.Dataset`
Sequence of datasets. Can be of any :class:`pyvista.Dataset`
merge_points : bool, optional
Merge equivalent points when ``True``. Defaults to ``True``.
main_has_priority : bool, optional
When this parameter is ``True`` and ``merge_points=True``,
the arrays of the merging grids will be overwritten
by the original main mesh.
progress_bar : bool, optional
Display a progress bar to indicate progress.
Returns
-------
pyvista.DataSet
:class:`pyvista.PolyData` if all items in datasets are
:class:`pyvista.PolyData`, otherwise returns a
:class:`pyvista.UnstructuredGrid`.
Examples
--------
Merge two polydata datasets.
>>> import pyvista
>>> sphere = pyvista.Sphere(center=(0, 0, 1))
>>> cube = pyvista.Cube()
>>> mesh = pyvista.merge([cube, sphere])
>>> mesh.plot()
"""
if not isinstance(datasets, collections.Sequence):
raise TypeError(f"Expected a sequence, got {type(datasets)}")
if len(datasets) < 1:
raise ValueError("Expected at least one dataset.")
first = datasets[0]
if not isinstance(first, pyvista.DataSet):
raise TypeError(f"Expected pyvista.DataSet, not {type(first)}")
return datasets[0].merge(
datasets[1:],
merge_points=merge_points,
main_has_priority=main_has_priority,
progress_bar=progress_bar,
)
|
def merge(
datasets,
merge_points=True,
main_has_priority=True,
progress_bar=False
):
"""Merge several datasets.
.. note::
The behavior of this filter varies from the
:func:`PolyDataFilters.boolean_union` filter. This filter
does not attempt to create a manifold mesh and will include
internal surfaces when two meshes overlap.
datasets : sequence of :class:`pyvista.Dataset`
Sequence of datasets. Can be of any :class:`pyvista.Dataset`
merge_points : bool, optional
Merge equivalent points when ``True``. Defaults to ``True``.
main_has_priority : bool, optional
When this parameter is ``True`` and ``merge_points=True``,
the arrays of the merging grids will be overwritten
by the original main mesh.
progress_bar : bool, optional
Display a progress bar to indicate progress.
Returns
-------
pyvista.DataSet
:class:`pyvista.PolyData` if all items in datasets are
:class:`pyvista.PolyData`, otherwise returns a
:class:`pyvista.UnstructuredGrid`.
Examples
--------
Merge two polydata datasets.
>>> import pyvista
>>> sphere = pyvista.Sphere(center=(0, 0, 1))
>>> cube = pyvista.Cube()
>>> mesh = pyvista.merge([cube, sphere])
>>> mesh.plot()
"""
if not isinstance(datasets, collections.Sequence):
raise TypeError(f"Expected a sequence, got {type(datasets)}")
if len(datasets) < 1:
raise ValueError("Expected at least one dataset.")
first = datasets[0]
if not isinstance(first, pyvista.DataSet):
raise TypeError(f"Expected pyvista.DataSet, not {type(first).__name__}")
return datasets[0].merge(
datasets[1:],
merge_points=merge_points,
main_has_priority=main_has_priority,
progress_bar=progress_bar,
)
|
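A minimal usage sketch for the merge() helper above, following its own docstring example; pyvista and the Sphere/Cube constructors are assumed to be available.
import pyvista

sphere = pyvista.Sphere(center=(0, 0, 1))
cube = pyvista.Cube()

# Both inputs are PolyData, so the result is PolyData; mixing dataset types
# would instead produce an UnstructuredGrid (per the Returns section above).
merged = pyvista.merge([cube, sphere], merge_points=True)
print(type(merged).__name__, merged.n_points)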
34,465 |
def pytest_runtest_setup(item):
supported_platforms = PLATFORMS.intersection(
mark.name for mark in item.iter_markers()
)
if supported_platforms and sys.platform not in supported_platforms:
pytest.skip("cannot run on platform {}".format(sys.platform))
|
def pytest_runtest_setup(item):
supported_platforms = PLATFORMS.intersection(
mark.name for mark in item.iter_markers()
)
if supported_platforms and sys.platform not in supported_platforms:
pytest.skip(f"cannot run on platform {sys.platform}")
|
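The hook above skips tests whose platform markers do not include the current sys.platform. A hedged sketch of how such a marker might look in a test module; the PLATFORMS set and the marker names are assumptions, not taken from the original project.
import pytest

# Assumed registry of platform marker names consulted by pytest_runtest_setup.
PLATFORMS = {"linux", "darwin", "win32"}

@pytest.mark.darwin
def test_macos_only_path():
    # Skipped automatically on any platform other than macOS by the hook above.
    assert True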
12,044 |
def main():
fname = iris.sample_data_path("air_temp.pp")
temperature = iris.load_cube(fname)
collapsed_temp = temperature.collapsed("longitude", MEAN)
# Set y axes with -90 and 90 limits and spacing of 15 per tick.
yticks = np.arange(-90, 105, 15)
ylim = [-90, 90]
fig = plt.figure(figsize=[12, 4])
ax1 = fig.add_subplot(111, projection=ccrs.PlateCarree())
plt.sca(ax1)
im = iplt.contourf(temperature, cmap="RdYlBu_r")
ax1.coastlines()
ax1.gridlines()
ax1.set_xticks([-180, -90, 0, 90, 180], crs=ccrs.PlateCarree())
ax1.set_yticks(yticks, crs=ccrs.PlateCarree())
ax1.set_title("Air Temperature")
ax1.set_ylabel("latitude")
ax1.set_xlabel("longitude")
ax1.set_ylim(*ylim)
divider = make_axes_locatable(ax1)
# Gives the air temperature bar size, colour and a title.
ax2 = divider.new_vertical(
size="5%", pad=0.5, axes_class=plt.Axes, pack_start=True
)
fig.add_axes(ax2)
plt.sca(ax2)
cbar = plt.colorbar(im, cax=ax2, orientation="horizontal")
cbar.ax.set_xlabel("Air Temperature [k]")
# Round each tick for the third ax to the nearest 20 (ready for use).
data_max = collapsed_temp.data.max()
x_max = data_max - data_max % -20
data_min = collapsed_temp.data.min()
x_min = data_min - data_min % 20
# Plot "collapsed_temp" on the mean graph and set the ticks and titles on the axes.
ax3 = divider.new_horizontal(size="30%", pad=0.4, axes_class=plt.Axes)
fig.add_axes(ax3)
plt.sca(ax3)
iplt.plot(collapsed_temp, collapsed_temp.coord("latitude"))
ax3.axvline(0, color="k", linewidth=0.5)
ax3.set_ylim(*ylim)
ax3.set_title("Zonal mean")
ax3.set_ylabel("latitude")
ax3.set_xlabel("Air Temperature [k]")
ax3.yaxis.set_label_position("right")
ax3.yaxis.tick_right()
ax3.set_yticks(yticks)
ax3.set_xlim(x_min, x_max)
plt.show()
|
def main():
fname = iris.sample_data_path("air_temp.pp")
temperature = iris.load_cube(fname)
collapsed_temp = temperature.collapsed("longitude", MEAN)
# Set y axes with -90 and 90 limits and spacing of 15 per tick.
yticks = np.arange(-90, 105, 15)
ylim = [-90, 90]
fig = plt.figure(figsize=[12, 4])
ax1 = fig.add_subplot(111, projection=ccrs.PlateCarree())
plt.sca(ax1)
im = iplt.contourf(temperature, cmap="RdYlBu_r")
ax1.coastlines()
ax1.gridlines()
ax1.set_xticks([-180, -90, 0, 90, 180], crs=ccrs.PlateCarree())
ax1.set_yticks(yticks, crs=ccrs.PlateCarree())
ax1.set_title("Air Temperature")
ax1.set_ylabel("latitude")
ax1.set_xlabel("longitude")
ax1.set_ylim(*ylim)
divider = make_axes_locatable(ax1)
# Gives the air temperature bar size, colour and a title.
ax2 = divider.new_vertical(
size="5%", pad=0.5, axes_class=plt.Axes, pack_start=True
)
fig.add_axes(ax2)
plt.sca(ax2)
cbar = plt.colorbar(im, cax=ax2, orientation="horizontal")
cbar.ax.set_xlabel("Air Temperature [K]")
# Round each tick for the third ax to the nearest 20 (ready for use).
data_max = collapsed_temp.data.max()
x_max = data_max - data_max % -20
data_min = collapsed_temp.data.min()
x_min = data_min - data_min % 20
# Plot "collapsed_temp" on the mean graph and set the ticks and titles on the axes.
ax3 = divider.new_horizontal(size="30%", pad=0.4, axes_class=plt.Axes)
fig.add_axes(ax3)
plt.sca(ax3)
iplt.plot(collapsed_temp, collapsed_temp.coord("latitude"))
ax3.axvline(0, color="k", linewidth=0.5)
ax3.set_ylim(*ylim)
ax3.set_title("Zonal mean")
ax3.set_ylabel("latitude")
ax3.set_xlabel("Air Temperature [k]")
ax3.yaxis.set_label_position("right")
ax3.yaxis.tick_right()
ax3.set_yticks(yticks)
ax3.set_xlim(x_min, x_max)
plt.show()
|
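A small illustration of the tick-rounding arithmetic used above (made-up values): the modulo with a negative divisor rounds the maximum up to the next multiple of 20, while the positive divisor rounds the minimum down.
data_max, data_min = 287.3, 231.8   # made-up temperatures in kelvin
x_max = data_max - data_max % -20   # rounds up to 300
x_min = data_min - data_min % 20    # rounds down to 220
print(x_min, x_max)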
8,592 |
def dumps(ob, trim=False, **kw):
"""
Dump a WKT representation of a geometry to a string.
Parameters
----------
ob :
A geometry object of any type to be dumped to WKT.
trim :
Remove excess decimals from the WKT.
rounding_precision (GEOS 3.3+):
Round output to the specified number of digits
output_dimension (GEOS 3.3+):
Force removal of dimensions above the one specified.
Defaults to 3.
Returns
-------
input geometry as WKT string
"""
return geos.WKTWriter(geos.lgeos, trim=trim, **kw).write(ob)
|
def dumps(ob, trim=False, **kw):
"""
Dump a WKT representation of a geometry to a string.
Parameters
----------
ob :
A geometry object of any type to be dumped to WKT.
trim : bool, default False
Remove excess decimals from the WKT.
rounding_precision (GEOS 3.3+):
Round output to the specified number of digits
output_dimension (GEOS 3.3+):
Force removal of dimensions above the one specified.
Defaults to 3.
Returns
-------
input geometry as WKT string
"""
return geos.WKTWriter(geos.lgeos, trim=trim, **kw).write(ob)
|
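A short usage sketch for dumps() above, assuming Shapely's geometry classes are available; the exact decimal padding in the untrimmed output depends on the GEOS writer settings.
from shapely.geometry import Point

p = Point(1, 2)
print(dumps(p))                                   # e.g. 'POINT (1.0000000000000000 2.0000000000000000)'
print(dumps(p, trim=True))                        # 'POINT (1 2)'
print(dumps(p, trim=True, rounding_precision=2))  # precision control (GEOS 3.3+)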
2,701 |
def test_nan_manhattan_distances_equal_to_manhattan_distances():
rng = np.random.RandomState(714)
X = rng.randn(3, 4)
Y = rng.randn(4, 4)
normal_distance = manhattan_distances(X, Y=Y)
nan_distance = nan_manhattan_distances(X, Y=Y)
assert_allclose(normal_distance, nan_distance)
|
def test_nan_manhattan_distances_equal_to_manhattan_distances():
rng = np.random.RandomState(714)
X = rng.randn(3, 4)
Y = rng.randn(4, 4)
normal_distance = manhattan_distances(X, Y=Y)
nan_distance = nan_manhattan_distances(X, Y=Y)
assert_array_equal(normal_distance, nan_distance)
|
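The test above only checks that the NaN-aware variant matches the plain manhattan distance when no values are missing. A rough numpy sketch of the underlying idea, not the project's implementation: missing coordinates are ignored and the partial sum is rescaled by the fraction of observed coordinates.
import numpy as np

def nan_manhattan_sketch(x, y):
    # Ignore coordinates where either vector is NaN, then rescale by coverage.
    mask = ~(np.isnan(x) | np.isnan(y))
    partial = np.abs(x[mask] - y[mask]).sum()
    return partial * x.size / mask.sum()

x = np.array([1.0, np.nan, 3.0])
y = np.array([2.0, 0.0, 1.0])
print(nan_manhattan_sketch(x, y))  # (|1-2| + |3-1|) * 3/2 = 4.5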
27,393 |
def auxreader(auxdata: str, format: Optional = None, **kwargs) -> AuxReader:
""" Return an auxiliary reader instance for *auxdata*.
An appropriate reader class is first obtained using
:func:`get_auxreader_for`, and an auxiliary reader instance for *auxdata*
then created and returned.
Parameters
----------
auxdata
Auxiliary data (e.g. filename of file containing auxiliary data).
format
(Optional). The format of *auxdata*, if known.
**kwargs
Additional AuxReader options.
Returns
-------
:class:`~MDAnalysis.auxiliary.base.AuxReader` instance
Appropriate auxiliary reader instance for *auxdata*.
"""
reader = get_auxreader_for(auxdata, format=format)
return reader(auxdata, **kwargs)
|
def auxreader(auxdata: str, format: Optional[str] = None, **kwargs) -> AuxReader:
""" Return an auxiliary reader instance for *auxdata*.
An appropriate reader class is first obtained using
:func:`get_auxreader_for`, and an auxiliary reader instance for *auxdata*
then created and returned.
Parameters
----------
auxdata
Auxiliary data (e.g. filename of file containing auxiliary data).
format
(Optional). The format of *auxdata*, if known.
**kwargs
Additional AuxReader options.
Returns
-------
:class:`~MDAnalysis.auxiliary.base.AuxReader` instance
Appropriate auxiliary reader instance for *auxdata*.
"""
reader = get_auxreader_for(auxdata, format=format)
return reader(auxdata, **kwargs)
|
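A hedged usage sketch for auxreader(); the file name and the "XVG" format are illustrative assumptions, and iterating the returned AuxReader to reach per-step data follows MDAnalysis conventions.
# Hypothetical auxiliary data file; "XVG" is one of the formats MDAnalysis ships readers for.
aux = auxreader("pull_force.xvg", format="XVG")
for step in aux:
    print(step.data)   # raw auxiliary values for this step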
57,740 |
def upload_index_to_storage(index_folder_path: str, extract_destination_path: str, index_blob: Any,
build_number: str, private_packs: list, current_commit_hash: str,
index_generation: int, is_private: bool = False, force_upload: bool = False,
previous_commit_hash: str = None):
"""
Upload updated index zip to cloud storage.
:param index_folder_path: index folder full path.
:param extract_destination_path: extract folder full path.
:param index_blob: google cloud storage object that represents index.zip blob.
:param build_number: circleCI build number, used as an index revision.
:param private_packs: List of private packs and their price.
:param current_commit_hash: last commit hash of head.
:param index_generation: downloaded index generation.
:param is_private: Indicates if upload is private.
:param force_upload: Indicates if force upload or not.
:param previous_commit_hash: The previous commit hash to diff with.
:returns None.
"""
if force_upload:
# If we force upload we don't want to update the commit in the index.json file,
# this is to be able to identify all changed packs in the next upload
commit = previous_commit_hash
else:
# Otherwise, update the index with the current commit hash (the commit of the upload)
commit = current_commit_hash
with open(os.path.join(index_folder_path, f"{GCPConfig.INDEX_NAME}.json"), "w+") as index_file:
index = {
'revision': build_number,
'modified': datetime.utcnow().strftime(Metadata.DATE_FORMAT),
'packs': private_packs,
'commit': commit
}
json.dump(index, index_file, indent=4)
index_zip_name = os.path.basename(index_folder_path)
index_zip_path = shutil.make_archive(base_name=index_folder_path, format="zip",
root_dir=extract_destination_path, base_dir=index_zip_name)
try:
index_blob.reload()
current_index_generation = index_blob.generation
index_blob.cache_control = "no-cache,max-age=0" # disabling caching for index blob
if is_private or current_index_generation == index_generation:
index_blob.upload_from_filename(index_zip_path)
logging.success(f"Finished uploading {GCPConfig.INDEX_NAME}.zip to storage.")
else:
logging.critical(f"Failed in uploading {GCPConfig.INDEX_NAME}, mismatch in index file generation")
logging.critical(f"Downloaded index generation: {index_generation}")
logging.critical(f"Current index generation: {current_index_generation}")
sys.exit(0)
except Exception:
logging.exception(f"Failed in uploading {GCPConfig.INDEX_NAME}.")
sys.exit(1)
finally:
shutil.rmtree(index_folder_path)
|
def upload_index_to_storage(index_folder_path: str, extract_destination_path: str, index_blob: Any,
build_number: str, private_packs: list, current_commit_hash: str,
index_generation: int, is_private: bool = False, force_upload: bool = False,
previous_commit_hash: str = None):
"""
Upload updated index zip to cloud storage.
:param index_folder_path: index folder full path.
:param extract_destination_path: extract folder full path.
:param index_blob: google cloud storage object that represents index.zip blob.
:param build_number: circleCI build number, used as an index revision.
:param private_packs: List of private packs and their price.
:param current_commit_hash: last commit hash of head.
:param index_generation: downloaded index generation.
:param is_private: Indicates if upload is private.
:param force_upload: Indicates if force upload or not.
:param previous_commit_hash: The previous commit hash to diff with.
:returns None.
"""
if force_upload:
# If we force upload we don't want to overwrite the last commit hash in the index.json file,
# such that in the next upload run we could still be able to compare the current commit to the last upload commit
# hash and identify all changed packs.
commit = previous_commit_hash
else:
# Otherwise, update the index with the current commit hash (the commit of the upload)
commit = current_commit_hash
with open(os.path.join(index_folder_path, f"{GCPConfig.INDEX_NAME}.json"), "w+") as index_file:
index = {
'revision': build_number,
'modified': datetime.utcnow().strftime(Metadata.DATE_FORMAT),
'packs': private_packs,
'commit': commit
}
json.dump(index, index_file, indent=4)
index_zip_name = os.path.basename(index_folder_path)
index_zip_path = shutil.make_archive(base_name=index_folder_path, format="zip",
root_dir=extract_destination_path, base_dir=index_zip_name)
try:
index_blob.reload()
current_index_generation = index_blob.generation
index_blob.cache_control = "no-cache,max-age=0" # disabling caching for index blob
if is_private or current_index_generation == index_generation:
index_blob.upload_from_filename(index_zip_path)
logging.success(f"Finished uploading {GCPConfig.INDEX_NAME}.zip to storage.")
else:
logging.critical(f"Failed in uploading {GCPConfig.INDEX_NAME}, mismatch in index file generation")
logging.critical(f"Downloaded index generation: {index_generation}")
logging.critical(f"Current index generation: {current_index_generation}")
sys.exit(0)
except Exception:
logging.exception(f"Failed in uploading {GCPConfig.INDEX_NAME}.")
sys.exit(1)
finally:
shutil.rmtree(index_folder_path)
|
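For reference, the index.json payload written above has this shape (all values below are made up; the timestamp string follows whatever Metadata.DATE_FORMAT defines, and the pack entry shape is an assumption):
index_payload = {
    "revision": "123456",                          # CI build number
    "modified": "2022-01-01T00:00:00Z",            # made-up timestamp
    "packs": [{"id": "ExamplePack", "price": 0}],  # assumed private-pack shape
    "commit": "abcdef0",                           # current or previous commit hash (see force_upload)
}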
37,045 |
def plot_histogram(data, figsize=(7, 5), color=None, number_to_keep=None,
sort='asc', target_string=None,
legend=None, bar_labels=True, title=None):
"""Plot a histogram of data.
Args:
data (list or dict): This is either a list of dictionaries or a single
dict containing the values to represent (ex {'001': 130})
figsize (tuple): Figure size in inches.
color (list or str): String or list of strings for histogram bar colors.
number_to_keep (int): The number of terms to plot and rest
is made into a single bar called 'rest'.
sort (string): Could be 'asc', 'desc', or 'hamming'.
target_string (str): Target string if 'sort' is a distance measure.
legend(list): A list of strings to use for labels of the data.
The number of entries must match the length of data (if data is a
list or 1 if it's a dict)
bar_labels (bool): Label each bar in histogram with probability value.
title (str): A string to use for the plot title
Returns:
matplotlib.Figure: A figure for the rendered histogram.
Raises:
ImportError: Matplotlib not available.
VisualizationError: When legend is provided and the length doesn't
match the input data.
"""
if not HAS_MATPLOTLIB:
raise ImportError('Must have Matplotlib installed.')
if sort not in VALID_SORTS:
raise VisualizationError("Value of sort option, %s, isn't a "
"valid choice. Must be 'asc', "
"'desc', or 'hamming'")
elif sort in DIST_MEAS.keys() and target_string is None:
err_msg = 'Must define target_state when using distance measure.'
raise VisualizationError(err_msg)
if isinstance(data, dict):
data = [data]
if legend and len(legend) != len(data):
        raise VisualizationError("Length of legend (%s) doesn't match "
"number of input executions: %s" %
(len(legend), len(data)))
fig, ax = plt.subplots(figsize=figsize)
labels = list(sorted(
functools.reduce(lambda x, y: x.union(y.keys()), data, set())))
if number_to_keep is not None:
labels.append('rest')
if sort in DIST_MEAS.keys():
dist = []
for item in labels:
dist.append(DIST_MEAS[sort](item, target_string))
labels = [list(x) for x in zip(*sorted(zip(dist, labels),
key=lambda pair: pair[0]))][1]
labels_dict = OrderedDict()
# Set bar colors
if color is None:
color = ['#648fff', '#dc267f', '#785ef0', '#ffb000', '#fe6100']
elif isinstance(color, str):
color = [color]
all_pvalues = []
length = len(data)
for item, execution in enumerate(data):
if number_to_keep is not None:
data_temp = dict(Counter(execution).most_common(number_to_keep))
data_temp["rest"] = sum(execution.values()) - sum(data_temp.values())
execution = data_temp
values = []
for key in labels:
if key not in execution:
if number_to_keep is None:
labels_dict[key] = 1
values.append(0)
else:
values.append(-1)
else:
labels_dict[key] = 1
values.append(execution[key])
values = np.array(values, dtype=float)
where_idx = np.where(values >= 0)[0]
pvalues = values[where_idx] / sum(values[where_idx])
for value in pvalues:
all_pvalues.append(value)
numelem = len(values[where_idx])
ind = np.arange(numelem) # the x locations for the groups
width = 1/(len(data)+1) # the width of the bars
rects = []
for idx, val in enumerate(pvalues):
label = None
if not idx and legend:
label = legend[item]
if val >= 0:
rects.append(ax.bar(idx+item*width, val, width, label=label,
color=color[item % len(color)],
zorder=2))
bar_center = (width / 2) * (length - 1)
ax.set_xticks(ind + bar_center)
ax.set_xticklabels(labels_dict.keys(), fontsize=14, rotation=70)
# attach some text labels
if bar_labels:
for rect in rects:
for rec in rect:
height = rec.get_height()
if height >= 1e-3:
ax.text(rec.get_x() + rec.get_width() / 2., 1.05 * height,
'%.3f' % float(height),
ha='center', va='bottom', zorder=3)
else:
ax.text(rec.get_x() + rec.get_width() / 2., 1.05 * height,
'0',
ha='center', va='bottom', zorder=3)
# add some text for labels, title, and axes ticks
ax.set_ylabel('Probabilities', fontsize=14)
ax.set_ylim([0., min([1.2, max([1.2 * val for val in all_pvalues])])])
if sort == 'desc':
ax.invert_xaxis()
ax.yaxis.set_major_locator(MaxNLocator(5))
for tick in ax.yaxis.get_major_ticks():
tick.label.set_fontsize(14)
ax.set_facecolor('#eeeeee')
plt.grid(which='major', axis='y', zorder=0, linestyle='--')
if title:
plt.title(title)
if legend:
ax.legend(loc='upper left', bbox_to_anchor=(1.01, 1.0), ncol=1,
borderaxespad=0, frameon=True, fontsize=12)
if fig:
plt.close(fig)
return fig
|
def plot_histogram(data, figsize=(7, 5), color=None, number_to_keep=None,
sort='asc', target_string=None,
legend=None, bar_labels=True, title=None):
"""Plot a histogram of data.
Args:
data (list or dict): This is either a list of dictionaries or a single
dict containing the values to represent (ex {'001': 130})
figsize (tuple): Figure size in inches.
color (list or str): String or list of strings for histogram bar colors.
number_to_keep (int): The number of terms to plot and rest
is made into a single bar called 'rest'.
sort (string): Could be 'asc', 'desc', or 'hamming'.
target_string (str): Target string if 'sort' is a distance measure.
legend(list): A list of strings to use for labels of the data.
The number of entries must match the length of data (if data is a
list or 1 if it's a dict)
bar_labels (bool): Label each bar in histogram with probability value.
title (str): A string to use for the plot title
Returns:
matplotlib.Figure: A figure for the rendered histogram.
Raises:
ImportError: Matplotlib not available.
VisualizationError: When legend is provided and the length doesn't
match the input data.
"""
if not HAS_MATPLOTLIB:
raise ImportError('Must have Matplotlib installed.')
if sort not in VALID_SORTS:
raise VisualizationError("Value of sort option, %s, isn't a "
"valid choice. Must be 'asc', "
"'desc', or 'hamming'")
elif sort in DIST_MEAS.keys() and target_string is None:
err_msg = 'Must define target_string when using distance measure.'
raise VisualizationError(err_msg)
if isinstance(data, dict):
data = [data]
if legend and len(legend) != len(data):
        raise VisualizationError("Length of legend (%s) doesn't match "
"number of input executions: %s" %
(len(legend), len(data)))
fig, ax = plt.subplots(figsize=figsize)
labels = list(sorted(
functools.reduce(lambda x, y: x.union(y.keys()), data, set())))
if number_to_keep is not None:
labels.append('rest')
if sort in DIST_MEAS.keys():
dist = []
for item in labels:
dist.append(DIST_MEAS[sort](item, target_string))
labels = [list(x) for x in zip(*sorted(zip(dist, labels),
key=lambda pair: pair[0]))][1]
labels_dict = OrderedDict()
# Set bar colors
if color is None:
color = ['#648fff', '#dc267f', '#785ef0', '#ffb000', '#fe6100']
elif isinstance(color, str):
color = [color]
all_pvalues = []
length = len(data)
for item, execution in enumerate(data):
if number_to_keep is not None:
data_temp = dict(Counter(execution).most_common(number_to_keep))
data_temp["rest"] = sum(execution.values()) - sum(data_temp.values())
execution = data_temp
values = []
for key in labels:
if key not in execution:
if number_to_keep is None:
labels_dict[key] = 1
values.append(0)
else:
values.append(-1)
else:
labels_dict[key] = 1
values.append(execution[key])
values = np.array(values, dtype=float)
where_idx = np.where(values >= 0)[0]
pvalues = values[where_idx] / sum(values[where_idx])
for value in pvalues:
all_pvalues.append(value)
numelem = len(values[where_idx])
ind = np.arange(numelem) # the x locations for the groups
width = 1/(len(data)+1) # the width of the bars
rects = []
for idx, val in enumerate(pvalues):
label = None
if not idx and legend:
label = legend[item]
if val >= 0:
rects.append(ax.bar(idx+item*width, val, width, label=label,
color=color[item % len(color)],
zorder=2))
bar_center = (width / 2) * (length - 1)
ax.set_xticks(ind + bar_center)
ax.set_xticklabels(labels_dict.keys(), fontsize=14, rotation=70)
# attach some text labels
if bar_labels:
for rect in rects:
for rec in rect:
height = rec.get_height()
if height >= 1e-3:
ax.text(rec.get_x() + rec.get_width() / 2., 1.05 * height,
'%.3f' % float(height),
ha='center', va='bottom', zorder=3)
else:
ax.text(rec.get_x() + rec.get_width() / 2., 1.05 * height,
'0',
ha='center', va='bottom', zorder=3)
# add some text for labels, title, and axes ticks
ax.set_ylabel('Probabilities', fontsize=14)
ax.set_ylim([0., min([1.2, max([1.2 * val for val in all_pvalues])])])
if sort == 'desc':
ax.invert_xaxis()
ax.yaxis.set_major_locator(MaxNLocator(5))
for tick in ax.yaxis.get_major_ticks():
tick.label.set_fontsize(14)
ax.set_facecolor('#eeeeee')
plt.grid(which='major', axis='y', zorder=0, linestyle='--')
if title:
plt.title(title)
if legend:
ax.legend(loc='upper left', bbox_to_anchor=(1.01, 1.0), ncol=1,
borderaxespad=0, frameon=True, fontsize=12)
if fig:
plt.close(fig)
return fig
|
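A minimal usage sketch for plot_histogram() above; the counts dictionaries are made up but follow the {'001': 130}-style shape described in the docstring.
counts_a = {'00': 510, '01': 15, '10': 18, '11': 481}
counts_b = {'00': 495, '01': 20, '10': 25, '11': 484}
fig = plot_histogram([counts_a, counts_b],
                     legend=['run 1', 'run 2'],
                     sort='desc',
                     bar_labels=True,
                     title='Measurement outcomes')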
24,983 |
def get_pylint_home() -> str:
"""Return the pylint home."""
if "PYLINTHOME" in os.environ:
return os.environ["PYLINTHOME"]
pylint_home = DEFAULT_PYLINT_HOME
# The spam prevention is due to pylint being used in parallel by
# pre-commit, and the message being spammy in this context
# Also if you work with old version of pylint that recreate the
# old pylint home, you can get the old message for a long time.
prefix_spam_prevention = "pylint_warned_about_old_cache_already"
spam_prevention_file = os.path.join(
pylint_home,
datetime.now().strftime(prefix_spam_prevention + "_%Y-%m-%d.temp"),
)
old_home = os.path.join(USER_HOME, OLD_DEFAULT_PYLINT_HOME)
if os.path.exists(old_home) and not os.path.exists(spam_prevention_file):
print(
f"PYLINTHOME is now '{pylint_home}' but obsolescent '{old_home}' is found; "
"you can safely remove the latter",
file=sys.stderr,
)
# Remove old spam prevention file
if os.path.exists(pylint_home):
for filename in os.listdir(pylint_home):
if prefix_spam_prevention in filename:
try:
os.remove(os.path.join(pylint_home, filename))
except OSError:
pass
# Create spam prevention file for today
try:
Path(pylint_home).mkdir(parents=True, exist_ok=True)
with open(spam_prevention_file, "w", encoding="utf8") as f:
f.write("")
except Exception as exc: # pylint: disable=broad-except
print(
"Can't write the file that was supposed to "
f"prevent 'pylint.d' deprecation spam in {pylint_home} because of {exc}."
)
return pylint_home
|
def _get_pylint_home() -> str:
"""Return the pylint home."""
if "PYLINTHOME" in os.environ:
return os.environ["PYLINTHOME"]
pylint_home = DEFAULT_PYLINT_HOME
# The spam prevention is due to pylint being used in parallel by
# pre-commit, and the message being spammy in this context
# Also if you work with old version of pylint that recreate the
# old pylint home, you can get the old message for a long time.
prefix_spam_prevention = "pylint_warned_about_old_cache_already"
spam_prevention_file = os.path.join(
pylint_home,
datetime.now().strftime(prefix_spam_prevention + "_%Y-%m-%d.temp"),
)
old_home = os.path.join(USER_HOME, OLD_DEFAULT_PYLINT_HOME)
if os.path.exists(old_home) and not os.path.exists(spam_prevention_file):
print(
f"PYLINTHOME is now '{pylint_home}' but obsolescent '{old_home}' is found; "
"you can safely remove the latter",
file=sys.stderr,
)
# Remove old spam prevention file
if os.path.exists(pylint_home):
for filename in os.listdir(pylint_home):
if prefix_spam_prevention in filename:
try:
os.remove(os.path.join(pylint_home, filename))
except OSError:
pass
# Create spam prevention file for today
try:
Path(pylint_home).mkdir(parents=True, exist_ok=True)
with open(spam_prevention_file, "w", encoding="utf8") as f:
f.write("")
except Exception as exc: # pylint: disable=broad-except
print(
"Can't write the file that was supposed to "
f"prevent 'pylint.d' deprecation spam in {pylint_home} because of {exc}."
)
return pylint_home
|
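The lookup above (in either variant) honors the PYLINTHOME environment variable before falling back to DEFAULT_PYLINT_HOME; a quick way to exercise that branch (the path is hypothetical):
import os

os.environ["PYLINTHOME"] = "/tmp/my-pylint-home"   # hypothetical override
assert _get_pylint_home() == "/tmp/my-pylint-home"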
34,033 |
def merge_runtime_envs(parent_env: Dict, child_env: Dict) -> Dict:
"""Creates a runtime_env dict by merging a parent and child environment.
This method is not destructive. It leaves the parent and child envs
the same.
The merge is a shallow update where the child environment inherits the
parent environment's settings. If the child environment specifies any
    env settings, those settings take precedence over the parent.
- Note: env_vars are a special case. The child's env_vars are combined
with the parent.
Args:
parent_env: The environment to inherit settings from.
child_env: The environment with override settings.
Returns: A dictionary containing the merged runtime_env settings.
Raises:
TypeError: If a dictionary is not passed in for parent_env or child_env.
"""
if not isinstance(parent_env, Dict):
raise TypeError(
f'Got unexpected type "{type(parent_env)}" for parent_env. '
"parent_env must be a dictionary."
)
if not isinstance(child_env, Dict):
raise TypeError(
f'Got unexpected type "{type(child_env)}" for child_env. '
"child_env must be a dictionary."
)
defaults = copy.deepcopy(parent_env)
overrides = copy.deepcopy(child_env)
default_env_vars = defaults.get("env_vars", {})
override_env_vars = overrides.get("env_vars", {})
defaults.update(overrides)
default_env_vars.update(override_env_vars)
defaults["env_vars"] = default_env_vars
return defaults
|
def merge_runtime_envs(parent_env: Dict, child_env: Dict) -> Dict:
"""Creates a runtime_env dict by merging a parent and child environment.
This method is not destructive. It leaves the parent and child envs
the same.
The merge is a shallow update where the child environment inherits the
parent environment's settings. If the child environment specifies any
    env settings, those settings take precedence over the parent.
- Note: env_vars are a special case. The child's env_vars are combined
with the parent.
Args:
parent_env: The environment to inherit settings from.
child_env: The environment with override settings.
Returns: A new dictionary containing the merged runtime_env settings.
Raises:
TypeError: If a dictionary is not passed in for parent_env or child_env.
"""
if not isinstance(parent_env, Dict):
raise TypeError(
f'Got unexpected type "{type(parent_env)}" for parent_env. '
"parent_env must be a dictionary."
)
if not isinstance(child_env, Dict):
raise TypeError(
f'Got unexpected type "{type(child_env)}" for child_env. '
"child_env must be a dictionary."
)
defaults = copy.deepcopy(parent_env)
overrides = copy.deepcopy(child_env)
default_env_vars = defaults.get("env_vars", {})
override_env_vars = overrides.get("env_vars", {})
defaults.update(overrides)
default_env_vars.update(override_env_vars)
defaults["env_vars"] = default_env_vars
return defaults
|
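A usage sketch for merge_runtime_envs(); the environment contents are made up but follow the documented semantics: child keys win, except env_vars, which are combined.
parent = {"pip": ["requests"], "env_vars": {"A": "1", "B": "2"}}
child = {"pip": ["numpy"], "env_vars": {"B": "9", "C": "3"}}
merged = merge_runtime_envs(parent, child)
# merged == {"pip": ["numpy"], "env_vars": {"A": "1", "B": "9", "C": "3"}}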
56,758 |
def sample_smc(
draws=2000,
kernel=IMH,
*,
start=None,
model=None,
random_seed=None,
chains=None,
cores=None,
compute_convergence_checks=True,
return_inferencedata=True,
idata_kwargs=None,
progressbar=True,
**kernel_kwargs,
):
r"""
Sequential Monte Carlo based sampling.
Parameters
----------
draws : int, default 2000
The number of samples to draw from the posterior (i.e. last stage). And also the number of
independent chains. Defaults to 2000.
kernel : class, default `pymc.smc.smc.IMH`
SMC_Kernel used. Defaults to :class:`pymc.smc.smc.IMH` (Independent Metropolis Hastings)
start : dict, or array of dict, default None
Starting point in parameter space. It should be a list of dict with length `chains`.
When None (default) the starting point is sampled from the prior distribution.
model : Model (optional if in ``with`` context).
random_seed : int, array_like of int, RandomState or Generator, optional
Random seed(s) used by the sampling steps. If a list, tuple or array of ints
is passed, each entry will be used to seed each chain. A ValueError will be
raised if the length does not match the number of chains.
chains : int, default None
The number of chains to sample. Running independent chains is important for some
convergence statistics. If ``None`` (default), then set to either ``cores`` or 2, whichever
is larger.
cores : int, default None
The number of chains to run in parallel. If ``None``, set to the number of CPUs in the
system.
compute_convergence_checks : bool, default True
Whether to compute sampler statistics like ``R hat`` and ``effective_n``.
Defaults to ``True``.
return_inferencedata : bool, default True
Whether to return the trace as an InferenceData (True) object or a MultiTrace (False).
Defaults to ``True``.
idata_kwargs : dict, optional
Keyword arguments for :func:`pymc.to_inference_data`.
progressbar : bool, optional, default True
Whether or not to display a progress bar in the command line.
**kernel_kwargs : dict, optional
Keyword arguments passed to the SMC_kernel. The default IMH kernel takes the following keywords:
threshold : float, default 0.5
Determines the change of beta from stage to stage, i.e. indirectly the number of stages,
the higher the value of `threshold` the higher the number of stages. Defaults to 0.5.
It should be between 0 and 1.
correlation_threshold : float, default 0.01
The lower the value the higher the number of MCMC steps computed automatically.
Defaults to 0.01. It should be between 0 and 1.
Keyword arguments for other kernels should be checked in the respective docstrings.
Notes
-----
SMC works by moving through successive stages. At each stage the inverse temperature
:math:`\beta` is increased a little bit (starting from 0 up to 1). When :math:`\beta` = 0
we have the prior distribution and when :math:`\beta` = 1 we have the posterior distribution.
So in more general terms, we are always computing samples from a tempered posterior that we can
write as:
.. math::
p(\theta \mid y)_{\beta} = p(y \mid \theta)^{\beta} p(\theta)
A summary of the algorithm is:
1. Initialize :math:`\beta` at zero and stage at zero.
    2. Generate N samples :math:`S_{\beta}` from the prior (because when :math:`\beta = 0` the
tempered posterior is the prior).
3. Increase :math:`\beta` in order to make the effective sample size equal some predefined
value (we use :math:`Nt`, where :math:`t` is 0.5 by default).
4. Compute a set of N importance weights W. The weights are computed as the ratio of the
likelihoods of a sample at stage i+1 and stage i.
5. Obtain :math:`S_{w}` by re-sampling according to W.
6. Use W to compute the mean and covariance for the proposal distribution, a MvNormal.
7. Run N independent MCMC chains, starting each one from a different sample
in :math:`S_{w}`. For the IMH kernel, the mean of the proposal distribution is the
mean of the previous posterior stage and not the current point in parameter space.
8. The N chains are run until the autocorrelation with the samples from the previous stage
stops decreasing given a certain threshold.
9. Repeat from step 3 until :math:`\beta \ge 1`.
10. The final result is a collection of N samples from the posterior.
References
----------
.. [Minson2013] Minson, S. E., Simons, M., and Beck, J. L. (2013).
"Bayesian inversion for finite fault earthquake source models I- Theory and algorithm."
Geophysical Journal International, 2013, 194(3), pp.1701-1726.
`link <https://gji.oxfordjournals.org/content/194/3/1701.full>`__
.. [Ching2007] Ching, J., and Chen, Y. (2007).
"Transitional Markov Chain Monte Carlo Method for Bayesian Model Updating, Model Class
Selection, and Model Averaging." J. Eng. Mech., 2007, 133(7), pp. 816-832. doi:10.1061/(ASCE)0733-9399(2007)133:7(816).
`link <http://ascelibrary.org/doi/abs/10.1061/%28ASCE%290733-9399
%282007%29133:7%28816%29>`__
"""
if isinstance(kernel, str) and kernel.lower() in ("abc", "metropolis"):
warnings.warn(
f'The kernel string argument "{kernel}" in sample_smc has been deprecated. '
f"It is no longer needed to distinguish between `abc` and `metropolis`",
FutureWarning,
stacklevel=2,
)
kernel = IMH
if kernel_kwargs.pop("save_sim_data", None) is not None:
warnings.warn(
"save_sim_data has been deprecated. Use pm.sample_posterior_predictive "
"to obtain the same type of samples.",
FutureWarning,
stacklevel=2,
)
if kernel_kwargs.pop("save_log_pseudolikelihood", None) is not None:
warnings.warn(
"save_log_pseudolikelihood has been deprecated. This information is "
"now saved as log_likelihood in models with Simulator distributions.",
FutureWarning,
stacklevel=2,
)
parallel = kernel_kwargs.pop("parallel", None)
if parallel is not None:
warnings.warn(
"The argument parallel is deprecated, use the argument cores instead.",
FutureWarning,
stacklevel=2,
)
if parallel is False:
cores = 1
if cores is None:
cores = _cpu_count()
if chains is None:
chains = max(2, cores)
else:
cores = min(chains, cores)
if random_seed == -1:
raise FutureWarning(
f"random_seed should be a non-negative integer or None, got: {random_seed}"
"This will raise a ValueError in the Future"
)
random_seed = None
if isinstance(random_seed, int) or random_seed is None:
rng = np.random.default_rng(seed=random_seed)
random_seed = list(rng.integers(2**30, size=chains))
elif isinstance(random_seed, Iterable):
if len(random_seed) != chains:
            raise ValueError(f"Length of seeds ({len(random_seed)}) must match number of chains {chains}")
else:
raise TypeError("Invalid value for `random_seed`. Must be tuple, list, int or None")
model = modelcontext(model)
_log = logging.getLogger("pymc")
_log.info("Initializing SMC sampler...")
_log.info(
f"Sampling {chains} chain{'s' if chains > 1 else ''} "
f"in {cores} job{'s' if cores > 1 else ''}"
)
params = (
draws,
kernel,
start,
model,
)
t1 = time.time()
if cores > 1:
results = run_chains_parallel(
chains, progressbar, _sample_smc_int, params, random_seed, kernel_kwargs, cores
)
else:
results = run_chains_sequential(
chains, progressbar, _sample_smc_int, params, random_seed, kernel_kwargs
)
(
traces,
sample_stats,
sample_settings,
) = zip(*results)
trace = MultiTrace(traces)
_t_sampling = time.time() - t1
sample_stats, idata = _save_sample_stats(
sample_settings,
sample_stats,
chains,
trace,
return_inferencedata,
_t_sampling,
idata_kwargs,
model,
)
if compute_convergence_checks:
_compute_convergence_checks(idata, draws, model, trace)
return idata if return_inferencedata else trace
|
def sample_smc(
draws=2000,
kernel=IMH,
*,
start=None,
model=None,
random_seed=None,
chains=None,
cores=None,
compute_convergence_checks=True,
return_inferencedata=True,
idata_kwargs=None,
progressbar=True,
**kernel_kwargs,
):
r"""
Sequential Monte Carlo based sampling.
Parameters
----------
draws : int, default 2000
The number of samples to draw from the posterior (i.e. last stage). And also the number of
independent chains. Defaults to 2000.
kernel : SMC_kernel, optional
SMC kernel used. Defaults to :class:`pymc.smc.smc.IMH` (Independent Metropolis Hastings)
start : dict, or array of dict, default None
Starting point in parameter space. It should be a list of dict with length `chains`.
When None (default) the starting point is sampled from the prior distribution.
model : Model (optional if in ``with`` context).
random_seed : int, array_like of int, RandomState or Generator, optional
Random seed(s) used by the sampling steps. If a list, tuple or array of ints
is passed, each entry will be used to seed each chain. A ValueError will be
raised if the length does not match the number of chains.
chains : int, default None
The number of chains to sample. Running independent chains is important for some
convergence statistics. If ``None`` (default), then set to either ``cores`` or 2, whichever
is larger.
cores : int, default None
The number of chains to run in parallel. If ``None``, set to the number of CPUs in the
system.
compute_convergence_checks : bool, default True
Whether to compute sampler statistics like ``R hat`` and ``effective_n``.
Defaults to ``True``.
return_inferencedata : bool, default True
Whether to return the trace as an InferenceData (True) object or a MultiTrace (False).
Defaults to ``True``.
idata_kwargs : dict, optional
Keyword arguments for :func:`pymc.to_inference_data`.
progressbar : bool, optional, default True
Whether or not to display a progress bar in the command line.
**kernel_kwargs : dict, optional
Keyword arguments passed to the SMC_kernel. The default IMH kernel takes the following keywords:
threshold : float, default 0.5
Determines the change of beta from stage to stage, i.e. indirectly the number of stages,
the higher the value of `threshold` the higher the number of stages. Defaults to 0.5.
It should be between 0 and 1.
correlation_threshold : float, default 0.01
The lower the value the higher the number of MCMC steps computed automatically.
Defaults to 0.01. It should be between 0 and 1.
Keyword arguments for other kernels should be checked in the respective docstrings.
Notes
-----
SMC works by moving through successive stages. At each stage the inverse temperature
:math:`\beta` is increased a little bit (starting from 0 up to 1). When :math:`\beta` = 0
we have the prior distribution and when :math:`\beta` = 1 we have the posterior distribution.
So in more general terms, we are always computing samples from a tempered posterior that we can
write as:
.. math::
p(\theta \mid y)_{\beta} = p(y \mid \theta)^{\beta} p(\theta)
A summary of the algorithm is:
1. Initialize :math:`\beta` at zero and stage at zero.
    2. Generate N samples :math:`S_{\beta}` from the prior (because when :math:`\beta = 0` the
tempered posterior is the prior).
3. Increase :math:`\beta` in order to make the effective sample size equal some predefined
value (we use :math:`Nt`, where :math:`t` is 0.5 by default).
4. Compute a set of N importance weights W. The weights are computed as the ratio of the
likelihoods of a sample at stage i+1 and stage i.
5. Obtain :math:`S_{w}` by re-sampling according to W.
6. Use W to compute the mean and covariance for the proposal distribution, a MvNormal.
7. Run N independent MCMC chains, starting each one from a different sample
in :math:`S_{w}`. For the IMH kernel, the mean of the proposal distribution is the
mean of the previous posterior stage and not the current point in parameter space.
8. The N chains are run until the autocorrelation with the samples from the previous stage
stops decreasing given a certain threshold.
9. Repeat from step 3 until :math:`\beta \ge 1`.
10. The final result is a collection of N samples from the posterior.
References
----------
.. [Minson2013] Minson, S. E., Simons, M., and Beck, J. L. (2013).
"Bayesian inversion for finite fault earthquake source models I- Theory and algorithm."
Geophysical Journal International, 2013, 194(3), pp.1701-1726.
`link <https://gji.oxfordjournals.org/content/194/3/1701.full>`__
.. [Ching2007] Ching, J., and Chen, Y. (2007).
"Transitional Markov Chain Monte Carlo Method for Bayesian Model Updating, Model Class
Selection, and Model Averaging." J. Eng. Mech., 2007, 133(7), pp. 816-832. doi:10.1061/(ASCE)0733-9399(2007)133:7(816).
`link <http://ascelibrary.org/doi/abs/10.1061/%28ASCE%290733-9399
%282007%29133:7%28816%29>`__
"""
if isinstance(kernel, str) and kernel.lower() in ("abc", "metropolis"):
warnings.warn(
f'The kernel string argument "{kernel}" in sample_smc has been deprecated. '
f"It is no longer needed to distinguish between `abc` and `metropolis`",
FutureWarning,
stacklevel=2,
)
kernel = IMH
if kernel_kwargs.pop("save_sim_data", None) is not None:
warnings.warn(
"save_sim_data has been deprecated. Use pm.sample_posterior_predictive "
"to obtain the same type of samples.",
FutureWarning,
stacklevel=2,
)
if kernel_kwargs.pop("save_log_pseudolikelihood", None) is not None:
warnings.warn(
"save_log_pseudolikelihood has been deprecated. This information is "
"now saved as log_likelihood in models with Simulator distributions.",
FutureWarning,
stacklevel=2,
)
parallel = kernel_kwargs.pop("parallel", None)
if parallel is not None:
warnings.warn(
"The argument parallel is deprecated, use the argument cores instead.",
FutureWarning,
stacklevel=2,
)
if parallel is False:
cores = 1
if cores is None:
cores = _cpu_count()
if chains is None:
chains = max(2, cores)
else:
cores = min(chains, cores)
if random_seed == -1:
raise FutureWarning(
f"random_seed should be a non-negative integer or None, got: {random_seed}"
"This will raise a ValueError in the Future"
)
random_seed = None
if isinstance(random_seed, int) or random_seed is None:
rng = np.random.default_rng(seed=random_seed)
random_seed = list(rng.integers(2**30, size=chains))
elif isinstance(random_seed, Iterable):
if len(random_seed) != chains:
            raise ValueError(f"Length of seeds ({len(random_seed)}) must match number of chains {chains}")
else:
raise TypeError("Invalid value for `random_seed`. Must be tuple, list, int or None")
model = modelcontext(model)
_log = logging.getLogger("pymc")
_log.info("Initializing SMC sampler...")
_log.info(
f"Sampling {chains} chain{'s' if chains > 1 else ''} "
f"in {cores} job{'s' if cores > 1 else ''}"
)
params = (
draws,
kernel,
start,
model,
)
t1 = time.time()
if cores > 1:
results = run_chains_parallel(
chains, progressbar, _sample_smc_int, params, random_seed, kernel_kwargs, cores
)
else:
results = run_chains_sequential(
chains, progressbar, _sample_smc_int, params, random_seed, kernel_kwargs
)
(
traces,
sample_stats,
sample_settings,
) = zip(*results)
trace = MultiTrace(traces)
_t_sampling = time.time() - t1
sample_stats, idata = _save_sample_stats(
sample_settings,
sample_stats,
chains,
trace,
return_inferencedata,
_t_sampling,
idata_kwargs,
model,
)
if compute_convergence_checks:
_compute_convergence_checks(idata, draws, model, trace)
return idata if return_inferencedata else trace
|
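A hedged usage sketch for sample_smc() on a toy model, assuming the usual `import pymc as pm` convention; the observed values are made up.
import pymc as pm

with pm.Model():
    theta = pm.Normal("theta", mu=0, sigma=1)
    pm.Normal("obs", mu=theta, sigma=1, observed=[0.2, -0.4, 0.1])
    idata = pm.sample_smc(draws=1000, chains=2, progressbar=False)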
12,290 |
def hinton(rho, xlabels=None, ylabels=None, title=None, ax=None, cmap=None,
label_top=True, color_style="threshold"):
"""Draws a Hinton diagram for visualizing a density matrix or superoperator.
Parameters
----------
rho : qobj
Input density matrix or superoperator.
xlabels : list of strings or False
list of x labels
ylabels : list of strings or False
list of y labels
title : string
title of the plot (optional)
ax : a matplotlib axes instance
The axes context in which the plot will be drawn.
cmap : a matplotlib colormap instance
Color map to use when plotting.
label_top : bool
If True, x-axis labels will be placed on top, otherwise
they will appear below the plot.
color_style : string
Determines how colors are assigned to each square. If set to
`"threshold"` (default), each square is plotted as the maximum of
`cmap` for positive numbers and as the minimum for minimum. If set to
`"scaled"`, each color is chosen by passing the magnitude of the
corresponding matrix element into `cmap`. If set to `"phase"`, each
color is chosen according to the argument of the corresponding matrix
element; note that this generalizes `"threshold"` to complex numbers.
Returns
-------
fig, ax : tuple
A tuple of the matplotlib figure and axes instances used to produce
the figure.
Raises
------
ValueError
Input argument is not a quantum object.
"""
# Apply default colormaps.
# TODO: abstract this away into something that makes default
# colormaps.
cmap = (
(cm.Greys_r if settings.colorblind_safe else cm.RdBu)
if cmap is None else cmap
)
# Extract plotting data W from the input.
if isinstance(rho, Qobj):
if rho.isoper:
W = rho.full()
# Create default labels if none are given.
if xlabels is None or ylabels is None:
labels = _cb_labels(rho.dims[0])
xlabels = xlabels if xlabels is not None else list(labels[0])
ylabels = ylabels if ylabels is not None else list(labels[1])
elif rho.isoperket:
W = vector_to_operator(rho).full()
elif rho.isoperbra:
W = vector_to_operator(rho.dag()).full()
elif rho.issuper:
if not _isqubitdims(rho.dims):
raise ValueError("Hinton plots of superoperators are "
"currently only supported for qubits.")
# Convert to a superoperator in the Pauli basis,
# so that all the elements are real.
sqobj = _super_to_superpauli(rho)
nq = int(log2(sqobj.shape[0]) / 2)
W = sqobj.full().T
# Create default labels, too.
if (xlabels is None) or (ylabels is None):
labels = list(map("".join, it.product("IXYZ", repeat=nq)))
xlabels = xlabels if xlabels is not None else labels
ylabels = ylabels if ylabels is not None else labels
else:
raise ValueError(
"Input quantum object must be an operator or superoperator."
)
else:
W = rho
if ax is None:
fig, ax = plt.subplots(1, 1, figsize=(8, 6))
else:
fig = None
if not (xlabels or ylabels):
ax.axis('off')
ax.axis('equal')
ax.set_frame_on(False)
height, width = W.shape
w_max = 1.25 * max(abs(np.diag(np.array(W))))
if w_max <= 0.0:
w_max = 1.0
# Set color_fn here.
if color_style == "scaled":
def color_fn(w):
return cmap(int((w + w_max) * 256 / (2 * w_max)))
elif color_style == "threshold":
def color_fn(w):
return cmap(255 if w > 0 else 0)
elif color_style == "phase":
def color_fn(w):
return cmap(int(255 * np.mod(1 - np.angle(w) / np.pi, 2)))
else:
raise ValueError(
"Unknown color style {} for Hinton diagrams.".format(color_style)
)
ax.fill(array([0, width, width, 0]), array([0, 0, height, height]),
color=cmap(128))
for x in range(width):
for y in range(height):
_x = x + 1
_y = y + 1
if np.real(W[x, y]) > 0.0:
_blob(_x - 0.5, height - _y + 0.5, abs(W[x, y]), w_max,
min(1, abs(W[x, y]) / w_max), color_fn=color_fn, ax=ax)
else:
_blob(
_x - 0.5, height - _y + 0.5,
-abs(W[x, y]), w_max,
min(1, abs(W[x, y]) / w_max), color_fn=color_fn, ax=ax
)
# color axis
norm = mpl.colors.Normalize(-abs(W).max(), abs(W).max())
cax, kw = mpl.colorbar.make_axes(ax, shrink=0.75, pad=.1)
mpl.colorbar.ColorbarBase(cax, norm=norm, cmap=cmap)
xtics = 0.5 + np.arange(width)
# x axis
ax.xaxis.set_major_locator(plt.FixedLocator(xtics))
if xlabels:
nxlabels = len(xlabels)
if nxlabels != len(xtics):
raise ValueError(f"got {nxlabels} xlabels but needed {len(xtics)}")
ax.set_xticklabels(xlabels)
if label_top:
ax.xaxis.tick_top()
ax.tick_params(axis='x', labelsize=14)
# y axis
ytics = 0.5 + np.arange(height)
ax.yaxis.set_major_locator(plt.FixedLocator(ytics))
if ylabels:
nylabels = len(ylabels)
if nylabels != len(ytics):
raise ValueError(f"got {nylabels} ylabels but needed {len(ytics)}")
ax.set_yticklabels(list(reversed(ylabels)))
ax.tick_params(axis='y', labelsize=14)
return fig, ax
|
def hinton(rho, xlabels=None, ylabels=None, title=None, ax=None, cmap=None,
label_top=True, color_style="threshold"):
"""Draws a Hinton diagram for visualizing a density matrix or superoperator.
Parameters
----------
rho : qobj
Input density matrix or superoperator.
xlabels : list of strings or False
list of x labels
ylabels : list of strings or False
list of y labels
title : string
title of the plot (optional)
ax : a matplotlib axes instance
The axes context in which the plot will be drawn.
cmap : a matplotlib colormap instance
Color map to use when plotting.
label_top : bool
If True, x-axis labels will be placed on top, otherwise
they will appear below the plot.
color_style : string
Determines how colors are assigned to each square. If set to
`"threshold"` (default), each square is plotted as the maximum of
`cmap` for positive numbers and as the minimum for negative numbers. If set to
`"scaled"`, each color is chosen by passing the magnitude of the
corresponding matrix element into `cmap`. If set to `"phase"`, each
color is chosen according to the argument of the corresponding matrix
element; note that this generalizes `"threshold"` to complex numbers.
Returns
-------
fig, ax : tuple
A tuple of the matplotlib figure and axes instances used to produce
the figure.
Raises
------
ValueError
Input argument is not a quantum object.
"""
# Apply default colormaps.
# TODO: abstract this away into something that makes default
# colormaps.
cmap = (
(cm.Greys_r if settings.colorblind_safe else cm.RdBu)
if cmap is None else cmap
)
# Extract plotting data W from the input.
if isinstance(rho, Qobj):
if rho.isoper:
W = rho.full()
# Create default labels if none are given.
if xlabels is None or ylabels is None:
labels = _cb_labels(rho.dims[0])
xlabels = xlabels if xlabels is not None else list(labels[0])
ylabels = ylabels if ylabels is not None else list(labels[1])
elif rho.isoperket:
W = vector_to_operator(rho).full()
elif rho.isoperbra:
W = vector_to_operator(rho.dag()).full()
elif rho.issuper:
if not _isqubitdims(rho.dims):
raise ValueError("Hinton plots of superoperators are "
"currently only supported for qubits.")
# Convert to a superoperator in the Pauli basis,
# so that all the elements are real.
sqobj = _super_to_superpauli(rho)
nq = int(log2(sqobj.shape[0]) / 2)
W = sqobj.full().T
# Create default labels, too.
if (xlabels is None) or (ylabels is None):
labels = list(map("".join, it.product("IXYZ", repeat=nq)))
xlabels = xlabels if xlabels is not None else labels
ylabels = ylabels if ylabels is not None else labels
else:
raise ValueError(
"Input quantum object must be an operator or superoperator."
)
else:
W = rho
if ax is None:
fig, ax = plt.subplots(1, 1, figsize=(8, 6))
else:
fig = None
if not (xlabels or ylabels):
ax.axis('off')
ax.axis('equal')
ax.set_frame_on(False)
height, width = W.shape
w_max = 1.25 * max(abs(np.diag(np.array(W))))
if w_max <= 0.0:
w_max = 1.0
# Set color_fn here.
if color_style == "scaled":
def color_fn(w):
return cmap(int((w + w_max) * 256 / (2 * w_max)))
elif color_style == "threshold":
def color_fn(w):
return cmap(255 if w > 0 else 0)
elif color_style == "phase":
def color_fn(w):
return cmap(int(255 * np.mod(1 - np.angle(w) / np.pi, 2)))
else:
raise ValueError(
"Unknown color style {} for Hinton diagrams.".format(color_style)
)
ax.fill(array([0, width, width, 0]), array([0, 0, height, height]),
color=cmap(128))
for x in range(width):
for y in range(height):
_x = x + 1
_y = y + 1
if np.real(W[x, y]) > 0.0:
_blob(_x - 0.5, height - _y + 0.5, abs(W[x, y]), w_max,
min(1, abs(W[x, y]) / w_max), color_fn=color_fn, ax=ax)
else:
_blob(
_x - 0.5, height - _y + 0.5,
-abs(W[x, y]), w_max,
min(1, abs(W[x, y]) / w_max), color_fn=color_fn, ax=ax
)
# color axis
norm = mpl.colors.Normalize(-abs(W).max(), abs(W).max())
cax, kw = mpl.colorbar.make_axes(ax, shrink=0.75, pad=.1)
mpl.colorbar.ColorbarBase(cax, norm=norm, cmap=cmap)
xtics = 0.5 + np.arange(width)
# x axis
ax.xaxis.set_major_locator(plt.FixedLocator(xtics))
if xlabels:
nxlabels = len(xlabels)
if nxlabels != len(xtics):
raise ValueError(f"got {nxlabels} xlabels but needed {len(xtics)}")
ax.set_xticklabels(xlabels)
if label_top:
ax.xaxis.tick_top()
ax.tick_params(axis='x', labelsize=14)
# y axis
ytics = 0.5 + np.arange(height)
ax.yaxis.set_major_locator(plt.FixedLocator(ytics))
if ylabels:
nylabels = len(ylabels)
if nylabels != len(ytics):
raise ValueError(f"got {nylabels} ylabels but needed {len(ytics)}")
ax.set_yticklabels(list(reversed(ylabels)))
ax.tick_params(axis='y', labelsize=14)
return fig, ax
|
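A minimal usage sketch for hinton() above, assuming QuTiP is available; a random density matrix is drawn with the "scaled" color style.
import qutip

rho = qutip.rand_dm(4)   # random 4x4 density matrix
fig, ax = hinton(rho, color_style="scaled")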
31,191 |
def get_incident_data_command(client, args):
incident_obj_id = args.get('incident_obj_id')
incident_id = args.get('incident_id')
date = args.get('date')
result = client.get_incident_data(incident_obj_id, incident_id, date)
if not result.get('success'):
raise DemistoException(result['message'])
incident_data = result.get('rows')
table_header = []
if len(incident_data) > 0:
table_header = list(incident_data[0].keys())
markdown = tableToMarkdown('Incident Data', incident_data, headers=table_header)
return CommandResults(
readable_output=markdown,
outputs_prefix='LogPoint.Incidents.data',
outputs_key_field='',
outputs=incident_data
)
|
def get_incident_data_command(client, args):
incident_obj_id = args.get('incident_obj_id')
incident_id = args.get('incident_id')
date = args.get('date')
result = client.get_incident_data(incident_obj_id, incident_id, date)
if not result.get('success'):
raise DemistoException(result['message'])
incident_data = result.get('rows',{})
table_header = []
if len(incident_data) > 0:
table_header = list(incident_data[0].keys())
markdown = tableToMarkdown('Incident Data', incident_data, headers=table_header)
return CommandResults(
readable_output=markdown,
outputs_prefix='LogPoint.Incidents.data',
outputs_key_field='',
outputs=incident_data
)
|
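A sketch of invoking the command handler above from an integration's main() dispatch; client is assumed to be the integration's Client wrapper, the argument values are made up, and return_results is the standard XSOAR helper for emitting CommandResults.
args = {"incident_obj_id": "5f0c1a2b3c4d5e6f7a8b9c0d",  # made-up object id
        "incident_id": "42",                            # made-up incident id
        "date": "2021/01/01"}                           # made-up date string
results = get_incident_data_command(client, args)
return_results(results)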
23,156 |
def read_parquet(
path,
columns=None,
filters=None,
categories=None,
index=None,
storage_options=None,
engine="auto",
calculate_divisions=None,
ignore_metadata_file=False,
metadata_task_size=None,
split_row_groups=False,
chunksize=None,
aggregate_files=None,
parquet_file_extension=(".parq", ".parquet", ".pq"),
**kwargs,
):
"""
Read a Parquet file into a Dask DataFrame
This reads a directory of Parquet data into a Dask.dataframe, one file per
partition. It selects the index among the sorted columns if any exist.
Parameters
----------
path : str or list
Source directory for data, or path(s) to individual parquet files.
Prefix with a protocol like ``s3://`` to read from alternative
filesystems. To read from multiple files you can pass a globstring or a
list of paths, with the caveat that they must all have the same
protocol.
columns : str or list, default None
Field name(s) to read in as columns in the output. By default all
non-index fields will be read (as determined by the pandas parquet
metadata, if present). Provide a single field name instead of a list to
read in the data as a Series.
filters : Union[List[Tuple[str, str, Any]], List[List[Tuple[str, str, Any]]]], default None
List of filters to apply, like ``[[('col1', '==', 0), ...], ...]``.
Using this argument will NOT result in row-wise filtering of the final
partitions unless ``engine="pyarrow"`` is also specified. For
other engines, filtering is only performed at the partition level, that is,
to prevent the loading of some row-groups and/or files.
For the "pyarrow" engine, predicates can be expressed in disjunctive
normal form (DNF). This means that the inner-most tuple describes a single
column predicate. These inner predicates are combined with an AND
conjunction into a larger predicate. The outer-most list then combines all
of the combined filters with an OR disjunction.
Predicates can also be expressed as a ``List[Tuple]``. These are evaluated
        as an AND conjunction. To express OR in predicates, one must use the
(preferred for "pyarrow") ``List[List[Tuple]]`` notation.
Note that the "fastparquet" engine does not currently support DNF for
the filtering of partitioned columns (``List[Tuple]`` is required).
index : str, list or False, default None
Field name(s) to use as the output frame index. By default will be
inferred from the pandas parquet file metadata, if present. Use ``False``
to read all fields as columns.
categories : list or dict, default None
For any fields listed here, if the parquet encoding is Dictionary,
the column will be created with dtype category. Use only if it is
guaranteed that the column is encoded as dictionary in all row-groups.
If a list, assumes up to 2**16-1 labels; if a dict, specify the number
of labels expected; if None, will load categories automatically for
data written by dask/fastparquet, not otherwise.
storage_options : dict, default None
Key/value pairs to be passed on to the file-system backend, if any.
open_file_options : dict, default None
Key/value arguments to be passed along to ``AbstractFileSystem.open``
when each parquet data file is open for reading. Experimental
(optimized) "precaching" for remote file systems (e.g. S3, GCS) can
be enabled by adding ``{"method": "parquet"}`` under the
``"precache_options"`` key. Also, a custom file-open function can be
used (instead of ``AbstractFileSystem.open``), by specifying the
desired function under the ``"open_file_func"`` key.
engine : {'auto', 'fastparquet', 'pyarrow'}, default 'auto'
Parquet library to use. Options include: 'auto', 'fastparquet', and
'pyarrow'. Defaults to 'auto', which uses ``fastparquet`` if it is
installed, and falls back to ``pyarrow`` otherwise. Note that in the
future this default ordering for 'auto' will switch, with ``pyarrow``
being used if it is installed, and falling back to ``fastparquet``.
calculate_divisions : bool, default False
Whether to use Parquet metadata statistics (when available) to
calculate divisions for the output DataFrame collection. This option
will be ignored if ``index`` is not specified and there is no physical
index column specified in the custom "pandas" Parquet metadata. Note
that ``calculate_divisions=True`` may be extremely slow on some systems,
and should be avoided when reading from remote storage.
ignore_metadata_file : bool, default False
Whether to ignore the global ``_metadata`` file (when one is present).
If ``True``, or if the global ``_metadata`` file is missing, the parquet
metadata may be gathered and processed in parallel. Parallel metadata
processing is currently supported for ``ArrowDatasetEngine`` only.
metadata_task_size : int, default configurable
If parquet metadata is processed in parallel (see ``ignore_metadata_file``
description above), this argument can be used to specify the number of
dataset files to be processed by each task in the Dask graph. If this
argument is set to ``0``, parallel metadata processing will be disabled.
The default values for local and remote filesystems can be specified
with the "metadata-task-size-local" and "metadata-task-size-remote"
config fields, respectively (see "dataframe.parquet").
split_row_groups : bool or int, default False
If True, then each output dataframe partition will correspond to a single
parquet-file row-group. If False, each partition will correspond to a
complete file. If a positive integer value is given, each dataframe
partition will correspond to that number of parquet row-groups (or fewer).
chunksize : int or str, default None
The desired size of each output ``DataFrame`` partition in terms of total
(uncompressed) parquet storage space. If specified, adjacent row-groups
and/or files will be aggregated into the same output partition until the
cumulative ``total_byte_size`` parquet-metadata statistic reaches this
value. Use `aggregate_files` to enable/disable inter-file aggregation.
aggregate_files : bool or str, default None
Whether distinct file paths may be aggregated into the same output
partition. This parameter is only used when `chunksize` is specified
or when `split_row_groups` is an integer >1. A setting of True means
that any two file paths may be aggregated into the same output partition,
while False means that inter-file aggregation is prohibited.
For "hive-partitioned" datasets, a "partition"-column name can also be
specified. In this case, we allow the aggregation of any two files
sharing a file path up to, and including, the corresponding directory name.
For example, if ``aggregate_files`` is set to ``"section"`` for the
directory structure below, ``03.parquet`` and ``04.parquet`` may be
aggregated together, but ``01.parquet`` and ``02.parquet`` cannot be.
If, however, ``aggregate_files`` is set to ``"region"``, ``01.parquet``
may be aggregated with ``02.parquet``, and ``03.parquet`` may be aggregated
with ``04.parquet``::
dataset-path/
├── region=1/
│ ├── section=a/
│ │ └── 01.parquet
│ ├── section=b/
│ └── └── 02.parquet
└── region=2/
├── section=a/
│ ├── 03.parquet
└── └── 04.parquet
Note that the default behavior of ``aggregate_files`` is ``False``.
parquet_file_extension: str, tuple[str], or None, default (".parq", ".parquet", ".pq")
A file extension or an iterable of extensions to use when discovering
parquet files in a directory. Files that don't match these extensions
will be ignored. This argument only applies when ``paths`` corresponds
to a directory and no ``_metadata`` file is present (or
``ignore_metadata_file=True``). Passing in ``parquet_file_extension=None``
will treat all files in the directory as parquet files.
The purpose of this argument is to ensure that the engine will ignore
unsupported metadata files (like Spark's '_SUCCESS' and 'crc' files).
It may be necessary to change this argument if the data files in your
parquet dataset do not end in ".parq", ".parquet", or ".pq".
**kwargs: dict (of dicts)
Passthrough key-word arguments for read backend.
The top-level keys correspond to the appropriate operation type, and
the second level corresponds to the kwargs that will be passed on to
the underlying ``pyarrow`` or ``fastparquet`` function.
Supported top-level keys: 'dataset' (for opening a ``pyarrow`` dataset),
'file' or 'dataset' (for opening a ``fastparquet.ParquetFile``), 'read'
(for the backend read function), 'arrow_to_pandas' (for controlling the
arguments passed to convert from a ``pyarrow.Table.to_pandas()``).
Any element of kwargs that is not defined under these top-level keys
will be passed through to the `engine.read_partitions` classmethod as a
stand-alone argument (and will be ignored by the engine implementations
defined in ``dask.dataframe``).
Examples
--------
>>> df = dd.read_parquet('s3://bucket/my-parquet-data') # doctest: +SKIP
See Also
--------
to_parquet
pyarrow.parquet.ParquetDataset
"""
if "read_from_paths" in kwargs:
kwargs.pop("read_from_paths")
warnings.warn(
"`read_from_paths` is no longer supported and will be ignored.",
FutureWarning,
)
# Handle gather_statistics deprecation
if "gather_statistics" in kwargs:
if calculate_divisions is None:
calculate_divisions = kwargs.pop("gather_statistics")
warnings.warn(
"``gather_statistics`` is deprecated and will be removed in a "
"future release. Please use ``calculate_divisions`` instead.",
FutureWarning,
)
else:
warnings.warn(
f"``gather_statistics`` is deprecated. Ignoring this option "
f"in favor of ``calculate_divisions={calculate_divisions}``",
FutureWarning,
)
calculate_divisions = calculate_divisions or False
# We support a top-level `parquet_file_extension` kwarg, but
# must check if the deprecated `require_extension` option is
# being passed to the engine. If `parquet_file_extension` is
# set to the default value, and `require_extension` was also
# specified, we will use `require_extension` but warn the user.
if (
"dataset" in kwargs
and "require_extension" in kwargs["dataset"]
and parquet_file_extension == (".parq", ".parquet", ".pq")
):
parquet_file_extension = kwargs["dataset"].pop("require_extension")
warnings.warn(
"require_extension is deprecated, and will be removed from "
"read_parquet in a future release. Please use the top-level "
"parquet_file_extension argument instead.",
FutureWarning,
)
# Store initial function arguments
input_kwargs = {
"columns": columns,
"filters": filters,
"categories": categories,
"index": index,
"storage_options": storage_options,
"engine": engine,
"calculate_divisions": calculate_divisions,
"ignore_metadata_file": ignore_metadata_file,
"metadata_task_size": metadata_task_size,
"split_row_groups": split_row_groups,
"chunksize=": chunksize,
"aggregate_files": aggregate_files,
"parquet_file_extension": parquet_file_extension,
**kwargs,
}
if isinstance(columns, str):
input_kwargs["columns"] = [columns]
df = read_parquet(path, **input_kwargs)
return df[columns]
if columns is not None:
columns = list(columns)
if isinstance(engine, str):
engine = get_engine(engine, bool(kwargs))
if hasattr(path, "name"):
path = stringify_path(path)
# Update input_kwargs and tokenize inputs
label = "read-parquet-"
input_kwargs.update({"columns": columns, "engine": engine})
output_name = label + tokenize(path, **input_kwargs)
fs, _, paths = get_fs_token_paths(path, mode="rb", storage_options=storage_options)
paths = sorted(paths, key=natural_sort_key) # numeric rather than glob ordering
auto_index_allowed = False
if index is None:
# User is allowing auto-detected index
auto_index_allowed = True
if index and isinstance(index, str):
index = [index]
read_metadata_result = engine.read_metadata(
fs,
paths,
categories=categories,
index=index,
gather_statistics=calculate_divisions,
filters=filters,
split_row_groups=split_row_groups,
chunksize=chunksize,
aggregate_files=aggregate_files,
ignore_metadata_file=ignore_metadata_file,
metadata_task_size=metadata_task_size,
parquet_file_extension=parquet_file_extension,
**kwargs,
)
# In the future, we may want to give the engine the
# option to return a dedicated element for `common_kwargs`.
# However, to avoid breaking the API, we just embed this
# data in the first element of `parts` for now.
# The logic below is intended to handle backward and forward
# compatibility with a user-defined engine.
meta, statistics, parts, index = read_metadata_result[:4]
common_kwargs = {}
aggregation_depth = False
if len(parts):
# For now, `common_kwargs` and `aggregation_depth`
# may be stored in the first element of `parts`
common_kwargs = parts[0].pop("common_kwargs", {})
aggregation_depth = parts[0].pop("aggregation_depth", aggregation_depth)
# Parse dataset statistics from metadata (if available)
parts, divisions, index, index_in_columns = process_statistics(
parts,
statistics,
filters,
index,
chunksize,
split_row_groups,
fs,
aggregation_depth,
)
# Account for index and columns arguments.
# Modify `meta` dataframe accordingly
meta, index, columns = set_index_columns(
meta, index, columns, index_in_columns, auto_index_allowed
)
if meta.index.name == NONE_LABEL:
meta.index.name = None
# Set the index that was previously treated as a column
if index_in_columns:
meta = meta.set_index(index)
if meta.index.name == NONE_LABEL:
meta.index.name = None
if len(divisions) < 2:
# empty dataframe - just use meta
graph = {(output_name, 0): meta}
divisions = (None, None)
else:
# Create Blockwise layer
layer = DataFrameIOLayer(
output_name,
columns,
parts,
ParquetFunctionWrapper(
engine,
fs,
meta,
columns,
index,
{}, # All kwargs should now be in `common_kwargs`
common_kwargs,
),
label=label,
creation_info={
"func": read_parquet,
"args": (path,),
"kwargs": input_kwargs,
},
)
graph = HighLevelGraph({output_name: layer}, {output_name: set()})
return new_dd_object(graph, output_name, meta, divisions)
|
def read_parquet(
path,
columns=None,
filters=None,
categories=None,
index=None,
storage_options=None,
engine="auto",
calculate_divisions=None,
ignore_metadata_file=False,
metadata_task_size=None,
split_row_groups=False,
chunksize=None,
aggregate_files=None,
parquet_file_extension=(".parq", ".parquet", ".pq"),
**kwargs,
):
"""
Read a Parquet file into a Dask DataFrame
This reads a directory of Parquet data into a Dask.dataframe, one file per
partition. It selects the index among the sorted columns if any exist.
Parameters
----------
path : str or list
Source directory for data, or path(s) to individual parquet files.
Prefix with a protocol like ``s3://`` to read from alternative
filesystems. To read from multiple files you can pass a globstring or a
list of paths, with the caveat that they must all have the same
protocol.
columns : str or list, default None
Field name(s) to read in as columns in the output. By default all
non-index fields will be read (as determined by the pandas parquet
metadata, if present). Provide a single field name instead of a list to
read in the data as a Series.
filters : Union[List[Tuple[str, str, Any]], List[List[Tuple[str, str, Any]]]], default None
List of filters to apply, like ``[[('col1', '==', 0), ...], ...]``.
Using this argument will NOT result in row-wise filtering of the final
partitions unless ``engine="pyarrow"`` is also specified. For
other engines, filtering is only performed at the partition level, that is,
to prevent the loading of some row-groups and/or files.
For the "pyarrow" engine, predicates can be expressed in disjunctive
normal form (DNF). This means that the inner-most tuple describes a single
column predicate. These inner predicates are combined with an AND
conjunction into a larger predicate. The outer-most list then combines all
of the combined filters with an OR disjunction.
Predicates can also be expressed as a ``List[Tuple]``. These are evaluated
as an AND conjunction. To express OR in predicates, one must use the
(preferred for "pyarrow") ``List[List[Tuple]]`` notation.
Note that the "fastparquet" engine does not currently support DNF for
the filtering of partitioned columns (``List[Tuple]`` is required).
index : str, list or False, default None
Field name(s) to use as the output frame index. By default will be
inferred from the pandas parquet file metadata, if present. Use ``False``
to read all fields as columns.
categories : list or dict, default None
For any fields listed here, if the parquet encoding is Dictionary,
the column will be created with dtype category. Use only if it is
guaranteed that the column is encoded as dictionary in all row-groups.
If a list, assumes up to 2**16-1 labels; if a dict, specify the number
of labels expected; if None, will load categories automatically for
data written by dask/fastparquet, not otherwise.
storage_options : dict, default None
Key/value pairs to be passed on to the file-system backend, if any.
open_file_options : dict, default None
Key/value arguments to be passed along to ``AbstractFileSystem.open``
when each parquet data file is open for reading. Experimental
(optimized) "precaching" for remote file systems (e.g. S3, GCS) can
be enabled by adding ``{"method": "parquet"}`` under the
``"precache_options"`` key. Also, a custom file-open function can be
used (instead of ``AbstractFileSystem.open``), by specifying the
desired function under the ``"open_file_func"`` key.
engine : {'auto', 'fastparquet', 'pyarrow'}, default 'auto'
Parquet library to use. Options include: 'auto', 'fastparquet', and
'pyarrow'. Defaults to 'auto', which uses ``fastparquet`` if it is
installed, and falls back to ``pyarrow`` otherwise. Note that in the
future this default ordering for 'auto' will switch, with ``pyarrow``
being used if it is installed, and falling back to ``fastparquet``.
calculate_divisions : bool, default False
Whether to use Parquet metadata statistics (when available) to
calculate divisions for the output DataFrame collection. This option
will be ignored if ``index`` is not specified and there is no physical
index column specified in the custom "pandas" Parquet metadata. Note
that ``calculate_divisions=True`` may be extremely slow on some systems,
and should be avoided when reading from remote storage.
ignore_metadata_file : bool, default False
Whether to ignore the global ``_metadata`` file (when one is present).
If ``True``, or if the global ``_metadata`` file is missing, the parquet
metadata may be gathered and processed in parallel. Parallel metadata
processing is currently supported for ``ArrowDatasetEngine`` only.
metadata_task_size : int, default configurable
If parquet metadata is processed in parallel (see ``ignore_metadata_file``
description above), this argument can be used to specify the number of
dataset files to be processed by each task in the Dask graph. If this
argument is set to ``0``, parallel metadata processing will be disabled.
The default values for local and remote filesystems can be specified
with the "metadata-task-size-local" and "metadata-task-size-remote"
config fields, respectively (see "dataframe.parquet").
split_row_groups : bool or int, default False
If True, then each output dataframe partition will correspond to a single
parquet-file row-group. If False, each partition will correspond to a
complete file. If a positive integer value is given, each dataframe
partition will correspond to that number of parquet row-groups (or fewer).
chunksize : int or str, default None
The desired size of each output ``DataFrame`` partition in terms of total
(uncompressed) parquet storage space. If specified, adjacent row-groups
and/or files will be aggregated into the same output partition until the
cumulative ``total_byte_size`` parquet-metadata statistic reaches this
value. Use `aggregate_files` to enable/disable inter-file aggregation.
aggregate_files : bool or str, default None
Whether distinct file paths may be aggregated into the same output
partition. This parameter is only used when `chunksize` is specified
or when `split_row_groups` is an integer >1. A setting of True means
that any two file paths may be aggregated into the same output partition,
while False means that inter-file aggregation is prohibited.
For "hive-partitioned" datasets, a "partition"-column name can also be
specified. In this case, we allow the aggregation of any two files
sharing a file path up to, and including, the corresponding directory name.
For example, if ``aggregate_files`` is set to ``"section"`` for the
directory structure below, ``03.parquet`` and ``04.parquet`` may be
aggregated together, but ``01.parquet`` and ``02.parquet`` cannot be.
If, however, ``aggregate_files`` is set to ``"region"``, ``01.parquet``
may be aggregated with ``02.parquet``, and ``03.parquet`` may be aggregated
with ``04.parquet``::
dataset-path/
├── region=1/
│ ├── section=a/
│ │ └── 01.parquet
│ ├── section=b/
│ └── └── 02.parquet
└── region=2/
├── section=a/
│ ├── 03.parquet
└── └── 04.parquet
Note that the default behavior of ``aggregate_files`` is ``False``.
parquet_file_extension: str, tuple[str], or None, default (".parq", ".parquet", ".pq")
A file extension or an iterable of extensions to use when discovering
parquet files in a directory. Files that don't match these extensions
will be ignored. This argument only applies when ``paths`` corresponds
to a directory and no ``_metadata`` file is present (or
``ignore_metadata_file=True``). Passing in ``parquet_file_extension=None``
will treat all files in the directory as parquet files.
The purpose of this argument is to ensure that the engine will ignore
unsupported metadata files (like Spark's '_SUCCESS' and 'crc' files).
It may be necessary to change this argument if the data files in your
parquet dataset do not end in ".parq", ".parquet", or ".pq".
**kwargs: dict (of dicts)
Passthrough key-word arguments for read backend.
The top-level keys correspond to the appropriate operation type, and
the second level corresponds to the kwargs that will be passed on to
the underlying ``pyarrow`` or ``fastparquet`` function.
Supported top-level keys: 'dataset' (for opening a ``pyarrow`` dataset),
'file' or 'dataset' (for opening a ``fastparquet.ParquetFile``), 'read'
(for the backend read function), 'arrow_to_pandas' (for controlling the
arguments passed to convert from a ``pyarrow.Table.to_pandas()``).
Any element of kwargs that is not defined under these top-level keys
will be passed through to the `engine.read_partitions` classmethod as a
stand-alone argument (and will be ignored by the engine implementations
defined in ``dask.dataframe``).
Examples
--------
>>> df = dd.read_parquet('s3://bucket/my-parquet-data') # doctest: +SKIP
See Also
--------
to_parquet
pyarrow.parquet.ParquetDataset
"""
if "read_from_paths" in kwargs:
kwargs.pop("read_from_paths")
warnings.warn(
"`read_from_paths` is no longer supported and will be ignored.",
FutureWarning,
)
# Handle gather_statistics deprecation
if "gather_statistics" in kwargs:
if calculate_divisions is None:
calculate_divisions = kwargs.pop("gather_statistics")
warnings.warn(
"``gather_statistics`` is deprecated and will be removed in a "
"future release. Please use ``calculate_divisions`` instead.",
FutureWarning,
)
else:
warnings.warn(
f"``gather_statistics`` is deprecated. Ignoring this option "
f"in favor of ``calculate_divisions={calculate_divisions}``",
FutureWarning,
)
calculate_divisions = bool(calculate_divisions)
# We support a top-level `parquet_file_extension` kwarg, but
# must check if the deprecated `require_extension` option is
# being passed to the engine. If `parquet_file_extension` is
# set to the default value, and `require_extension` was also
# specified, we will use `require_extension` but warn the user.
if (
"dataset" in kwargs
and "require_extension" in kwargs["dataset"]
and parquet_file_extension == (".parq", ".parquet", ".pq")
):
parquet_file_extension = kwargs["dataset"].pop("require_extension")
warnings.warn(
"require_extension is deprecated, and will be removed from "
"read_parquet in a future release. Please use the top-level "
"parquet_file_extension argument instead.",
FutureWarning,
)
# Store initial function arguments
input_kwargs = {
"columns": columns,
"filters": filters,
"categories": categories,
"index": index,
"storage_options": storage_options,
"engine": engine,
"calculate_divisions": calculate_divisions,
"ignore_metadata_file": ignore_metadata_file,
"metadata_task_size": metadata_task_size,
"split_row_groups": split_row_groups,
"chunksize=": chunksize,
"aggregate_files": aggregate_files,
"parquet_file_extension": parquet_file_extension,
**kwargs,
}
if isinstance(columns, str):
input_kwargs["columns"] = [columns]
df = read_parquet(path, **input_kwargs)
return df[columns]
if columns is not None:
columns = list(columns)
if isinstance(engine, str):
engine = get_engine(engine, bool(kwargs))
if hasattr(path, "name"):
path = stringify_path(path)
# Update input_kwargs and tokenize inputs
label = "read-parquet-"
input_kwargs.update({"columns": columns, "engine": engine})
output_name = label + tokenize(path, **input_kwargs)
fs, _, paths = get_fs_token_paths(path, mode="rb", storage_options=storage_options)
paths = sorted(paths, key=natural_sort_key) # numeric rather than glob ordering
auto_index_allowed = False
if index is None:
# User is allowing auto-detected index
auto_index_allowed = True
if index and isinstance(index, str):
index = [index]
read_metadata_result = engine.read_metadata(
fs,
paths,
categories=categories,
index=index,
gather_statistics=calculate_divisions,
filters=filters,
split_row_groups=split_row_groups,
chunksize=chunksize,
aggregate_files=aggregate_files,
ignore_metadata_file=ignore_metadata_file,
metadata_task_size=metadata_task_size,
parquet_file_extension=parquet_file_extension,
**kwargs,
)
# In the future, we may want to give the engine the
# option to return a dedicated element for `common_kwargs`.
# However, to avoid breaking the API, we just embed this
# data in the first element of `parts` for now.
# The logic below is intended to handle backward and forward
# compatibility with a user-defined engine.
meta, statistics, parts, index = read_metadata_result[:4]
common_kwargs = {}
aggregation_depth = False
if len(parts):
# For now, `common_kwargs` and `aggregation_depth`
# may be stored in the first element of `parts`
common_kwargs = parts[0].pop("common_kwargs", {})
aggregation_depth = parts[0].pop("aggregation_depth", aggregation_depth)
# Parse dataset statistics from metadata (if available)
parts, divisions, index, index_in_columns = process_statistics(
parts,
statistics,
filters,
index,
chunksize,
split_row_groups,
fs,
aggregation_depth,
)
# Account for index and columns arguments.
# Modify `meta` dataframe accordingly
meta, index, columns = set_index_columns(
meta, index, columns, index_in_columns, auto_index_allowed
)
if meta.index.name == NONE_LABEL:
meta.index.name = None
# Set the index that was previously treated as a column
if index_in_columns:
meta = meta.set_index(index)
if meta.index.name == NONE_LABEL:
meta.index.name = None
if len(divisions) < 2:
# empty dataframe - just use meta
graph = {(output_name, 0): meta}
divisions = (None, None)
else:
# Create Blockwise layer
layer = DataFrameIOLayer(
output_name,
columns,
parts,
ParquetFunctionWrapper(
engine,
fs,
meta,
columns,
index,
{}, # All kwargs should now be in `common_kwargs`
common_kwargs,
),
label=label,
creation_info={
"func": read_parquet,
"args": (path,),
"kwargs": input_kwargs,
},
)
graph = HighLevelGraph({output_name: layer}, {output_name: set()})
return new_dd_object(graph, output_name, meta, divisions)
|
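As a hedged aside (not part of the dataset row above): the ``filters``, ``engine`` and ``calculate_divisions`` arguments documented in ``read_parquet`` can be exercised against a tiny local file; the file name and column names below are invented for illustration only.
import pandas as pd
import dask.dataframe as dd
# Write a small local parquet file, then read it back with pyarrow so the
# DNF-style filter is applied row-wise, as the docstring above describes.
pd.DataFrame({"year": [2020, 2021, 2021], "value": [1.0, 2.0, 3.0]}).to_parquet(
    "example_data.parquet"
)
df = dd.read_parquet(
    "example_data.parquet",
    engine="pyarrow",
    filters=[[("year", "==", 2021)]],  # List[List[Tuple]] (DNF) notation
    calculate_divisions=False,
)
print(df.compute())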
22,298 |
def looks_like_xml(path, regex=TOOL_REGEX):
full_path = os.path.abspath(path)
if not full_path.endswith(".xml"):
return False
if not os.path.getsize(full_path):
return False
if(checkers.check_binary(full_path) or
checkers.check_image(full_path) or
checkers.is_gzip(full_path) or
checkers.is_bz2(full_path) or
checkers.is_zip(full_path)):
return False
with open(path, "r") as f:
try:
start_contents = f.read(5 * 1024)
except UnicodeDecodeError:
return False
if regex.search(start_contents):
return True
return False
|
def looks_like_xml(path, regex=TOOL_REGEX):
full_path = os.path.abspath(path)
if not full_path.endswith(".xml"):
return False
if not os.path.getsize(full_path):
return False
if(checkers.check_binary(full_path) or
checkers.check_image(full_path) or
checkers.is_gzip(full_path) or
checkers.is_bz2(full_path) or
checkers.is_zip(full_path)):
return False
with io.open(path, "r", encoding='utf-8') as f:
try:
start_contents = f.read(5 * 1024)
except UnicodeDecodeError:
return False
if regex.search(start_contents):
return True
return False
|
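As a hedged sketch of the idea behind the pair above: ``TOOL_REGEX`` and the ``checkers`` helpers come from the surrounding package, so this simplified stand-alone version substitutes a made-up pattern and drops the binary/compression checks.
import os
import re
# Stand-in pattern; the real TOOL_REGEX is defined elsewhere in the package.
TOOL_REGEX = re.compile(r"<tool")
def looks_like_tool_xml(path, regex=TOOL_REGEX):
    # Mirrors the structure above: extension check, size check, then a
    # bounded, UTF-8 decoded read of the first 5 KiB.
    if not path.endswith(".xml") or not os.path.getsize(path):
        return False
    with open(path, "r", encoding="utf-8") as f:
        try:
            start_contents = f.read(5 * 1024)
        except UnicodeDecodeError:
            return False
    return bool(regex.search(start_contents))
# looks_like_tool_xml("my_tool.xml")  # hypothetical file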
7,363 |
def _dynamic_max_trials(n_inliers, n_samples, min_samples, probability):
"""Determine number trials such that at least one outlier-free subset is
sampled for the given inlier/outlier ratio.
Parameters
----------
n_inliers : int
Number of inliers in the data.
n_samples : int
Total number of samples in the data.
min_samples : int
Minimum number of samples chosen randomly from original data.
probability : float
Probability (confidence) that one outlier-free sample is generated.
Returns
-------
trials : int
Number of trials.
"""
inlier_ratio = n_inliers / float(n_samples)
nom = max(_EPSILON, 1 - probability)
denom = max(_EPSILON, 1 - inlier_ratio ** min_samples)
if nom == 1:
return 0
if denom == 1:
return float("inf")
return abs(float(np.ceil(np.log(nom) / np.log(denom))))
|
def _dynamic_max_trials(n_inliers, n_samples, min_samples, probability):
"""Determine number trials such that at least one outlier-free subset is
sampled for the given inlier/outlier ratio.
Parameters
----------
n_inliers : int
Number of inliers in the data.
n_samples : int
Total number of samples in the data.
min_samples : int
Minimum number of samples chosen randomly from original data.
probability : float
Probability (confidence) that one outlier-free sample is generated.
Returns
-------
trials : int
Number of trials.
"""
inlier_ratio = n_inliers / n_samples
nom = max(_EPSILON, 1 - probability)
denom = max(_EPSILON, 1 - inlier_ratio ** min_samples)
if nom == 1:
return 0
if denom == 1:
return float("inf")
return abs(float(np.ceil(np.log(nom) / np.log(denom))))
|
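A worked example of the trial-count formula above, i.e. N = log(1 - probability) / log(1 - inlier_ratio**min_samples); the value ``_EPSILON = np.spacing(1)`` is an assumption about the module-level constant, and the inlier counts are invented.
import numpy as np
_EPSILON = np.spacing(1)  # assumed value of the module-level constant
def dynamic_max_trials(n_inliers, n_samples, min_samples, probability):
    # Same expression as the snippet above.
    inlier_ratio = n_inliers / n_samples
    nom = max(_EPSILON, 1 - probability)
    denom = max(_EPSILON, 1 - inlier_ratio ** min_samples)
    if nom == 1:
        return 0
    if denom == 1:
        return float("inf")
    return abs(float(np.ceil(np.log(nom) / np.log(denom))))
# 50% inliers, 2-point model samples, 99% confidence -> 17 trials.
print(dynamic_max_trials(n_inliers=50, n_samples=100, min_samples=2, probability=0.99))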
55,196 |
def qrom_cost(constants):
r"""Return the number of Toffoli gates and the expansion factor needed to implement a QROM.
The complexity of a QROM computation in the most general form is given by
[`arXiv:2011.03494 <https://arxiv.org/abs/2011.03494>`_]
.. math::
\text{cost} = \left \lceil \frac{a + b}{k} \right \rceil + \left \lceil \frac{c}{k} \right
\rceil + d \left ( k + e \right ),
where :math:`a, b, c, d, e` are constants that depend on the nature of the QROM implementation
and the expansion factor :math:`k` is an integer power of two, :math:`k = 2^n`, that minimizes
the cost. This function computes the optimum :math:`k` and the minimum cost for a QROM
specification.
To obtain the optimum value of :math:`k`, we first assume that the cost function is continuous
and use differentiation to obtain the value of :math:`k` that minimizes the cost. This value of
:math:`k` is not necessarily an integer power of 2. We then obtain the value of :math:`n` as
:math:`n = \log_2(k)` and compute the cost for
:math:`n_{int}= \left \{\left \lceil n \right \rceil, \left \lfloor n \right \rfloor \right \}`.
The value of :math:`n_{int}` that gives the smaller cost is used to compute the optimum
:math:`k`.
Args:
constants (tuple[float]): constants determining a QROM
Returns:
tuple(int, int): the cost and the expansion factor for the QROM
**Example**
>>> constants = (151.0, 7.0, 151.0, 30.0, -1.0)
>>> qrom_cost(constants)
168, 4
"""
a, b, c, d, e = constants
n = np.log2(((a + b + c) / d) ** 0.5)
k = np.array([2 ** np.floor(n), 2 ** np.ceil(n)])
cost = np.ceil((a + b) / k) + np.ceil(c / k) + d * (k + e)
return int(cost[np.argmin(cost)]), int(k[np.argmin(cost)])
|
def qrom_cost(constants):
r"""Return the number of Toffoli gates and the expansion factor needed to implement a QROM.
The complexity of a QROM computation in the most general form is given by
[`arXiv:2011.03494 <https://arxiv.org/abs/2011.03494>`_]
.. math::
\text{cost} = \left \lceil \frac{a + b}{k} \right \rceil + \left \lceil \frac{c}{k} \right
\rceil + d \left ( k + e \right ),
where :math:`a, b, c, d, e` are constants that depend on the nature of the QROM implementation
and :math:`k=2^n` is an expansion factor that minimizes the cost.
This function computes the optimum :math:`k` and the minimum cost for a QROM
specification.
To obtain the optimum value of :math:`k`, we first assume that the cost function is continuous
and use differentiation to obtain the value of :math:`k` that minimizes the cost. This value of
:math:`k` is not necessarily an integer power of 2. We then obtain the value of :math:`n` as
:math:`n = \log_2(k)` and compute the cost for
:math:`n_{int}= \left \{\left \lceil n \right \rceil, \left \lfloor n \right \rfloor \right \}`.
The value of :math:`n_{int}` that gives the smaller cost is used to compute the optimum
:math:`k`.
Args:
constants (tuple[float]): constants determining a QROM
Returns:
tuple(int, int): the cost and the expansion factor for the QROM
**Example**
>>> constants = (151.0, 7.0, 151.0, 30.0, -1.0)
>>> qrom_cost(constants)
168, 4
"""
a, b, c, d, e = constants
n = np.log2(((a + b + c) / d) ** 0.5)
k = np.array([2 ** np.floor(n), 2 ** np.ceil(n)])
cost = np.ceil((a + b) / k) + np.ceil(c / k) + d * (k + e)
return int(cost[np.argmin(cost)]), int(k[np.argmin(cost)])
|
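Checking the docstring example above by hand: with constants (151.0, 7.0, 151.0, 30.0, -1.0) the continuous optimum is k = sqrt((a + b + c) / d) ≈ 3.2, so the two candidate powers of two are 2 and 4. This is only a re-derivation of the numbers already shown, not additional dataset content.
import numpy as np
a, b, c, d, e = 151.0, 7.0, 151.0, 30.0, -1.0
for k in (2.0, 4.0):  # 2**floor(n) and 2**ceil(n) for n = log2(~3.2)
    cost = np.ceil((a + b) / k) + np.ceil(c / k) + d * (k + e)
    print(int(k), int(cost))  # k=2 -> 185, k=4 -> 168 (the reported minimum)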
31,555 |
def build_member_data(result: dict, readable_output: str, printable_result: dict):
"""helper function. Builds the member data for group endpoints."""
members = result.get('members')
members_printable_result = []
if members:
for member in members:
current_object_data = {'member-name': member.get('name'),
'member-uid': member.get('uid'),
'member-type': member.get('type')
}
if member.get('ipv4-address'):
current_object_data['member-ipv4-address'] = member.get('ipv4-address')
if member.get('ipv6-address'):
current_object_data['member-ipv6-address'] = member.get('ipv6-address')
member_domain = member.get('domain')
if member_domain:
current_object_data.update({'member-domain-name': member_domain.get('name'),
'member-domain-uid': member_domain.get('uid'),
'member-domain-type': member_domain.get('type'),
})
members_printable_result.append(current_object_data)
printable_result['members'] = members_printable_result
member_readable_output = tableToMarkdown('CheckPoint member data:',
members_printable_result,
['member-name', 'member-uid', 'member-type''member-ipv4-address',
'member-ipv6-address', 'member-domain-name', 'member-domain-uid'],
removeNull=True)
readable_output = readable_output + member_readable_output
return readable_output, printable_result
|
def build_member_data(result: dict, readable_output: str, printable_result: dict):
"""helper function. Builds the member data for group endpoints."""
members = result.get('members')
members_printable_result = []
if members:
for member in members:
current_object_data = {'member-name': member.get('name'),
'member-uid': member.get('uid'),
'member-type': member.get('type')
}
if member.get('ipv4-address'):
current_object_data['member-ipv4-address'] = member.get('ipv4-address')
if member.get('ipv6-address'):
current_object_data['member-ipv6-address'] = member.get('ipv6-address')
member_domain = member.get('domain')
if member_domain:
current_object_data.update({'member-domain-name': member_domain.get('name'),
'member-domain-uid': member_domain.get('uid'),
'member-domain-type': member_domain.get('type'),
})
members_printable_result.append(current_object_data)
printable_result['members'] = members_printable_result
member_readable_output = tableToMarkdown('CheckPoint member data:',
members_printable_result,
['member-name', 'member-uid', 'member-type', 'member-ipv4-address',
'member-ipv6-address', 'member-domain-name', 'member-domain-uid'],
removeNull=True)
readable_output = readable_output + member_readable_output
return readable_output, printable_result
|
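A hedged illustration of the data shape ``build_member_data`` consumes and produces; the member dictionary is invented, and ``tableToMarkdown`` (a Demisto/XSOAR helper) is replaced by a plain print so the sketch stays self-contained.
# Invented API response fragment, showing only the fields the helper reads.
result = {
    "members": [
        {
            "name": "host_1",
            "uid": "c6c8b806-aaaa-bbbb-cccc-000000000000",
            "type": "host",
            "ipv4-address": "10.0.0.1",
            "domain": {"name": "SMC User", "uid": "41e821a0", "type": "domain"},
        }
    ]
}
member = result["members"][0]
flat = {
    "member-name": member.get("name"),
    "member-uid": member.get("uid"),
    "member-type": member.get("type"),
    "member-ipv4-address": member.get("ipv4-address"),
    "member-domain-name": member["domain"].get("name"),
    "member-domain-uid": member["domain"].get("uid"),
}
print(flat)  # one row of the 'CheckPoint member data' table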
29,742 |
def _ffmpeg_call(infile, output, fmt='f32le', sample_rate=None, num_channels=1,
skip=None, max_len=None, cmd='ffmpeg',
rg_mode=None, rg_preamp_db=0.0):
"""
Create a sequence of strings indicating ffmpeg how to be called as well as
the parameters necessary to decode the given input (file) to the given
format, at the given offset and for the given length to the given output.
Parameters
----------
infile : str
Name of the audio sound file to decode.
output : str
Where to decode to.
fmt : {'f32le', 's16le'}, optional
Format of the samples:
- 'f32le' for float32, little-endian,
- 's16le' for signed 16-bit int, little-endian.
sample_rate : int, optional
Sample rate to re-sample the signal to (if set) [Hz].
num_channels : int, optional
Number of channels to reduce the signal to.
skip : float, optional
Number of seconds to skip at beginning of file.
max_len : float, optional
Maximum length in seconds to decode.
cmd : {'ffmpeg','avconv'}, optional
Decoding command (defaults to ffmpeg, alternatively supports avconv).
rg_mode : {'track','album', None}, optional
Specify the ReplayGain volume-levelling mode (None to disable).
rg_preamp_db : float, optional
Increase the volume by this many dB after applying ReplayGain tags.
Returns
-------
list
ffmpeg call.
Notes
-----
'avconv' rounds decoding positions and decodes in blocks of 4096 length
resulting in incorrect start and stop positions. Thus it should only be
used to decode complete files.
"""
# Note: avconv rounds decoding positions and decodes in blocks of 4096
# length resulting in incorrect start and stop positions
if cmd == 'avconv' and skip is not None and max_len is not None:
raise RuntimeError('avconv has a bug, which results in wrong audio '
'slices! Decode the audio files to .wav first or '
'use ffmpeg.')
# input type handling
if isinstance(infile, Signal):
in_fmt = _ffmpeg_fmt(infile.dtype)
in_ac = str(int(infile.num_channels))
in_ar = str(int(infile.sample_rate))
infile = str("pipe:0")
else:
infile = str(infile)
# general options
call = [cmd, "-v", "quiet", "-y"]
# input options
if skip:
# use "%f" to avoid scientific float notation
call.extend(["-ss", "%f" % float(skip)])
# if we decode from STDIN, the format must be specified
if infile == "pipe:0":
call.extend(["-f", in_fmt, "-ac", in_ac, "-ar", in_ar])
call.extend(["-i", infile])
if rg_mode:
audio_filter = ("volume=replaygain=%s:replaygain_preamp=%.1f"
% (rg_mode, rg_preamp_db))
call.extend(["-af", audio_filter])
# output options
call.extend(["-f", str(fmt)])
if max_len:
# use "%f" to avoid scientific float notation
call.extend(["-t", "%f" % float(max_len)])
# output options
if num_channels:
call.extend(["-ac", str(int(num_channels))])
if sample_rate:
call.extend(["-ar", str(int(sample_rate))])
call.append(output)
return call
|
def _ffmpeg_call(infile, output, fmt='f32le', sample_rate=None, num_channels=1,
skip=None, max_len=None, cmd='ffmpeg',
rg_mode=None, rg_preamp_db=0.0):
"""
Create a sequence of strings indicating ffmpeg how to be called as well as
the parameters necessary to decode the given input (file) to the given
format, at the given offset and for the given length to the given output.
Parameters
----------
infile : str
Name of the audio sound file to decode.
output : str
Where to decode to.
fmt : {'f32le', 's16le'}, optional
Format of the samples:
- 'f32le' for float32, little-endian,
- 's16le' for signed 16-bit int, little-endian.
sample_rate : int, optional
Sample rate to re-sample the signal to (if set) [Hz].
num_channels : int, optional
Number of channels to reduce the signal to.
skip : float, optional
Number of seconds to skip at beginning of file.
max_len : float, optional
Maximum length in seconds to decode.
cmd : {'ffmpeg','avconv'}, optional
Decoding command (defaults to ffmpeg, alternatively supports avconv).
replaygain_mode : {'None', 'track','album'}, optional
Specify the ReplayGain volume-levelling mode (None to disable).
rg_preamp_db : float, optional
Increase the volume by this many dB after applying ReplayGain tags.
Returns
-------
list
ffmpeg call.
Notes
-----
'avconv' rounds decoding positions and decodes in blocks of 4096 length
resulting in incorrect start and stop positions. Thus it should only be
used to decode complete files.
"""
# Note: avconv rounds decoding positions and decodes in blocks of 4096
# length resulting in incorrect start and stop positions
if cmd == 'avconv' and skip is not None and max_len is not None:
raise RuntimeError('avconv has a bug, which results in wrong audio '
'slices! Decode the audio files to .wav first or '
'use ffmpeg.')
# input type handling
if isinstance(infile, Signal):
in_fmt = _ffmpeg_fmt(infile.dtype)
in_ac = str(int(infile.num_channels))
in_ar = str(int(infile.sample_rate))
infile = str("pipe:0")
else:
infile = str(infile)
# general options
call = [cmd, "-v", "quiet", "-y"]
# input options
if skip:
# use "%f" to avoid scientific float notation
call.extend(["-ss", "%f" % float(skip)])
# if we decode from STDIN, the format must be specified
if infile == "pipe:0":
call.extend(["-f", in_fmt, "-ac", in_ac, "-ar", in_ar])
call.extend(["-i", infile])
if rg_mode:
audio_filter = ("volume=replaygain=%s:replaygain_preamp=%.1f"
% (rg_mode, rg_preamp_db))
call.extend(["-af", audio_filter])
# output options
call.extend(["-f", str(fmt)])
if max_len:
# use "%f" to avoid scientific float notation
call.extend(["-t", "%f" % float(max_len)])
# output options
if num_channels:
call.extend(["-ac", str(int(num_channels))])
if sample_rate:
call.extend(["-ar", str(int(sample_rate))])
call.append(output)
return call
|
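For orientation, this is the kind of argument list the function above assembles; the input file name is hypothetical, and running it assumes ``_ffmpeg_call`` (and its ``Signal`` dependency) are importable from the surrounding audio module.
# Build (but do not run) an ffmpeg command: skip 1.5 s, decode at most 30 s,
# downmix to mono float32 at 44.1 kHz, and stream the raw samples to stdout.
call = _ffmpeg_call(
    "input.flac",      # hypothetical input file
    "pipe:1",          # decode to stdout
    fmt="f32le",
    sample_rate=44100,
    num_channels=1,
    skip=1.5,
    max_len=30,
)
print(call)
# ['ffmpeg', '-v', 'quiet', '-y', '-ss', '1.500000', '-i', 'input.flac',
#  '-f', 'f32le', '-t', '30.000000', '-ac', '1', '-ar', '44100', 'pipe:1']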
13,186 |
def parse_cli_args():
"""
Parse the command line arguments
"""
# get config paths
home_path = Path.home()
xdg_home_path = Path(os.environ.get("XDG_CONFIG_HOME", home_path / ".config"))
xdg_dirs_path = Path(os.environ.get("XDG_CONFIG_DIRS", "/etc/xdg"))
# get window manager
with Path(os.devnull).open("w") as devnull:
if subprocess.call(["pgrep", "i3"], stdout=devnull) == 0:
wm = "i3"
else:
wm = "sway"
# i3status config file default detection
# respect i3status' file detection order wrt issue #43
i3status_config_file_candidates = [
xdg_home_path / "py3status/config",
xdg_home_path / "i3status/config",
xdg_home_path / "i3/i3status.conf", # custom
home_path / ".i3status.conf",
home_path / ".i3/i3status.conf", # custom
xdg_dirs_path / "i3status/config",
Path("/etc/i3status.conf"),
]
for path in i3status_config_file_candidates:
if path.exists():
i3status_config_file_default = path
break
else:
# if files does not exists, defaults to ~/.i3/i3status.conf
i3status_config_file_default = i3status_config_file_candidates[3]
class Parser(argparse.ArgumentParser):
# print usages and exit on errors
def error(self, message):
print(f"\x1b[1;31merror: \x1b[0m{message}")
self.print_help()
self.exit(1)
# hide choices on errors
def _check_value(self, action, value):
if action.choices is not None and value not in action.choices:
raise argparse.ArgumentError(action, f"invalid choice: '{value}'")
class HelpFormatter(argparse.ArgumentDefaultsHelpFormatter):
def _format_action_invocation(self, action):
metavar = self._format_args(action, action.dest.upper())
return "{} {}".format(", ".join(action.option_strings), metavar)
# command line options
parser = Parser(
description="The agile, python-powered, i3status wrapper",
formatter_class=HelpFormatter,
)
parser.add_argument(
"-b",
"--dbus-notify",
action="store_true",
dest="dbus_notify",
help="send notifications via dbus instead of i3-nagbar",
)
parser.add_argument(
"-c",
"--config",
action="store",
default=i3status_config_file_default,
dest="i3status_config_path",
help="load config",
metavar="FILE",
type=Path,
)
parser.add_argument(
"-d",
"--debug",
action="store_true",
help="enable debug logging in syslog or log file if --log-file option is passed",
)
parser.add_argument(
"-g",
"--gevent",
action="store_true",
dest="gevent",
help="enable gevent monkey patching",
)
parser.add_argument(
"-i",
"--include",
action="append",
dest="include_paths",
help="append additional user-defined module paths",
metavar="PATH",
type=Path,
)
parser.add_argument(
"-l",
"--log-file",
action="store",
dest="log_file",
help="enable logging to FILE, this option is not set",
metavar="FILE",
type=Path,
)
parser.add_argument(
"-s",
"--standalone",
action="store_true",
dest="standalone",
help="run py3status without i3status",
)
parser.add_argument(
"-t",
"--timeout",
action="store",
default=60,
dest="cache_timeout",
help="default module cache timeout in seconds",
metavar="INT",
type=int,
)
parser.add_argument(
"-m",
"--disable-click-events",
action="store_true",
dest="disable_click_events",
help="disable all click events",
)
parser.add_argument(
"-u",
"--i3status",
action="store",
default=which("i3status") or "i3status",
dest="i3status_path",
help="specify i3status path",
metavar="PATH",
type=Path,
)
parser.add_argument(
"-v",
"--version",
action="store_true",
dest="print_version",
help="show py3status version and exit",
)
parser.add_argument(
"--wm",
action="store", # add comment to preserve formatting
dest="wm",
metavar="WINDOW_MANAGER",
default=wm,
choices=["i3", "sway"],
help="specify window manager i3 or sway",
)
# deprecations
parser.add_argument("-n", "--interval", help=argparse.SUPPRESS)
# parse options, command, etc
options = parser.parse_args()
# make versions
options.python_version = python_version()
options.version = version
if options.print_version:
msg = "py3status version {version} (python {python_version}) on {wm}"
print(msg.format(**vars(options)))
parser.exit()
# get wm
options.wm_name = options.wm
options.wm = {
"i3": {"msg": "i3-msg", "nag": "i3-nagbar"},
"sway": {"msg": "swaymsg", "nag": "swaynag"},
}[options.wm]
# make include path to search for user modules if None
if not options.include_paths:
options.include_paths = [
xdg_home_path / "py3status/modules",
xdg_home_path / "i3status/py3status",
xdg_home_path / "i3/py3status",
home_path / ".i3/py3status",
]
include_paths = []
for path in options.include_paths:
path = path.resolve()
if path.is_dir() and any(path.iterdir()):
include_paths.append(path)
options.include_paths = include_paths
# defaults
del options.interval
del options.print_version
options.minimum_interval = 0.1 # minimum module update interval
options.click_events = not options.__dict__.pop("disable_click_events")
# all done
return options
|
def parse_cli_args():
"""
Parse the command line arguments
"""
# get config paths
home_path = Path.home()
xdg_home_path = Path(os.environ.get("XDG_CONFIG_HOME", home_path / ".config"))
xdg_dirs_path = Path(os.environ.get("XDG_CONFIG_DIRS", "/etc/xdg"))
# get window manager
with Path(os.devnull).open("w") as devnull:
if subprocess.call(["pgrep", "i3"], stdout=devnull) == 0:
wm = "i3"
else:
wm = "sway"
# i3status config file default detection
# respect i3status' file detection order wrt issue #43
i3status_config_file_candidates = [
xdg_home_path / "py3status/config",
xdg_home_path / "i3status/config",
xdg_home_path / "i3/i3status.conf", # custom
home_path / ".i3status.conf",
home_path / ".i3/i3status.conf", # custom
xdg_dirs_path / "i3status/config",
Path("/etc/i3status.conf"),
]
for path in i3status_config_file_candidates:
if path.exists():
i3status_config_file_default = path
break
else:
# if files does not exists, defaults to ~/.i3/i3status.conf
i3status_config_file_default = i3status_config_file_candidates[3]
class Parser(argparse.ArgumentParser):
# print usages and exit on errors
def error(self, message):
print(f"\x1b[1;31merror: \x1b[0m{message}")
self.print_help()
self.exit(1)
# hide choices on errors
def _check_value(self, action, value):
if action.choices is not None and value not in action.choices:
raise argparse.ArgumentError(action, f"invalid choice: '{value}'")
class HelpFormatter(argparse.ArgumentDefaultsHelpFormatter):
def _format_action_invocation(self, action):
metavar = self._format_args(action, action.dest.upper())
return "{} {}".format(", ".join(action.option_strings), metavar)
# command line options
parser = Parser(
description="The agile, python-powered, i3status wrapper",
formatter_class=HelpFormatter,
)
parser.add_argument(
"-b",
"--dbus-notify",
action="store_true",
dest="dbus_notify",
help="send notifications via dbus instead of i3-nagbar",
)
parser.add_argument(
"-c",
"--config",
action="store",
default=i3status_config_file_default,
dest="i3status_config_path",
help="load config",
metavar="FILE",
type=Path,
)
parser.add_argument(
"-d",
"--debug",
action="store_true",
help="enable debug logging in syslog or log file if --log-file option is passed",
)
parser.add_argument(
"-g",
"--gevent",
action="store_true",
dest="gevent",
help="enable gevent monkey patching",
)
parser.add_argument(
"-i",
"--include",
action="append",
dest="include_paths",
help="append additional user-defined module paths",
metavar="PATH",
type=Path,
)
parser.add_argument(
"-l",
"--log-file",
action="store",
dest="log_file",
help="enable logging to FILE (this option is not set by default)",
metavar="FILE",
type=Path,
)
parser.add_argument(
"-s",
"--standalone",
action="store_true",
dest="standalone",
help="run py3status without i3status",
)
parser.add_argument(
"-t",
"--timeout",
action="store",
default=60,
dest="cache_timeout",
help="default module cache timeout in seconds",
metavar="INT",
type=int,
)
parser.add_argument(
"-m",
"--disable-click-events",
action="store_true",
dest="disable_click_events",
help="disable all click events",
)
parser.add_argument(
"-u",
"--i3status",
action="store",
default=which("i3status") or "i3status",
dest="i3status_path",
help="specify i3status path",
metavar="PATH",
type=Path,
)
parser.add_argument(
"-v",
"--version",
action="store_true",
dest="print_version",
help="show py3status version and exit",
)
parser.add_argument(
"--wm",
action="store", # add comment to preserve formatting
dest="wm",
metavar="WINDOW_MANAGER",
default=wm,
choices=["i3", "sway"],
help="specify window manager i3 or sway",
)
# deprecations
parser.add_argument("-n", "--interval", help=argparse.SUPPRESS)
# parse options, command, etc
options = parser.parse_args()
# make versions
options.python_version = python_version()
options.version = version
if options.print_version:
msg = "py3status version {version} (python {python_version}) on {wm}"
print(msg.format(**vars(options)))
parser.exit()
# get wm
options.wm_name = options.wm
options.wm = {
"i3": {"msg": "i3-msg", "nag": "i3-nagbar"},
"sway": {"msg": "swaymsg", "nag": "swaynag"},
}[options.wm]
# make include path to search for user modules if None
if not options.include_paths:
options.include_paths = [
xdg_home_path / "py3status/modules",
xdg_home_path / "i3status/py3status",
xdg_home_path / "i3/py3status",
home_path / ".i3/py3status",
]
include_paths = []
for path in options.include_paths:
path = path.resolve()
if path.is_dir() and any(path.iterdir()):
include_paths.append(path)
options.include_paths = include_paths
# defaults
del options.interval
del options.print_version
options.minimum_interval = 0.1 # minimum module update interval
options.click_events = not options.__dict__.pop("disable_click_events")
# all done
return options
|
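The for/else fallback used above for locating the i3status config can be isolated as follows; the candidate paths are the same ones the parser checks, and nothing here depends on i3 or sway being installed.
import os
from pathlib import Path
home_path = Path.home()
xdg_home_path = Path(os.environ.get("XDG_CONFIG_HOME", home_path / ".config"))
# First existing candidate wins; otherwise fall back to ~/.i3status.conf
# (index 3), mirroring the for/else block in parse_cli_args.
candidates = [
    xdg_home_path / "py3status/config",
    xdg_home_path / "i3status/config",
    xdg_home_path / "i3/i3status.conf",
    home_path / ".i3status.conf",
]
default = next((p for p in candidates if p.exists()), candidates[3])
print(default)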
34,065 |
def _ndarray_to_column(arr: np.ndarray) -> Union[pd.Series, List[np.ndarray]]:
"""Convert a NumPy ndarray into an appropriate column format for insertion into a
pandas DataFrame.
If conversion to a pandas Series fails (e.g. if the ndarray is multi-dimensional),
fall back to a list of NumPy ndarrays.
"""
try:
# Try to convert to Series, falling back to a list conversion if this fails
# (e.g. if the ndarray is multi-dimensional.
return pd.Series(arr)
except ValueError:
return list(arr)
|
def _ndarray_to_column(arr: np.ndarray) -> Union[pd.Series, List[np.ndarray]]:
"""Convert a NumPy ndarray into an appropriate column format for insertion into a
pandas DataFrame.
If conversion to a pandas Series fails (e.g. if the ndarray is multi-dimensional),
fall back to a list of NumPy ndarrays.
"""
try:
# Try to convert to Series, falling back to a list conversion if this fails
# (e.g. if the ndarray is multi-dimensional).
return pd.Series(arr)
except ValueError:
return list(arr)
|
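A quick demonstration of the fallback described in ``_ndarray_to_column``, assuming the helper above is in scope: a 1-D array converts to a Series, while a 2-D array makes ``pd.Series`` raise ValueError and is returned as a list of row arrays instead.
import numpy as np
print(type(_ndarray_to_column(np.arange(3))))       # pandas Series
print(type(_ndarray_to_column(np.zeros((2, 2)))))   # plain list of 1-D arrays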
58,945 |
def thermald_thread() -> NoReturn:
pm = messaging.PubMaster(['deviceState'])
pandaState_timeout = int(1000 * 2.5 * DT_TRML) # 2.5x the expected pandaState frequency
pandaState_sock = messaging.sub_sock('pandaStates', timeout=pandaState_timeout)
sm = messaging.SubMaster(["peripheralState", "gpsLocationExternal", "managerState"])
fan_speed = 0
count = 0
onroad_conditions = {
"ignition": False,
}
startup_conditions: Dict[str, Any]= {}
startup_conditions_prev: Dict[str, Any] = {}
off_ts = None
started_ts = None
started_seen = False
thermal_status = ThermalStatus.green
usb_power = True
network_type = NetworkType.none
network_strength = NetworkStrength.unknown
network_info = None
modem_version = None
registered_count = 0
nvme_temps = None
modem_temps = None
current_filter = FirstOrderFilter(0., CURRENT_TAU, DT_TRML)
temp_filter = FirstOrderFilter(0., TEMP_TAU, DT_TRML)
pandaState_prev = None
should_start_prev = False
in_car = False
handle_fan = None
is_uno = False
ui_running_prev = False
params = Params()
power_monitor = PowerMonitoring()
no_panda_cnt = 0
HARDWARE.initialize_hardware()
thermal_config = HARDWARE.get_thermal_config()
# TODO: use PI controller for UNO
controller = PIController(k_p=0, k_i=2e-3, neg_limit=-80, pos_limit=0, rate=(1 / DT_TRML))
# Leave flag for loggerd to indicate device was left onroad
if params.get_bool("IsOnroad"):
params.put_bool("BootedOnroad", True)
while True:
pandaStates = messaging.recv_sock(pandaState_sock, wait=True)
sm.update(0)
peripheralState = sm['peripheralState']
msg = read_thermal(thermal_config)
if pandaStates is not None and len(pandaStates.pandaStates) > 0:
pandaState = pandaStates.pandaStates[0]
# If we lose connection to the panda, wait 5 seconds before going offroad
if pandaState.pandaType == log.PandaState.PandaType.unknown:
no_panda_cnt += 1
if no_panda_cnt > DISCONNECT_TIMEOUT / DT_TRML:
if onroad_conditions["ignition"]:
cloudlog.error("Lost panda connection while onroad")
onroad_conditions["ignition"] = False
else:
no_panda_cnt = 0
onroad_conditions["ignition"] = pandaState.ignitionLine or pandaState.ignitionCan
in_car = pandaState.harnessStatus != log.PandaState.HarnessStatus.notConnected
usb_power = peripheralState.usbPowerMode != log.PeripheralState.UsbPowerMode.client
# Setup fan handler on first connect to panda
if handle_fan is None and peripheralState.pandaType != log.PandaState.PandaType.unknown:
is_uno = peripheralState.pandaType == log.PandaState.PandaType.uno
if TICI:
cloudlog.info("Setting up TICI fan handler")
handle_fan = handle_fan_tici
elif is_uno or PC:
cloudlog.info("Setting up UNO fan handler")
handle_fan = handle_fan_uno
else:
cloudlog.info("Setting up EON fan handler")
setup_eon_fan()
handle_fan = handle_fan_eon
# Handle disconnect
if pandaState_prev is not None:
if pandaState.pandaType == log.PandaState.PandaType.unknown and \
pandaState_prev.pandaType != log.PandaState.PandaType.unknown:
params.clear_all(ParamKeyType.CLEAR_ON_PANDA_DISCONNECT)
pandaState_prev = pandaState
# these are expensive calls. update every 10s
if (count % int(10. / DT_TRML)) == 0:
try:
network_type = HARDWARE.get_network_type()
network_strength = HARDWARE.get_network_strength(network_type)
network_info = HARDWARE.get_network_info() # pylint: disable=assignment-from-none
nvme_temps = HARDWARE.get_nvme_temperatures()
modem_temps = HARDWARE.get_modem_temperatures()
# Log modem version once
if modem_version is None:
modem_version = HARDWARE.get_modem_version() # pylint: disable=assignment-from-none
if modem_version is not None:
cloudlog.warning(f"Modem version: {modem_version}")
if TICI and (network_info.get('state', None) == "REGISTERED"):
registered_count += 1
else:
registered_count = 0
if registered_count > 10:
cloudlog.warning(f"Modem stuck in registered state {network_info}. nmcli conn up lte")
os.system("nmcli conn up lte")
registered_count = 0
except Exception:
cloudlog.exception("Error getting network status")
msg.deviceState.freeSpacePercent = get_available_percent(default=100.0)
msg.deviceState.memoryUsagePercent = int(round(psutil.virtual_memory().percent))
msg.deviceState.cpuUsagePercent = [int(round(n)) for n in psutil.cpu_percent(percpu=True)]
msg.deviceState.gpuUsagePercent = int(round(HARDWARE.get_gpu_usage_percent()))
msg.deviceState.networkType = network_type
msg.deviceState.networkStrength = network_strength
if network_info is not None:
msg.deviceState.networkInfo = network_info
if nvme_temps is not None:
msg.deviceState.nvmeTempC = nvme_temps
if modem_temps is not None:
msg.deviceState.modemTempC = modem_temps
msg.deviceState.screenBrightnessPercent = HARDWARE.get_screen_brightness()
msg.deviceState.batteryPercent = HARDWARE.get_battery_capacity()
msg.deviceState.batteryCurrent = HARDWARE.get_battery_current()
msg.deviceState.usbOnline = HARDWARE.get_usb_present()
current_filter.update(msg.deviceState.batteryCurrent / 1e6)
max_comp_temp = temp_filter.update(
max(max(msg.deviceState.cpuTempC), msg.deviceState.memoryTempC, max(msg.deviceState.gpuTempC))
)
if handle_fan is not None:
fan_speed = handle_fan(controller, max_comp_temp, fan_speed, onroad_conditions["ignition"])
msg.deviceState.fanSpeedPercentDesired = fan_speed
is_offroad_for_5_min = (started_ts is None) and ((not started_seen) or (off_ts is None) or (sec_since_boot() - off_ts > 60 * 5))
if is_offroad_for_5_min and max_comp_temp > OFFROAD_DANGER_TEMP:
# If device is offroad we want to cool down before going onroad
# since going onroad increases load and can make temps go over 107
thermal_status = ThermalStatus.danger
else:
current_band = THERMAL_BANDS[thermal_status]
band_idx = list(THERMAL_BANDS.keys()).index(thermal_status)
if current_band.min_temp is not None and max_comp_temp < current_band.min_temp:
thermal_status = list(THERMAL_BANDS.keys())[band_idx - 1]
elif current_band.max_temp is not None and max_comp_temp > current_band.max_temp:
thermal_status = list(THERMAL_BANDS.keys())[band_idx + 1]
# **** starting logic ****
# Ensure date/time are valid
now = datetime.datetime.utcnow()
startup_conditions["time_valid"] = (now.year > 2020) or (now.year == 2020 and now.month >= 10)
set_offroad_alert_if_changed("Offroad_InvalidTime", (not startup_conditions["time_valid"]))
startup_conditions["up_to_date"] = params.get("Offroad_ConnectivityNeeded") is None or params.get_bool("DisableUpdates") or params.get_bool("SnoozeUpdate")
startup_conditions["not_uninstalling"] = not params.get_bool("DoUninstall")
startup_conditions["accepted_terms"] = params.get("HasAcceptedTerms") == terms_version
# with 2% left, we killall, otherwise the phone will take a long time to boot
startup_conditions["free_space"] = msg.deviceState.freeSpacePercent > 2
startup_conditions["completed_training"] = params.get("CompletedTrainingVersion") == training_version or \
params.get_bool("Passive")
startup_conditions["not_driver_view"] = not params.get_bool("IsDriverViewEnabled")
startup_conditions["not_taking_snapshot"] = not params.get_bool("IsTakingSnapshot")
# if any CPU gets above 107 or the battery gets above 63, kill all processes
# controls will warn with CPU above 95 or battery above 60
onroad_conditions["device_temp_good"] = thermal_status < ThermalStatus.danger
set_offroad_alert_if_changed("Offroad_TemperatureTooHigh", (not onroad_conditions["device_temp_good"]))
if TICI:
set_offroad_alert_if_changed("Offroad_StorageMissing", (not Path("/data/media").is_mount()))
# Handle offroad/onroad transition
should_start = all(onroad_conditions.values())
if started_ts is None:
should_start = should_start and all(startup_conditions.values())
if should_start != should_start_prev or (count == 0):
params.put_bool("IsOnroad", should_start)
params.put_bool("IsOffroad", not should_start)
HARDWARE.set_power_save(not should_start)
if should_start:
off_ts = None
if started_ts is None:
started_ts = sec_since_boot()
started_seen = True
else:
if onroad_conditions["ignition"] and (startup_conditions != startup_conditions_prev):
cloudlog.event("Startup blocked", startup_conditions=startup_conditions, onroad_conditions=onroad_conditions)
started_ts = None
if off_ts is None:
off_ts = sec_since_boot()
# Offroad power monitoring
power_monitor.calculate(peripheralState, onroad_conditions["ignition"])
msg.deviceState.offroadPowerUsageUwh = power_monitor.get_power_used()
msg.deviceState.carBatteryCapacityUwh = max(0, power_monitor.get_car_battery_capacity())
current_power_draw = HARDWARE.get_current_power_draw() # pylint: disable=assignment-from-none
msg.deviceState.powerDrawW = current_power_draw if current_power_draw is not None else 0
# Check if we need to disable charging (handled by boardd)
msg.deviceState.chargingDisabled = power_monitor.should_disable_charging(onroad_conditions["ignition"], in_car, off_ts)
# Check if we need to shut down
if power_monitor.should_shutdown(peripheralState, onroad_conditions["ignition"], in_car, off_ts, started_seen):
cloudlog.info(f"shutting device down, offroad since {off_ts}")
# TODO: add function for blocking cloudlog instead of sleep
time.sleep(10)
HARDWARE.shutdown()
# If UI has crashed, set the brightness to reasonable non-zero value
ui_running = "ui" in (p.name for p in sm["managerState"].processes if p.running)
if ui_running_prev and not ui_running:
HARDWARE.set_screen_brightness(20)
ui_running_prev = ui_running
msg.deviceState.chargingError = current_filter.x > 0. and msg.deviceState.batteryPercent < 90 # if current is positive, then battery is being discharged
msg.deviceState.started = started_ts is not None
msg.deviceState.startedMonoTime = int(1e9*(started_ts or 0))
last_ping = params.get("LastAthenaPingTime")
if last_ping is not None:
msg.deviceState.lastAthenaPingTime = int(last_ping)
msg.deviceState.thermalStatus = thermal_status
pm.send("deviceState", msg)
if EON and not is_uno:
set_offroad_alert_if_changed("Offroad_ChargeDisabled", (not usb_power))
should_start_prev = should_start
startup_conditions_prev = startup_conditions.copy()
# report to server once every 10 minutes
if (count % int(600. / DT_TRML)) == 0:
if EON and started_ts is None and msg.deviceState.memoryUsagePercent > 40:
cloudlog.event("High offroad memory usage", mem=msg.deviceState.memoryUsagePercent)
cloudlog.event("STATUS_PACKET",
count=count,
pandaStates=(strip_deprecated_keys(pandaStates.to_dict()) if pandaStates else None),
peripheralState=strip_deprecated_keys(peripheralState.to_dict()),
location=(strip_deprecated_keys(sm["gpsLocationExternal"].to_dict()) if sm.alive["gpsLocationExternal"] else None),
deviceState=strip_deprecated_keys(msg.to_dict()))
count += 1
|
def thermald_thread() -> NoReturn:
pm = messaging.PubMaster(['deviceState'])
pandaState_timeout = int(1000 * 2.5 * DT_TRML) # 2.5x the expected pandaState frequency
pandaState_sock = messaging.sub_sock('pandaStates', timeout=pandaState_timeout)
sm = messaging.SubMaster(["peripheralState", "gpsLocationExternal", "managerState"])
fan_speed = 0
count = 0
onroad_conditions = {
"ignition": False,
}
startup_conditions: Dict[str, bool] = {}
startup_conditions_prev: Dict[str, bool] = {}
off_ts = None
started_ts = None
started_seen = False
thermal_status = ThermalStatus.green
usb_power = True
network_type = NetworkType.none
network_strength = NetworkStrength.unknown
network_info = None
modem_version = None
registered_count = 0
nvme_temps = None
modem_temps = None
current_filter = FirstOrderFilter(0., CURRENT_TAU, DT_TRML)
temp_filter = FirstOrderFilter(0., TEMP_TAU, DT_TRML)
pandaState_prev = None
should_start_prev = False
in_car = False
handle_fan = None
is_uno = False
ui_running_prev = False
params = Params()
power_monitor = PowerMonitoring()
no_panda_cnt = 0
HARDWARE.initialize_hardware()
thermal_config = HARDWARE.get_thermal_config()
# TODO: use PI controller for UNO
controller = PIController(k_p=0, k_i=2e-3, neg_limit=-80, pos_limit=0, rate=(1 / DT_TRML))
# Leave flag for loggerd to indicate device was left onroad
if params.get_bool("IsOnroad"):
params.put_bool("BootedOnroad", True)
while True:
pandaStates = messaging.recv_sock(pandaState_sock, wait=True)
sm.update(0)
peripheralState = sm['peripheralState']
msg = read_thermal(thermal_config)
if pandaStates is not None and len(pandaStates.pandaStates) > 0:
pandaState = pandaStates.pandaStates[0]
# If we lose connection to the panda, wait 5 seconds before going offroad
if pandaState.pandaType == log.PandaState.PandaType.unknown:
no_panda_cnt += 1
if no_panda_cnt > DISCONNECT_TIMEOUT / DT_TRML:
if onroad_conditions["ignition"]:
cloudlog.error("Lost panda connection while onroad")
onroad_conditions["ignition"] = False
else:
no_panda_cnt = 0
onroad_conditions["ignition"] = pandaState.ignitionLine or pandaState.ignitionCan
in_car = pandaState.harnessStatus != log.PandaState.HarnessStatus.notConnected
usb_power = peripheralState.usbPowerMode != log.PeripheralState.UsbPowerMode.client
# Setup fan handler on first connect to panda
if handle_fan is None and peripheralState.pandaType != log.PandaState.PandaType.unknown:
is_uno = peripheralState.pandaType == log.PandaState.PandaType.uno
if TICI:
cloudlog.info("Setting up TICI fan handler")
handle_fan = handle_fan_tici
elif is_uno or PC:
cloudlog.info("Setting up UNO fan handler")
handle_fan = handle_fan_uno
else:
cloudlog.info("Setting up EON fan handler")
setup_eon_fan()
handle_fan = handle_fan_eon
# Handle disconnect
if pandaState_prev is not None:
if pandaState.pandaType == log.PandaState.PandaType.unknown and \
pandaState_prev.pandaType != log.PandaState.PandaType.unknown:
params.clear_all(ParamKeyType.CLEAR_ON_PANDA_DISCONNECT)
pandaState_prev = pandaState
# these are expensive calls. update every 10s
if (count % int(10. / DT_TRML)) == 0:
try:
network_type = HARDWARE.get_network_type()
network_strength = HARDWARE.get_network_strength(network_type)
network_info = HARDWARE.get_network_info() # pylint: disable=assignment-from-none
nvme_temps = HARDWARE.get_nvme_temperatures()
modem_temps = HARDWARE.get_modem_temperatures()
# Log modem version once
if modem_version is None:
modem_version = HARDWARE.get_modem_version() # pylint: disable=assignment-from-none
if modem_version is not None:
cloudlog.warning(f"Modem version: {modem_version}")
if TICI and (network_info.get('state', None) == "REGISTERED"):
registered_count += 1
else:
registered_count = 0
if registered_count > 10:
cloudlog.warning(f"Modem stuck in registered state {network_info}. nmcli conn up lte")
os.system("nmcli conn up lte")
registered_count = 0
except Exception:
cloudlog.exception("Error getting network status")
msg.deviceState.freeSpacePercent = get_available_percent(default=100.0)
msg.deviceState.memoryUsagePercent = int(round(psutil.virtual_memory().percent))
msg.deviceState.cpuUsagePercent = [int(round(n)) for n in psutil.cpu_percent(percpu=True)]
msg.deviceState.gpuUsagePercent = int(round(HARDWARE.get_gpu_usage_percent()))
msg.deviceState.networkType = network_type
msg.deviceState.networkStrength = network_strength
if network_info is not None:
msg.deviceState.networkInfo = network_info
if nvme_temps is not None:
msg.deviceState.nvmeTempC = nvme_temps
if modem_temps is not None:
msg.deviceState.modemTempC = modem_temps
msg.deviceState.screenBrightnessPercent = HARDWARE.get_screen_brightness()
msg.deviceState.batteryPercent = HARDWARE.get_battery_capacity()
msg.deviceState.batteryCurrent = HARDWARE.get_battery_current()
msg.deviceState.usbOnline = HARDWARE.get_usb_present()
current_filter.update(msg.deviceState.batteryCurrent / 1e6)
max_comp_temp = temp_filter.update(
max(max(msg.deviceState.cpuTempC), msg.deviceState.memoryTempC, max(msg.deviceState.gpuTempC))
)
if handle_fan is not None:
fan_speed = handle_fan(controller, max_comp_temp, fan_speed, onroad_conditions["ignition"])
msg.deviceState.fanSpeedPercentDesired = fan_speed
is_offroad_for_5_min = (started_ts is None) and ((not started_seen) or (off_ts is None) or (sec_since_boot() - off_ts > 60 * 5))
if is_offroad_for_5_min and max_comp_temp > OFFROAD_DANGER_TEMP:
# If device is offroad we want to cool down before going onroad
# since going onroad increases load and can make temps go over 107
thermal_status = ThermalStatus.danger
else:
current_band = THERMAL_BANDS[thermal_status]
band_idx = list(THERMAL_BANDS.keys()).index(thermal_status)
if current_band.min_temp is not None and max_comp_temp < current_band.min_temp:
thermal_status = list(THERMAL_BANDS.keys())[band_idx - 1]
elif current_band.max_temp is not None and max_comp_temp > current_band.max_temp:
thermal_status = list(THERMAL_BANDS.keys())[band_idx + 1]
# **** starting logic ****
# Ensure date/time are valid
now = datetime.datetime.utcnow()
startup_conditions["time_valid"] = (now.year > 2020) or (now.year == 2020 and now.month >= 10)
set_offroad_alert_if_changed("Offroad_InvalidTime", (not startup_conditions["time_valid"]))
startup_conditions["up_to_date"] = params.get("Offroad_ConnectivityNeeded") is None or params.get_bool("DisableUpdates") or params.get_bool("SnoozeUpdate")
startup_conditions["not_uninstalling"] = not params.get_bool("DoUninstall")
startup_conditions["accepted_terms"] = params.get("HasAcceptedTerms") == terms_version
# with 2% left, we killall, otherwise the phone will take a long time to boot
startup_conditions["free_space"] = msg.deviceState.freeSpacePercent > 2
startup_conditions["completed_training"] = params.get("CompletedTrainingVersion") == training_version or \
params.get_bool("Passive")
startup_conditions["not_driver_view"] = not params.get_bool("IsDriverViewEnabled")
startup_conditions["not_taking_snapshot"] = not params.get_bool("IsTakingSnapshot")
# if any CPU gets above 107 or the battery gets above 63, kill all processes
# controls will warn with CPU above 95 or battery above 60
onroad_conditions["device_temp_good"] = thermal_status < ThermalStatus.danger
set_offroad_alert_if_changed("Offroad_TemperatureTooHigh", (not onroad_conditions["device_temp_good"]))
if TICI:
set_offroad_alert_if_changed("Offroad_StorageMissing", (not Path("/data/media").is_mount()))
# Handle offroad/onroad transition
should_start = all(onroad_conditions.values())
if started_ts is None:
should_start = should_start and all(startup_conditions.values())
if should_start != should_start_prev or (count == 0):
params.put_bool("IsOnroad", should_start)
params.put_bool("IsOffroad", not should_start)
HARDWARE.set_power_save(not should_start)
if should_start:
off_ts = None
if started_ts is None:
started_ts = sec_since_boot()
started_seen = True
else:
if onroad_conditions["ignition"] and (startup_conditions != startup_conditions_prev):
cloudlog.event("Startup blocked", startup_conditions=startup_conditions, onroad_conditions=onroad_conditions)
started_ts = None
if off_ts is None:
off_ts = sec_since_boot()
# Offroad power monitoring
power_monitor.calculate(peripheralState, onroad_conditions["ignition"])
msg.deviceState.offroadPowerUsageUwh = power_monitor.get_power_used()
msg.deviceState.carBatteryCapacityUwh = max(0, power_monitor.get_car_battery_capacity())
current_power_draw = HARDWARE.get_current_power_draw() # pylint: disable=assignment-from-none
msg.deviceState.powerDrawW = current_power_draw if current_power_draw is not None else 0
# Check if we need to disable charging (handled by boardd)
msg.deviceState.chargingDisabled = power_monitor.should_disable_charging(onroad_conditions["ignition"], in_car, off_ts)
# Check if we need to shut down
if power_monitor.should_shutdown(peripheralState, onroad_conditions["ignition"], in_car, off_ts, started_seen):
cloudlog.info(f"shutting device down, offroad since {off_ts}")
# TODO: add function for blocking cloudlog instead of sleep
time.sleep(10)
HARDWARE.shutdown()
# If UI has crashed, set the brightness to reasonable non-zero value
ui_running = "ui" in (p.name for p in sm["managerState"].processes if p.running)
if ui_running_prev and not ui_running:
HARDWARE.set_screen_brightness(20)
ui_running_prev = ui_running
msg.deviceState.chargingError = current_filter.x > 0. and msg.deviceState.batteryPercent < 90 # if current is positive, then battery is being discharged
msg.deviceState.started = started_ts is not None
msg.deviceState.startedMonoTime = int(1e9*(started_ts or 0))
last_ping = params.get("LastAthenaPingTime")
if last_ping is not None:
msg.deviceState.lastAthenaPingTime = int(last_ping)
msg.deviceState.thermalStatus = thermal_status
pm.send("deviceState", msg)
if EON and not is_uno:
set_offroad_alert_if_changed("Offroad_ChargeDisabled", (not usb_power))
should_start_prev = should_start
startup_conditions_prev = startup_conditions.copy()
# report to server once every 10 minutes
if (count % int(600. / DT_TRML)) == 0:
if EON and started_ts is None and msg.deviceState.memoryUsagePercent > 40:
cloudlog.event("High offroad memory usage", mem=msg.deviceState.memoryUsagePercent)
cloudlog.event("STATUS_PACKET",
count=count,
pandaStates=(strip_deprecated_keys(pandaStates.to_dict()) if pandaStates else None),
peripheralState=strip_deprecated_keys(peripheralState.to_dict()),
location=(strip_deprecated_keys(sm["gpsLocationExternal"].to_dict()) if sm.alive["gpsLocationExternal"] else None),
deviceState=strip_deprecated_keys(msg.to_dict()))
count += 1
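# Illustrative sketch (not part of the snippet above): a minimal, self-contained
# version of the thermal-band hysteresis used in thermald_thread. The band
# boundaries and status values below are assumed for illustration; they are not
# openpilot's real THERMAL_BANDS constants.
from collections import OrderedDict, namedtuple
from enum import IntEnum

ThermalBand = namedtuple("ThermalBand", ["min_temp", "max_temp"])

class ThermalStatus(IntEnum):
    green = 0
    yellow = 1
    red = 2
    danger = 3

THERMAL_BANDS = OrderedDict([
    (ThermalStatus.green, ThermalBand(None, 80.0)),
    (ThermalStatus.yellow, ThermalBand(75.0, 96.0)),
    (ThermalStatus.red, ThermalBand(80.0, 107.0)),
    (ThermalStatus.danger, ThermalBand(94.0, None)),
])

def update_thermal_status(thermal_status, max_comp_temp):
    # Step down one band when cooled below the current band's min_temp,
    # step up one band when heated above its max_temp, otherwise stay put.
    # The overlapping band edges are what give the hysteresis.
    current_band = THERMAL_BANDS[thermal_status]
    band_idx = list(THERMAL_BANDS.keys()).index(thermal_status)
    if current_band.min_temp is not None and max_comp_temp < current_band.min_temp:
        return list(THERMAL_BANDS.keys())[band_idx - 1]
    if current_band.max_temp is not None and max_comp_temp > current_band.max_temp:
        return list(THERMAL_BANDS.keys())[band_idx + 1]
    return thermal_status

# update_thermal_status(ThermalStatus.green, 85.0) -> ThermalStatus.yellow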
|
8,668 |
def add_config_arguments(parser):
"""Add configuration related argument to a ``parser``.
:param parser: Argument parser (or subparser)
:type parser: argparse.ArgumentParser
This function adds the proper argument to the ``parser`` given in order to
have a standard way to define a configuration filename in all of Sopel's
command line interfaces.
This can be used on an argument parser, or an argument subparser, to handle
these cases::
[sopel-command] -c [filename]
[sopel-command] [action] -c [filename]
Then, when the parser parses the command line arguments, it will expose
a ``config`` option to be used to find and load Sopel's settings.
"""
parser.add_argument(
'-c', '--config',
default=None,
metavar='filename',
dest='config',
help='Use a specific configuration file')
|
def add_config_arguments(parser):
"""Add configuration-related argument to a ``parser``.
:param parser: Argument parser (or subparser)
:type parser: argparse.ArgumentParser
This function adds the proper argument to the ``parser`` given in order to
have a standard way to define a configuration filename in all of Sopel's
command line interfaces.
This can be used on an argument parser, or an argument subparser, to handle
these cases::
[sopel-command] -c [filename]
[sopel-command] [action] -c [filename]
Then, when the parser parses the command line arguments, it will expose
a ``config`` option to be used to find and load Sopel's settings.
"""
parser.add_argument(
'-c', '--config',
default=None,
metavar='filename',
dest='config',
help='Use a specific configuration file')
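# Usage sketch for add_config_arguments(); the "start" action and the filename
# below are made-up examples, not actual Sopel commands.
import argparse

parser = argparse.ArgumentParser(prog='sopel-command')
add_config_arguments(parser)
subparsers = parser.add_subparsers(dest='action')
add_config_arguments(subparsers.add_parser('start'))

# [sopel-command] -c [filename]
opts = parser.parse_args(['-c', 'local.cfg'])
assert opts.config == 'local.cfg'
# [sopel-command] [action] -c [filename]
opts = parser.parse_args(['start', '-c', 'local.cfg'])
assert opts.config == 'local.cfg'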
|
2,111 |
def test_array_function_not_called():
X = np.array([[1, 1], [1, 2], [1, 3], [1, 4],
[2, 1], [2, 2], [2, 3], [2, 4],
[3, 1], [3, 2], [3, 3], [3, 4]])
X = _NotAnArray(X)
y = _NotAnArray([1, 1, 1, 1, 2, 2, 2, 2, 1, 1, 2, 2])
estimator = LogisticRegression()
grid = GridSearchCV(estimator, param_grid={'C': [1, 10]})
cross_validate(grid, X, y, n_jobs=2)
|
def test_array_function_not_called():
X = np.array([[1, 1], [1, 2], [1, 3], [1, 4],
[2, 1], [2, 2], [2, 3], [2, 4],
[3, 1], [3, 2], [3, 3], [3, 4]])
X = _NotAnArray(X)
y = _NotAnArray([1, 1, 1, 1, 2, 2, 2, 2, 1, 1, 2, 2])
estimator = LogisticRegression()
cross_validate(estimator, X, y, cv=2)
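# Rough sketch of an array-like wrapper in the spirit of _NotAnArray: the point
# of the test is that NumPy's __array_function__ protocol must never be
# dispatched to the wrapper. This is an assumption for illustration, not
# scikit-learn's exact implementation.
import numpy as np

class NotAnArray:
    def __init__(self, data):
        self.data = np.asarray(data)

    def __array__(self, dtype=None):
        # Explicit conversion (np.asarray) is allowed.
        return self.data

    def __array_function__(self, func, types, args, kwargs):
        # Fail loudly if NumPy tries to dispatch an array function here.
        raise TypeError(f"Don't want to call array_function {func.__name__}!")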
|
48,519 |
def test_texture():
"""Test adding texture coordinates"""
# create a rectangle vertices
vertices = np.array([[0, 0, 0],
[1, 0, 0],
[1, 0.5, 0],
[0, 0.5, 0],])
# mesh faces
faces = np.hstack([[3, 0, 1, 2],
[3, 0, 3, 2]]).astype(np.int8)
# Create simple texture coordinates
t_coords = np.array([[0, 0],
[1, 0],
[1, 1],
[0, 1]])
# Create the poly data
mesh = pyvista.PolyData(vertices, faces)
# Attempt setting the texture coordinates
mesh.t_coords = t_coords
# now grab the texture coordinates
foo = mesh.t_coords
assert np.allclose(foo, t_coords)
texture = pyvista.read_texture(examples.mapfile)
mesh.textures['map'] = texture
assert mesh.textures['map'] is not None
mesh.clear_textures()
assert len(mesh.textures) == 0
mesh = examples.load_airplane()
mesh.texture_map_to_plane(inplace=True, name="tex_a", use_bounds=False)
mesh.texture_map_to_plane(inplace=True, name="tex_b", use_bounds=True)
assert not np.allclose(mesh["tex_a"], mesh["tex_b"])
mesh.textures["tex_a"] = texture.copy()
mesh.textures["tex_b"] = texture.copy()
mesh._activate_texture("tex_a")
assert np.allclose(mesh.t_coords, mesh["tex_a"])
mesh._activate_texture("tex_b")
assert np.allclose(mesh.t_coords, mesh["tex_b"])
# Now test copying
cmesh = mesh.copy()
assert len(cmesh.textures) == 2
assert "tex_a" in cmesh.textures
assert "tex_b" in cmesh.textures
|
def test_texture_airplane():
mesh = examples.load_airplane()
mesh.texture_map_to_plane(inplace=True, name="tex_a", use_bounds=False)
mesh.texture_map_to_plane(inplace=True, name="tex_b", use_bounds=True)
assert not np.allclose(mesh["tex_a"], mesh["tex_b"])
mesh.textures["tex_a"] = texture.copy()
mesh.textures["tex_b"] = texture.copy()
mesh._activate_texture("tex_a")
assert np.allclose(mesh.t_coords, mesh["tex_a"])
mesh._activate_texture("tex_b")
assert np.allclose(mesh.t_coords, mesh["tex_b"])
# Now test copying
cmesh = mesh.copy()
assert len(cmesh.textures) == 2
assert "tex_a" in cmesh.textures
assert "tex_b" in cmesh.textures
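# Condensed sketch of the texture-coordinate workflow exercised by the tests
# above; it reuses the same pyvista calls. The commented-out plot() call at the
# end is an assumption about the rendering API and is kept inert on purpose.
import numpy as np
import pyvista
from pyvista import examples

vertices = np.array([[0, 0, 0], [1, 0, 0], [1, 0.5, 0], [0, 0.5, 0]])
faces = np.hstack([[4, 0, 1, 2, 3]])                  # a single quad
t_coords = np.array([[0, 0], [1, 0], [1, 1], [0, 1]], dtype=float)

mesh = pyvista.PolyData(vertices, faces)
mesh.t_coords = t_coords                              # attach texture coordinates
texture = pyvista.read_texture(examples.mapfile)
mesh.textures['map'] = texture
# mesh.plot(texture=texture)                          # render (assumed API)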
|
36,402 |
def get_layout(ns):
def in_build(f, dest="", new_name=None):
n, _, x = f.rpartition(".")
n = new_name or n
src = ns.build / f
if ns.debug and src not in REQUIRED_DLLS:
if not src.stem.endswith("_d"):
src = src.parent / (src.stem + "_d" + src.suffix)
if not n.endswith("_d"):
n += "_d"
f = n + "." + x
yield dest + n + "." + x, src
if ns.include_symbols:
pdb = src.with_suffix(".pdb")
if pdb.is_file():
yield dest + n + ".pdb", pdb
if ns.include_dev:
lib = src.with_suffix(".lib")
if lib.is_file():
yield "libs/" + n + ".lib", lib
if ns.include_appxmanifest:
yield from in_build("python_uwp.exe", new_name="python{}".format(VER_DOT))
yield from in_build("pythonw_uwp.exe", new_name="pythonw{}".format(VER_DOT))
# For backwards compatibility, but we don't reference these ourselves
yield from in_build("python_uwp.exe", new_name="python")
yield from in_build("pythonw_uwp.exe", new_name="pythonw")
else:
yield from in_build("python.exe", new_name="python{}".format(VER_DOT))
yield from in_build("pythonw.exe", new_name="pythonw{}".format(VER_DOT))
# For backwards compatibility, but we don't reference these ourselves
yield from in_build("python.exe", new_name="python")
yield from in_build("pythonw.exe", new_name="pythonw")
yield from in_build(PYTHON_DLL_NAME)
if ns.include_launchers and ns.include_appxmanifest:
if ns.include_pip:
yield from in_build("python_uwp.exe", new_name="pip{}".format(VER_DOT))
if ns.include_idle:
yield from in_build("pythonw_uwp.exe", new_name="idle{}".format(VER_DOT))
if ns.include_stable:
yield from in_build(PYTHON_STABLE_DLL_NAME)
for dest, src in rglob(ns.build, "vcruntime*.dll"):
yield dest, src
yield "LICENSE.txt", ns.build / "LICENSE.txt"
for dest, src in rglob(ns.build, ("*.pyd", "*.dll")):
if src.stem.endswith("_d") != bool(ns.debug) and src not in REQUIRED_DLLS:
continue
if src in EXCLUDE_FROM_PYDS:
continue
if src in TEST_PYDS_ONLY and not ns.include_tests:
continue
if src in TCLTK_PYDS_ONLY and not ns.include_tcltk:
continue
yield from in_build(src.name, dest="" if ns.flat_dlls else "DLLs/")
if ns.zip_lib:
zip_name = PYTHON_ZIP_NAME
yield zip_name, ns.temp / zip_name
else:
for dest, src in get_lib_layout(ns):
yield "Lib/{}".format(dest), src
if ns.include_venv:
yield from in_build("venvlauncher.exe", "Lib/venv/scripts/nt/", "python")
yield from in_build("venvwlauncher.exe", "Lib/venv/scripts/nt/", "pythonw")
if ns.include_tools:
def _c(d):
if d.is_dir():
return d in TOOLS_DIRS
return d in TOOLS_FILES
for dest, src in rglob(ns.source / "Tools", "**/*", _c):
yield "Tools/{}".format(dest), src
if ns.include_underpth:
yield PYTHON_PTH_NAME, ns.temp / PYTHON_PTH_NAME
if ns.include_dev:
def _c(d):
if d.is_dir():
return d.name != "internal"
return True
for dest, src in rglob(ns.source / "Include", "**/*.h", _c):
yield "include/{}".format(dest), src
src = ns.source / "PC" / "pyconfig.h"
yield "include/pyconfig.h", src
for dest, src in get_tcltk_lib(ns):
yield dest, src
if ns.include_pip:
for dest, src in get_pip_layout(ns):
if not isinstance(src, tuple) and (
src in EXCLUDE_FROM_LIB or src in EXCLUDE_FROM_PACKAGED_LIB
):
continue
yield dest, src
if ns.include_chm:
for dest, src in rglob(ns.doc_build / "htmlhelp", PYTHON_CHM_NAME):
yield "Doc/{}".format(dest), src
if ns.include_html_doc:
for dest, src in rglob(ns.doc_build / "html", "**/*"):
yield "Doc/html/{}".format(dest), src
if ns.include_props:
for dest, src in get_props_layout(ns):
yield dest, src
if ns.include_nuspec:
for dest, src in get_nuspec_layout(ns):
yield dest, src
for dest, src in get_appx_layout(ns):
yield dest, src
if ns.include_cat:
if ns.flat_dlls:
yield ns.include_cat.name, ns.include_cat
else:
yield "DLLs/{}".format(ns.include_cat.name), ns.include_cat
|
def get_layout(ns):
def in_build(f, dest="", new_name=None):
n, _, x = f.rpartition(".")
n = new_name or n
src = ns.build / f
if ns.debug and src not in REQUIRED_DLLS:
if not src.stem.endswith("_d"):
src = src.parent / (src.stem + "_d" + src.suffix)
if not n.endswith("_d"):
n += "_d"
f = n + "." + x
yield dest + n + "." + x, src
if ns.include_symbols:
pdb = src.with_suffix(".pdb")
if pdb.is_file():
yield dest + n + ".pdb", pdb
if ns.include_dev:
lib = src.with_suffix(".lib")
if lib.is_file():
yield "libs/" + n + ".lib", lib
if ns.include_appxmanifest:
yield from in_build("python_uwp.exe", new_name="python{}".format(VER_DOT))
yield from in_build("pythonw_uwp.exe", new_name="pythonw{}".format(VER_DOT))
# For backwards compatibility, but we don't reference these ourselves
yield from in_build("python_uwp.exe", new_name="python")
yield from in_build("pythonw_uwp.exe", new_name="pythonw")
else:
yield from in_build("python.exe", new_name="python{}".format(VER_DOT))
yield from in_build("pythonw.exe", new_name="pythonw{}".format(VER_DOT))
# For backwards compatibility, but we don't reference these ourselves.
yield from in_build("python.exe", new_name="python")
yield from in_build("pythonw.exe", new_name="pythonw")
yield from in_build(PYTHON_DLL_NAME)
if ns.include_launchers and ns.include_appxmanifest:
if ns.include_pip:
yield from in_build("python_uwp.exe", new_name="pip{}".format(VER_DOT))
if ns.include_idle:
yield from in_build("pythonw_uwp.exe", new_name="idle{}".format(VER_DOT))
if ns.include_stable:
yield from in_build(PYTHON_STABLE_DLL_NAME)
for dest, src in rglob(ns.build, "vcruntime*.dll"):
yield dest, src
yield "LICENSE.txt", ns.build / "LICENSE.txt"
for dest, src in rglob(ns.build, ("*.pyd", "*.dll")):
if src.stem.endswith("_d") != bool(ns.debug) and src not in REQUIRED_DLLS:
continue
if src in EXCLUDE_FROM_PYDS:
continue
if src in TEST_PYDS_ONLY and not ns.include_tests:
continue
if src in TCLTK_PYDS_ONLY and not ns.include_tcltk:
continue
yield from in_build(src.name, dest="" if ns.flat_dlls else "DLLs/")
if ns.zip_lib:
zip_name = PYTHON_ZIP_NAME
yield zip_name, ns.temp / zip_name
else:
for dest, src in get_lib_layout(ns):
yield "Lib/{}".format(dest), src
if ns.include_venv:
yield from in_build("venvlauncher.exe", "Lib/venv/scripts/nt/", "python")
yield from in_build("venvwlauncher.exe", "Lib/venv/scripts/nt/", "pythonw")
if ns.include_tools:
def _c(d):
if d.is_dir():
return d in TOOLS_DIRS
return d in TOOLS_FILES
for dest, src in rglob(ns.source / "Tools", "**/*", _c):
yield "Tools/{}".format(dest), src
if ns.include_underpth:
yield PYTHON_PTH_NAME, ns.temp / PYTHON_PTH_NAME
if ns.include_dev:
def _c(d):
if d.is_dir():
return d.name != "internal"
return True
for dest, src in rglob(ns.source / "Include", "**/*.h", _c):
yield "include/{}".format(dest), src
src = ns.source / "PC" / "pyconfig.h"
yield "include/pyconfig.h", src
for dest, src in get_tcltk_lib(ns):
yield dest, src
if ns.include_pip:
for dest, src in get_pip_layout(ns):
if not isinstance(src, tuple) and (
src in EXCLUDE_FROM_LIB or src in EXCLUDE_FROM_PACKAGED_LIB
):
continue
yield dest, src
if ns.include_chm:
for dest, src in rglob(ns.doc_build / "htmlhelp", PYTHON_CHM_NAME):
yield "Doc/{}".format(dest), src
if ns.include_html_doc:
for dest, src in rglob(ns.doc_build / "html", "**/*"):
yield "Doc/html/{}".format(dest), src
if ns.include_props:
for dest, src in get_props_layout(ns):
yield dest, src
if ns.include_nuspec:
for dest, src in get_nuspec_layout(ns):
yield dest, src
for dest, src in get_appx_layout(ns):
yield dest, src
if ns.include_cat:
if ns.flat_dlls:
yield ns.include_cat.name, ns.include_cat
else:
yield "DLLs/{}".format(ns.include_cat.name), ns.include_cat
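# Hypothetical consumer of get_layout(): it materializes the (dest, src) pairs
# into an output directory. copy_layout and out_dir are illustrative names and
# are not part of the original layout script.
import shutil
from pathlib import Path

def copy_layout(ns, out_dir):
    out_dir = Path(out_dir)
    for dest, src in get_layout(ns):
        if isinstance(src, tuple):
            # Some pip entries are yielded as tuples; a real consumer would
            # handle them specially. Skipped here for brevity.
            continue
        target = out_dir / dest
        target.parent.mkdir(parents=True, exist_ok=True)
        shutil.copy2(src, target)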
|
54,785 |
def apply_twomode_gate(mat, state, pure, modes, n, trunc, gate="BSgate"):
"""Applies a two-mode gate to a state
Applies the two-mode gate to the state using custom tensor contractions and
the numba compiler for faster application.
Args:
mat (ndarray): The BS operator to be applied to the state
state (ndarray): The state that the BS is applied to
pure (bool): If the state is pure or mixed
modes (list[int]): A list of modes to which the BS is applied
n (int): The total number of modes
trunc (int): The Hilbert space truncation/cutoff
gate (str): the gate which should be called (BSgate, S2gate)
Returns:
ndarray: State where the two-mode operation has been applied
"""
if pure:
t1 = modes[0]
t2 = modes[1]
# put the ket-values in front to be operated on in the apply function
switch_list_1 = np.arange(n)
switch_list_2 = np.arange(n)
switch_list_1[[0, t1]] = switch_list_1[[t1, 0]]
switch_list_2[[1, t2]] = switch_list_2[[t2, 1]]
state = state.transpose(switch_list_1)
state = state.transpose(switch_list_2)
if gate == "BSgate":
state = _apply_BS(mat, state, trunc)
elif gate == "S2gate":
state = _apply_S2(mat, state, trunc)
else:
raise NotImplementedError
state = state.transpose(switch_list_2)
ret = state.transpose(switch_list_1)
else:
t1 = 2 * modes[0]
t2 = 2 * modes[1]
# put the ket-values in front to be operated on in the apply function
switch_list_1 = np.arange(2 * n)
switch_list_2 = np.arange(2 * n)
switch_list_1[[0, 1, t1, t1+1]] = switch_list_1[[t1, t1+1, 0, 1]]
switch_list_2[[0, 1, t2, t2+1]] = switch_list_2[[t2, t2+1, 0, 1]]
# put bra-values to the left, and ket-values to the right (ignoring values not operated on)
transpose_list = np.arange(2 * n)
transpose_list[[t1+1, t2]] = transpose_list[[t2, t1+1]]
state = state.transpose(transpose_list)
state = state.transpose(switch_list_1)
if gate == "BSgate":
state = _apply_BS(mat, state, trunc)
state = state.transpose(switch_list_1)
state = state.transpose(switch_list_2)
state = _apply_BS(mat.conj(), state, trunc)
elif gate == "S2gate":
state = _apply_S2(mat, state, trunc)
state = state.transpose(switch_list_1)
state = state.transpose(switch_list_2)
state = _apply_S2(mat.conj(), state, trunc)
else:
raise NotImplementedError
state = state.transpose(switch_list_2)
ret = state.transpose(transpose_list)
return ret
|
def apply_twomode_gate(mat, state, pure, modes, n, trunc, gate="BSgate"):
"""Applies a two-mode gate to a state
Applies the two-mode gate to the state using custom tensor contractions and
the numba compiler for faster application.
Args:
mat (ndarray): The BS operator to be applied to the state
state (ndarray): The state that the BS is applied to
pure (bool): If the state is pure or mixed
modes (list[int]): A list of modes to which the BS is applied
n (int): The total number of modes
trunc (int): The Hilbert space truncation/cutoff
gate (str): The gate that is being applied. This argument determines the selection rules that
are used. Options are ``"BSgate"`` and ``"S2gate"``.
Returns:
ndarray: State where the two-mode operation has been applied
"""
if pure:
t1 = modes[0]
t2 = modes[1]
# put the ket-values in front to be operated on in the apply function
switch_list_1 = np.arange(n)
switch_list_2 = np.arange(n)
switch_list_1[[0, t1]] = switch_list_1[[t1, 0]]
switch_list_2[[1, t2]] = switch_list_2[[t2, 1]]
state = state.transpose(switch_list_1)
state = state.transpose(switch_list_2)
if gate == "BSgate":
state = _apply_BS(mat, state, trunc)
elif gate == "S2gate":
state = _apply_S2(mat, state, trunc)
else:
raise NotImplementedError
state = state.transpose(switch_list_2)
ret = state.transpose(switch_list_1)
else:
t1 = 2 * modes[0]
t2 = 2 * modes[1]
# put the ket-values in front to be operated on in the apply function
switch_list_1 = np.arange(2 * n)
switch_list_2 = np.arange(2 * n)
switch_list_1[[0, 1, t1, t1+1]] = switch_list_1[[t1, t1+1, 0, 1]]
switch_list_2[[0, 1, t2, t2+1]] = switch_list_2[[t2, t2+1, 0, 1]]
# put bra-values to the left, and ket-values to the right (ignoring values not operated on)
transpose_list = np.arange(2 * n)
transpose_list[[t1+1, t2]] = transpose_list[[t2, t1+1]]
state = state.transpose(transpose_list)
state = state.transpose(switch_list_1)
if gate == "BSgate":
state = _apply_BS(mat, state, trunc)
state = state.transpose(switch_list_1)
state = state.transpose(switch_list_2)
state = _apply_BS(mat.conj(), state, trunc)
elif gate == "S2gate":
state = _apply_S2(mat, state, trunc)
state = state.transpose(switch_list_1)
state = state.transpose(switch_list_2)
state = _apply_S2(mat.conj(), state, trunc)
else:
raise NotImplementedError
state = state.transpose(switch_list_2)
ret = state.transpose(transpose_list)
return ret
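# Toy illustration of the switch-list/transpose trick used above: swapping two
# entries of an np.arange index array moves the target mode's axis to the
# front, and applying the same permutation again restores the original order.
import numpy as np

n, trunc = 4, 3
state = np.random.rand(*([trunc] * n))        # stand-in for a pure n-mode state

t1 = 2
switch_list = np.arange(n)
switch_list[[0, t1]] = switch_list[[t1, 0]]   # -> [2, 1, 0, 3]

front = state.transpose(switch_list)          # mode t1 is now axis 0
# ... a single- or two-mode operation would act on the leading axes here ...
restored = front.transpose(switch_list)       # the swap is its own inverse

assert np.allclose(restored, state)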
|
31,730 |
def get_employee_identity_analysis_genome_data_command(client, args):
email_address = str(args.get('email_address', ''))
response = client.get_employee_identity_analysis_genome_data_request(email_address)
headers = [
'description',
'key',
'name',
'values',
]
markdown = tableToMarkdown(
f"Analysis of {email_address}", response.get('histograms', []), headers=headers)
command_results = CommandResults(
readable_output=markdown,
outputs_prefix='AbnormalSecurity.EmployeeIdentityDetails',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
|
def get_employee_identity_analysis_genome_data_command(client, args):
email_address = str(args.get('email_address', ''))
response = client.get_employee_identity_analysis_genome_data_request(email_address)
headers = ['description', 'key', 'name', 'values']
markdown = tableToMarkdown(
f"Analysis of {email_address}", response.get('histograms', []), headers=headers)
command_results = CommandResults(
readable_output=markdown,
outputs_prefix='AbnormalSecurity.EmployeeIdentityDetails',
outputs_key_field='',
outputs=response,
raw_response=response
)
return command_results
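# Hypothetical invocation with a stubbed client; the stub class and the sample
# payload are illustrative only, not the real Abnormal Security client.
class StubClient:
    def get_employee_identity_analysis_genome_data_request(self, email_address):
        return {'histograms': [
            {'key': 'sender_ips', 'name': 'Sender IPs',
             'description': 'IPs the employee commonly sends from', 'values': []},
        ]}

results = get_employee_identity_analysis_genome_data_command(
    StubClient(), {'email_address': 'user@example.com'})
print(results.readable_output)      # markdown table titled "Analysis of ..."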
|
22,076 |
def change_smb_enum_shares(table):
"""Adapt structured data from script smb-enum-shares so that it is
easy to query when inserted in DB.
"""
if not table:
return table
result = {}
for field in ["account_used", "note"]:
if field in table:
result[field] = table.pop(field)
result["shares"] = [
dict(value, Share=key) for key, value in viewitems(table)
]
result["Shares"] = xmlnmap.change_smb_enum_shares_migrate(table)
return result
|
def change_smb_enum_shares(table):
"""Adapt structured data from script smb-enum-shares so that it is
easy to query when inserted in DB.
"""
if not table:
return table
result = {}
for field in ["account_used", "note"]:
if field in table:
result[field] = table.pop(field)
result["shares"] = [
dict(value, Share=key) for key, value in viewitems(table)
]
result["Shares"] = change_smb_enum_shares_migrate(table)
return result
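# Worked example of the reshaping above, assuming viewitems(d) behaves like
# d.items(); the sample script output is made up.
table = {
    "account_used": "guest",
    "note": "ERROR: Enumerating shares failed, guest access probably disabled",
    "IPC$": {"Type": "STYPE_IPC_HIDDEN", "Anonymous access": "READ"},
    "ADMIN$": {"Type": "STYPE_DISKTREE_HIDDEN", "Anonymous access": "<none>"},
}
result = change_smb_enum_shares(dict(table))
# result["account_used"] == "guest", result["note"] == "ERROR: ..."
# result["shares"] is a list of the remaining entries with the share name
# folded in, e.g. {"Type": "STYPE_IPC_HIDDEN", "Anonymous access": "READ",
#                  "Share": "IPC$"}
# result["Shares"] holds the migrated structure from
# change_smb_enum_shares_migrate().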
|
18,050 |
def get_attempts_str(wait_time, nattempts):
attempts = 'attempt' if nattempts == 1 else 'attempts'
if nattempts > 1:
attempts_part = ' after {0:0.2f}s and {1:d} {2}'.format(
wait_time, nattempts, attempts)
else:
        # Don't print anything if we succeeded immediately
attempts_part = ''
return attempts_part
|
def get_attempts_str(wait_time, nattempts):
attempts = 'attempt' if nattempts == 1 else 'attempts'
if nattempts >= 1:
attempts_part = ' after {0:0.2f}s and {1:d} {2}'.format(
wait_time, nattempts, attempts)
else:
        # Don't print anything if we succeeded immediately
attempts_part = ''
return attempts_part
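# Example return values, derived from the format string above (the first case
# differs between the two versions: the original returns '' for one attempt):
get_attempts_str(1.50, 1)   # ' after 1.50s and 1 attempt'
get_attempts_str(0.75, 3)   # ' after 0.75s and 3 attempts'
get_attempts_str(0.00, 0)   # ''  (nothing reported when it succeeded immediately)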
|
5,754 |
def basinhopping(func, x0, niter=100, T=1.0, stepsize=0.5,
minimizer_kwargs=None, take_step=None, accept_test=None,
callback=None, interval=50, disp=False, niter_success=None,
seed=None, accept_rate=0.5, factor=0.9):
"""Find the global minimum of a function using the basin-hopping algorithm.
Basin-hopping is a two-phase method that combines a global stepping
algorithm with local minimization at each step. Designed to mimic
the natural process of energy minimization of clusters of atoms, it works
well for similar problems with "funnel-like, but rugged" energy landscapes
[5]_.
As the step-taking, step acceptance, and minimization methods are all
customizable, this function can also be used to implement other two-phase
methods.
Parameters
----------
func : callable ``f(x, *args)``
Function to be optimized. ``args`` can be passed as an optional item
in the dict ``minimizer_kwargs``
x0 : array_like
Initial guess.
niter : integer, optional
The number of basin-hopping iterations. There will be a total of
``niter + 1`` runs of the local minimizer.
T : float, optional
The "temperature" parameter for the accept or reject criterion. Higher
"temperatures" mean that larger jumps in function value will be
accepted. For best results ``T`` should be comparable to the
separation (in function value) between local minima.
stepsize : float, optional
Maximum step size for use in the random displacement.
minimizer_kwargs : dict, optional
Extra keyword arguments to be passed to the local minimizer
``scipy.optimize.minimize()`` Some important options could be:
method : str
The minimization method (e.g. ``"L-BFGS-B"``)
args : tuple
Extra arguments passed to the objective function (``func``) and
its derivatives (Jacobian, Hessian).
take_step : callable ``take_step(x)``, optional
Replace the default step-taking routine with this routine. The default
step-taking routine is a random displacement of the coordinates, but
other step-taking algorithms may be better for some systems.
``take_step`` can optionally have the attribute ``take_step.stepsize``.
If this attribute exists, then ``basinhopping`` will adjust
``take_step.stepsize`` in order to try to optimize the global minimum
search.
    accept_test : callable, ``accept_test(f_new=f_new, x_new=x_new, f_old=f_old, x_old=x_old)``, optional
Define a test which will be used to judge whether or not to accept the
step. This will be used in addition to the Metropolis test based on
"temperature" ``T``. The acceptable return values are True,
        False, or ``"force accept"``. If any of the tests return False
        then the step is rejected. A return value of ``"force accept"``
        overrides the other tests and the step is accepted. This can be
        used, for example, to forcefully escape from a local minimum that
        ``basinhopping`` is trapped in.
callback : callable, ``callback(x, f, accept)``, optional
A callback function which will be called for all minima found. ``x``
and ``f`` are the coordinates and function value of the trial minimum,
and ``accept`` is whether or not that minimum was accepted. This can
be used, for example, to save the lowest N minima found. Also,
``callback`` can be used to specify a user defined stop criterion by
optionally returning True to stop the ``basinhopping`` routine.
interval : integer, optional
interval for how often to update the ``stepsize``
disp : bool, optional
Set to True to print status messages
niter_success : integer, optional
Stop the run if the global minimum candidate remains the same for this
number of iterations.
seed : {None, int, `numpy.random.Generator`,
`numpy.random.RandomState`}, optional
If `seed` is None (or `np.random`), the `numpy.random.RandomState`
singleton is used.
If `seed` is an int, a new ``RandomState`` instance is used,
seeded with `seed`.
If `seed` is already a ``Generator`` or ``RandomState`` instance then
that instance is used.
Specify `seed` for repeatable minimizations. The random numbers
generated with this seed only affect the default Metropolis
`accept_test` and the default `take_step`. If you supply your own
`take_step` and `accept_test`, and these functions use random
number generation, then those functions are responsible for the state
of their random number generator.
accept_rate : float, optional
The target acceptance rate that is used to adjust the ``stepsize``.
If the current acceptance rate is greater than the target,
then the ``stepsize`` is increased. Otherwise, it is decreased.
Default is 0.5.
factor : float, optional
The ``stepsize`` is multiplied or divided by this factor upon each
update. Default is 0.9.
Returns
-------
res : OptimizeResult
The optimization result represented as a ``OptimizeResult`` object.
Important attributes are: ``x`` the solution array, ``fun`` the value
of the function at the solution, and ``message`` which describes the
cause of the termination. The ``OptimizeResult`` object returned by the
selected minimizer at the lowest minimum is also contained within this
object and can be accessed through the ``lowest_optimization_result``
attribute. See `OptimizeResult` for a description of other attributes.
See Also
--------
minimize :
The local minimization function called once for each basinhopping step.
``minimizer_kwargs`` is passed to this routine.
Notes
-----
Basin-hopping is a stochastic algorithm which attempts to find the global
minimum of a smooth scalar function of one or more variables [1]_ [2]_ [3]_
[4]_. The algorithm in its current form was described by David Wales and
Jonathan Doye [2]_ http://www-wales.ch.cam.ac.uk/.
The algorithm is iterative with each cycle composed of the following
features
1) random perturbation of the coordinates
2) local minimization
3) accept or reject the new coordinates based on the minimized function
value
The acceptance test used here is the Metropolis criterion of standard Monte
Carlo algorithms, although there are many other possibilities [3]_.
This global minimization method has been shown to be extremely efficient
for a wide variety of problems in physics and chemistry. It is
particularly useful when the function has many minima separated by large
barriers. See the Cambridge Cluster Database
http://www-wales.ch.cam.ac.uk/CCD.html for databases of molecular systems
that have been optimized primarily using basin-hopping. This database
includes minimization problems exceeding 300 degrees of freedom.
See the free software program GMIN (http://www-wales.ch.cam.ac.uk/GMIN) for
a Fortran implementation of basin-hopping. This implementation has many
different variations of the procedure described above, including more
advanced step taking algorithms and alternate acceptance criterion.
For stochastic global optimization there is no way to determine if the true
global minimum has actually been found. Instead, as a consistency check,
the algorithm can be run from a number of different random starting points
to ensure the lowest minimum found in each example has converged to the
global minimum. For this reason, ``basinhopping`` will by default simply
run for the number of iterations ``niter`` and return the lowest minimum
found. It is left to the user to ensure that this is in fact the global
minimum.
Choosing ``stepsize``: This is a crucial parameter in ``basinhopping`` and
depends on the problem being solved. The step is chosen uniformly in the
region from x0-stepsize to x0+stepsize, in each dimension. Ideally, it
should be comparable to the typical separation (in argument values) between
local minima of the function being optimized. ``basinhopping`` will, by
default, adjust ``stepsize`` to find an optimal value, but this may take
many iterations. You will get quicker results if you set a sensible
initial value for ``stepsize``.
Choosing ``T``: The parameter ``T`` is the "temperature" used in the
Metropolis criterion. Basinhopping steps are always accepted if
``func(xnew) < func(xold)``. Otherwise, they are accepted with
probability::
exp( -(func(xnew) - func(xold)) / T )
    So, for best results, ``T`` should be comparable to the typical
difference (in function values) between local minima. (The height of
"walls" between local minima is irrelevant.)
If ``T`` is 0, the algorithm becomes Monotonic Basin-Hopping, in which all
steps that increase energy are rejected.
.. versionadded:: 0.12.0
References
----------
.. [1] Wales, David J. 2003, Energy Landscapes, Cambridge University Press,
Cambridge, UK.
.. [2] Wales, D J, and Doye J P K, Global Optimization by Basin-Hopping and
the Lowest Energy Structures of Lennard-Jones Clusters Containing up to
110 Atoms. Journal of Physical Chemistry A, 1997, 101, 5111.
.. [3] Li, Z. and Scheraga, H. A., Monte Carlo-minimization approach to the
multiple-minima problem in protein folding, Proc. Natl. Acad. Sci. USA,
1987, 84, 6611.
.. [4] Wales, D. J. and Scheraga, H. A., Global optimization of clusters,
crystals, and biomolecules, Science, 1999, 285, 1368.
.. [5] Olson, B., Hashmi, I., Molloy, K., and Shehu1, A., Basin Hopping as
a General and Versatile Optimization Framework for the Characterization
of Biological Macromolecules, Advances in Artificial Intelligence,
Volume 2012 (2012), Article ID 674832, :doi:`10.1155/2012/674832`
Examples
--------
The following example is a 1-D minimization problem, with many
local minima superimposed on a parabola.
>>> from scipy.optimize import basinhopping
>>> func = lambda x: np.cos(14.5 * x - 0.3) + (x + 0.2) * x
>>> x0=[1.]
Basinhopping, internally, uses a local minimization algorithm. We will use
the parameter ``minimizer_kwargs`` to tell basinhopping which algorithm to
use and how to set up that minimizer. This parameter will be passed to
``scipy.optimize.minimize()``.
>>> minimizer_kwargs = {"method": "BFGS"}
>>> ret = basinhopping(func, x0, minimizer_kwargs=minimizer_kwargs,
... niter=200)
>>> print("global minimum: x = %.4f, f(x0) = %.4f" % (ret.x, ret.fun))
global minimum: x = -0.1951, f(x0) = -1.0009
Next consider a 2-D minimization problem. Also, this time, we
will use gradient information to significantly speed up the search.
>>> def func2d(x):
... f = np.cos(14.5 * x[0] - 0.3) + (x[1] + 0.2) * x[1] + (x[0] +
... 0.2) * x[0]
... df = np.zeros(2)
... df[0] = -14.5 * np.sin(14.5 * x[0] - 0.3) + 2. * x[0] + 0.2
... df[1] = 2. * x[1] + 0.2
... return f, df
We'll also use a different local minimization algorithm. Also, we must tell
the minimizer that our function returns both energy and gradient (Jacobian).
>>> minimizer_kwargs = {"method":"L-BFGS-B", "jac":True}
>>> x0 = [1.0, 1.0]
>>> ret = basinhopping(func2d, x0, minimizer_kwargs=minimizer_kwargs,
... niter=200)
>>> print("global minimum: x = [%.4f, %.4f], f(x0) = %.4f" % (ret.x[0],
... ret.x[1],
... ret.fun))
global minimum: x = [-0.1951, -0.1000], f(x0) = -1.0109
Here is an example using a custom step-taking routine. Imagine you want
the first coordinate to take larger steps than the rest of the coordinates.
This can be implemented like so:
>>> class MyTakeStep:
... def __init__(self, stepsize=0.5):
... self.stepsize = stepsize
... self.rng = np.random.default_rng()
... def __call__(self, x):
... s = self.stepsize
... x[0] += self.rng.uniform(-2.*s, 2.*s)
... x[1:] += self.rng.uniform(-s, s, x[1:].shape)
... return x
Since ``MyTakeStep.stepsize`` exists basinhopping will adjust the magnitude
of ``stepsize`` to optimize the search. We'll use the same 2-D function as
before
>>> mytakestep = MyTakeStep()
>>> ret = basinhopping(func2d, x0, minimizer_kwargs=minimizer_kwargs,
... niter=200, take_step=mytakestep)
>>> print("global minimum: x = [%.4f, %.4f], f(x0) = %.4f" % (ret.x[0],
... ret.x[1],
... ret.fun))
global minimum: x = [-0.1951, -0.1000], f(x0) = -1.0109
Now, let's do an example using a custom callback function which prints the
value of every minimum found
>>> def print_fun(x, f, accepted):
... print("at minimum %.4f accepted %d" % (f, int(accepted)))
We'll run it for only 10 basinhopping steps this time.
>>> rng = np.random.default_rng()
>>> ret = basinhopping(func2d, x0, minimizer_kwargs=minimizer_kwargs,
... niter=10, callback=print_fun, seed=rng)
at minimum 0.4159 accepted 1
at minimum -0.4317 accepted 1
at minimum -1.0109 accepted 1
at minimum -0.9073 accepted 1
at minimum -0.4317 accepted 0
at minimum -0.1021 accepted 1
at minimum -0.7425 accepted 1
at minimum -0.9073 accepted 1
at minimum -0.4317 accepted 0
at minimum -0.7425 accepted 1
at minimum -0.9073 accepted 1
The minimum at -1.0109 is actually the global minimum, found already on the
8th iteration.
Now let's implement bounds on the problem using a custom ``accept_test``:
>>> class MyBounds:
... def __init__(self, xmax=[1.1,1.1], xmin=[-1.1,-1.1] ):
... self.xmax = np.array(xmax)
... self.xmin = np.array(xmin)
... def __call__(self, **kwargs):
... x = kwargs["x_new"]
... tmax = bool(np.all(x <= self.xmax))
... tmin = bool(np.all(x >= self.xmin))
... return tmax and tmin
>>> mybounds = MyBounds()
>>> ret = basinhopping(func2d, x0, minimizer_kwargs=minimizer_kwargs,
... niter=10, accept_test=mybounds)
"""
x0 = np.array(x0)
# set up the np.random generator
rng = check_random_state(seed)
# set up minimizer
if minimizer_kwargs is None:
minimizer_kwargs = dict()
wrapped_minimizer = MinimizerWrapper(scipy.optimize.minimize, func,
**minimizer_kwargs)
# set up step-taking algorithm
if take_step is not None:
if not callable(take_step):
raise TypeError("take_step must be callable")
# if take_step.stepsize exists then use AdaptiveStepsize to control
# take_step.stepsize
if hasattr(take_step, "stepsize"):
take_step_wrapped = AdaptiveStepsize(take_step, interval=interval,
accept_rate=accept_rate,
factor=factor,
verbose=disp)
else:
take_step_wrapped = take_step
else:
# use default
displace = RandomDisplacement(stepsize=stepsize, random_gen=rng)
take_step_wrapped = AdaptiveStepsize(displace, interval=interval,
accept_rate=accept_rate,
factor=factor,
verbose=disp)
# set up accept tests
accept_tests = []
if accept_test is not None:
if not callable(accept_test):
raise TypeError("accept_test must be callable")
accept_tests = [accept_test]
# use default
metropolis = Metropolis(T, random_gen=rng)
accept_tests.append(metropolis)
if niter_success is None:
niter_success = niter + 2
bh = BasinHoppingRunner(x0, wrapped_minimizer, take_step_wrapped,
accept_tests, disp=disp)
# The wrapped minimizer is called once during construction of
# BasinHoppingRunner, so run the callback
if callable(callback):
callback(bh.storage.minres.x, bh.storage.minres.fun, True)
# start main iteration loop
count, i = 0, 0
message = ["requested number of basinhopping iterations completed"
" successfully"]
for i in range(niter):
new_global_min = bh.one_cycle()
if callable(callback):
# should we pass a copy of x?
val = callback(bh.xtrial, bh.energy_trial, bh.accept)
if val is not None:
if val:
                    message = ["callback function requested stop early by"
                               " returning True"]
break
count += 1
if new_global_min:
count = 0
elif count > niter_success:
message = ["success condition satisfied"]
break
# prepare return object
res = bh.res
res.lowest_optimization_result = bh.storage.get_lowest()
res.x = np.copy(res.lowest_optimization_result.x)
res.fun = res.lowest_optimization_result.fun
res.message = message
res.nit = i + 1
res.success = res.lowest_optimization_result.success
return res
|
def basinhopping(func, x0, niter=100, T=1.0, stepsize=0.5,
minimizer_kwargs=None, take_step=None, accept_test=None,
callback=None, interval=50, disp=False, niter_success=None,
seed=None, *, accept_rate=0.5, factor=0.9):
"""Find the global minimum of a function using the basin-hopping algorithm.
Basin-hopping is a two-phase method that combines a global stepping
algorithm with local minimization at each step. Designed to mimic
the natural process of energy minimization of clusters of atoms, it works
well for similar problems with "funnel-like, but rugged" energy landscapes
[5]_.
As the step-taking, step acceptance, and minimization methods are all
customizable, this function can also be used to implement other two-phase
methods.
Parameters
----------
func : callable ``f(x, *args)``
Function to be optimized. ``args`` can be passed as an optional item
in the dict ``minimizer_kwargs``
x0 : array_like
Initial guess.
niter : integer, optional
The number of basin-hopping iterations. There will be a total of
``niter + 1`` runs of the local minimizer.
T : float, optional
The "temperature" parameter for the accept or reject criterion. Higher
"temperatures" mean that larger jumps in function value will be
accepted. For best results ``T`` should be comparable to the
separation (in function value) between local minima.
stepsize : float, optional
Maximum step size for use in the random displacement.
minimizer_kwargs : dict, optional
Extra keyword arguments to be passed to the local minimizer
``scipy.optimize.minimize()`` Some important options could be:
method : str
The minimization method (e.g. ``"L-BFGS-B"``)
args : tuple
Extra arguments passed to the objective function (``func``) and
its derivatives (Jacobian, Hessian).
take_step : callable ``take_step(x)``, optional
Replace the default step-taking routine with this routine. The default
step-taking routine is a random displacement of the coordinates, but
other step-taking algorithms may be better for some systems.
``take_step`` can optionally have the attribute ``take_step.stepsize``.
If this attribute exists, then ``basinhopping`` will adjust
``take_step.stepsize`` in order to try to optimize the global minimum
search.
    accept_test : callable, ``accept_test(f_new=f_new, x_new=x_new, f_old=f_old, x_old=x_old)``, optional
Define a test which will be used to judge whether or not to accept the
step. This will be used in addition to the Metropolis test based on
"temperature" ``T``. The acceptable return values are True,
        False, or ``"force accept"``. If any of the tests return False
        then the step is rejected. A return value of ``"force accept"``
        overrides the other tests and the step is accepted. This can be
        used, for example, to forcefully escape from a local minimum that
        ``basinhopping`` is trapped in.
callback : callable, ``callback(x, f, accept)``, optional
A callback function which will be called for all minima found. ``x``
and ``f`` are the coordinates and function value of the trial minimum,
and ``accept`` is whether or not that minimum was accepted. This can
be used, for example, to save the lowest N minima found. Also,
``callback`` can be used to specify a user defined stop criterion by
optionally returning True to stop the ``basinhopping`` routine.
interval : integer, optional
interval for how often to update the ``stepsize``
disp : bool, optional
Set to True to print status messages
niter_success : integer, optional
Stop the run if the global minimum candidate remains the same for this
number of iterations.
seed : {None, int, `numpy.random.Generator`,
`numpy.random.RandomState`}, optional
If `seed` is None (or `np.random`), the `numpy.random.RandomState`
singleton is used.
If `seed` is an int, a new ``RandomState`` instance is used,
seeded with `seed`.
If `seed` is already a ``Generator`` or ``RandomState`` instance then
that instance is used.
Specify `seed` for repeatable minimizations. The random numbers
generated with this seed only affect the default Metropolis
`accept_test` and the default `take_step`. If you supply your own
`take_step` and `accept_test`, and these functions use random
number generation, then those functions are responsible for the state
of their random number generator.
accept_rate : float, optional
The target acceptance rate that is used to adjust the ``stepsize``.
If the current acceptance rate is greater than the target,
then the ``stepsize`` is increased. Otherwise, it is decreased.
Default is 0.5.
factor : float, optional
The ``stepsize`` is multiplied or divided by this factor upon each
update. Default is 0.9.
Returns
-------
res : OptimizeResult
The optimization result represented as a ``OptimizeResult`` object.
Important attributes are: ``x`` the solution array, ``fun`` the value
of the function at the solution, and ``message`` which describes the
cause of the termination. The ``OptimizeResult`` object returned by the
selected minimizer at the lowest minimum is also contained within this
object and can be accessed through the ``lowest_optimization_result``
attribute. See `OptimizeResult` for a description of other attributes.
See Also
--------
minimize :
The local minimization function called once for each basinhopping step.
``minimizer_kwargs`` is passed to this routine.
Notes
-----
Basin-hopping is a stochastic algorithm which attempts to find the global
minimum of a smooth scalar function of one or more variables [1]_ [2]_ [3]_
[4]_. The algorithm in its current form was described by David Wales and
Jonathan Doye [2]_ http://www-wales.ch.cam.ac.uk/.
The algorithm is iterative with each cycle composed of the following
features
1) random perturbation of the coordinates
2) local minimization
3) accept or reject the new coordinates based on the minimized function
value
The acceptance test used here is the Metropolis criterion of standard Monte
Carlo algorithms, although there are many other possibilities [3]_.
This global minimization method has been shown to be extremely efficient
for a wide variety of problems in physics and chemistry. It is
particularly useful when the function has many minima separated by large
barriers. See the Cambridge Cluster Database
http://www-wales.ch.cam.ac.uk/CCD.html for databases of molecular systems
that have been optimized primarily using basin-hopping. This database
includes minimization problems exceeding 300 degrees of freedom.
See the free software program GMIN (http://www-wales.ch.cam.ac.uk/GMIN) for
a Fortran implementation of basin-hopping. This implementation has many
different variations of the procedure described above, including more
advanced step taking algorithms and alternate acceptance criterion.
For stochastic global optimization there is no way to determine if the true
global minimum has actually been found. Instead, as a consistency check,
the algorithm can be run from a number of different random starting points
to ensure the lowest minimum found in each example has converged to the
global minimum. For this reason, ``basinhopping`` will by default simply
run for the number of iterations ``niter`` and return the lowest minimum
found. It is left to the user to ensure that this is in fact the global
minimum.
Choosing ``stepsize``: This is a crucial parameter in ``basinhopping`` and
depends on the problem being solved. The step is chosen uniformly in the
region from x0-stepsize to x0+stepsize, in each dimension. Ideally, it
should be comparable to the typical separation (in argument values) between
local minima of the function being optimized. ``basinhopping`` will, by
default, adjust ``stepsize`` to find an optimal value, but this may take
many iterations. You will get quicker results if you set a sensible
initial value for ``stepsize``.
Choosing ``T``: The parameter ``T`` is the "temperature" used in the
Metropolis criterion. Basinhopping steps are always accepted if
``func(xnew) < func(xold)``. Otherwise, they are accepted with
probability::
exp( -(func(xnew) - func(xold)) / T )
    So, for best results, ``T`` should be comparable to the typical
difference (in function values) between local minima. (The height of
"walls" between local minima is irrelevant.)
If ``T`` is 0, the algorithm becomes Monotonic Basin-Hopping, in which all
steps that increase energy are rejected.
.. versionadded:: 0.12.0
References
----------
.. [1] Wales, David J. 2003, Energy Landscapes, Cambridge University Press,
Cambridge, UK.
.. [2] Wales, D J, and Doye J P K, Global Optimization by Basin-Hopping and
the Lowest Energy Structures of Lennard-Jones Clusters Containing up to
110 Atoms. Journal of Physical Chemistry A, 1997, 101, 5111.
.. [3] Li, Z. and Scheraga, H. A., Monte Carlo-minimization approach to the
multiple-minima problem in protein folding, Proc. Natl. Acad. Sci. USA,
1987, 84, 6611.
.. [4] Wales, D. J. and Scheraga, H. A., Global optimization of clusters,
crystals, and biomolecules, Science, 1999, 285, 1368.
.. [5] Olson, B., Hashmi, I., Molloy, K., and Shehu1, A., Basin Hopping as
a General and Versatile Optimization Framework for the Characterization
of Biological Macromolecules, Advances in Artificial Intelligence,
Volume 2012 (2012), Article ID 674832, :doi:`10.1155/2012/674832`
Examples
--------
The following example is a 1-D minimization problem, with many
local minima superimposed on a parabola.
>>> from scipy.optimize import basinhopping
>>> func = lambda x: np.cos(14.5 * x - 0.3) + (x + 0.2) * x
>>> x0=[1.]
Basinhopping, internally, uses a local minimization algorithm. We will use
the parameter ``minimizer_kwargs`` to tell basinhopping which algorithm to
use and how to set up that minimizer. This parameter will be passed to
``scipy.optimize.minimize()``.
>>> minimizer_kwargs = {"method": "BFGS"}
>>> ret = basinhopping(func, x0, minimizer_kwargs=minimizer_kwargs,
... niter=200)
>>> print("global minimum: x = %.4f, f(x0) = %.4f" % (ret.x, ret.fun))
global minimum: x = -0.1951, f(x0) = -1.0009
Next consider a 2-D minimization problem. Also, this time, we
will use gradient information to significantly speed up the search.
>>> def func2d(x):
... f = np.cos(14.5 * x[0] - 0.3) + (x[1] + 0.2) * x[1] + (x[0] +
... 0.2) * x[0]
... df = np.zeros(2)
... df[0] = -14.5 * np.sin(14.5 * x[0] - 0.3) + 2. * x[0] + 0.2
... df[1] = 2. * x[1] + 0.2
... return f, df
We'll also use a different local minimization algorithm. Also, we must tell
the minimizer that our function returns both energy and gradient (Jacobian).
>>> minimizer_kwargs = {"method":"L-BFGS-B", "jac":True}
>>> x0 = [1.0, 1.0]
>>> ret = basinhopping(func2d, x0, minimizer_kwargs=minimizer_kwargs,
... niter=200)
>>> print("global minimum: x = [%.4f, %.4f], f(x0) = %.4f" % (ret.x[0],
... ret.x[1],
... ret.fun))
global minimum: x = [-0.1951, -0.1000], f(x0) = -1.0109
Here is an example using a custom step-taking routine. Imagine you want
the first coordinate to take larger steps than the rest of the coordinates.
This can be implemented like so:
>>> class MyTakeStep:
... def __init__(self, stepsize=0.5):
... self.stepsize = stepsize
... self.rng = np.random.default_rng()
... def __call__(self, x):
... s = self.stepsize
... x[0] += self.rng.uniform(-2.*s, 2.*s)
... x[1:] += self.rng.uniform(-s, s, x[1:].shape)
... return x
Since ``MyTakeStep.stepsize`` exists basinhopping will adjust the magnitude
of ``stepsize`` to optimize the search. We'll use the same 2-D function as
before
>>> mytakestep = MyTakeStep()
>>> ret = basinhopping(func2d, x0, minimizer_kwargs=minimizer_kwargs,
... niter=200, take_step=mytakestep)
>>> print("global minimum: x = [%.4f, %.4f], f(x0) = %.4f" % (ret.x[0],
... ret.x[1],
... ret.fun))
global minimum: x = [-0.1951, -0.1000], f(x0) = -1.0109
Now, let's do an example using a custom callback function which prints the
value of every minimum found
>>> def print_fun(x, f, accepted):
... print("at minimum %.4f accepted %d" % (f, int(accepted)))
We'll run it for only 10 basinhopping steps this time.
>>> rng = np.random.default_rng()
>>> ret = basinhopping(func2d, x0, minimizer_kwargs=minimizer_kwargs,
... niter=10, callback=print_fun, seed=rng)
at minimum 0.4159 accepted 1
at minimum -0.4317 accepted 1
at minimum -1.0109 accepted 1
at minimum -0.9073 accepted 1
at minimum -0.4317 accepted 0
at minimum -0.1021 accepted 1
at minimum -0.7425 accepted 1
at minimum -0.9073 accepted 1
at minimum -0.4317 accepted 0
at minimum -0.7425 accepted 1
at minimum -0.9073 accepted 1
    The minimum at -1.0109 is actually the global minimum, found already within
    the first few iterations.
Now let's implement bounds on the problem using a custom ``accept_test``:
>>> class MyBounds:
... def __init__(self, xmax=[1.1,1.1], xmin=[-1.1,-1.1] ):
... self.xmax = np.array(xmax)
... self.xmin = np.array(xmin)
... def __call__(self, **kwargs):
... x = kwargs["x_new"]
... tmax = bool(np.all(x <= self.xmax))
... tmin = bool(np.all(x >= self.xmin))
... return tmax and tmin
>>> mybounds = MyBounds()
>>> ret = basinhopping(func2d, x0, minimizer_kwargs=minimizer_kwargs,
... niter=10, accept_test=mybounds)
"""
x0 = np.array(x0)
# set up the np.random generator
rng = check_random_state(seed)
# set up minimizer
if minimizer_kwargs is None:
minimizer_kwargs = dict()
wrapped_minimizer = MinimizerWrapper(scipy.optimize.minimize, func,
**minimizer_kwargs)
# set up step-taking algorithm
if take_step is not None:
if not callable(take_step):
raise TypeError("take_step must be callable")
# if take_step.stepsize exists then use AdaptiveStepsize to control
# take_step.stepsize
if hasattr(take_step, "stepsize"):
take_step_wrapped = AdaptiveStepsize(take_step, interval=interval,
accept_rate=accept_rate,
factor=factor,
verbose=disp)
else:
take_step_wrapped = take_step
else:
# use default
displace = RandomDisplacement(stepsize=stepsize, random_gen=rng)
take_step_wrapped = AdaptiveStepsize(displace, interval=interval,
accept_rate=accept_rate,
factor=factor,
verbose=disp)
# set up accept tests
accept_tests = []
if accept_test is not None:
if not callable(accept_test):
raise TypeError("accept_test must be callable")
accept_tests = [accept_test]
# use default
metropolis = Metropolis(T, random_gen=rng)
accept_tests.append(metropolis)
if niter_success is None:
niter_success = niter + 2
bh = BasinHoppingRunner(x0, wrapped_minimizer, take_step_wrapped,
accept_tests, disp=disp)
# The wrapped minimizer is called once during construction of
# BasinHoppingRunner, so run the callback
if callable(callback):
callback(bh.storage.minres.x, bh.storage.minres.fun, True)
# start main iteration loop
count, i = 0, 0
message = ["requested number of basinhopping iterations completed"
" successfully"]
for i in range(niter):
new_global_min = bh.one_cycle()
if callable(callback):
# should we pass a copy of x?
val = callback(bh.xtrial, bh.energy_trial, bh.accept)
if val is not None:
if val:
message = ["callback function requested stop early by"
"returning True"]
break
count += 1
if new_global_min:
count = 0
elif count > niter_success:
message = ["success condition satisfied"]
break
# prepare return object
res = bh.res
res.lowest_optimization_result = bh.storage.get_lowest()
res.x = np.copy(res.lowest_optimization_result.x)
res.fun = res.lowest_optimization_result.fun
res.message = message
res.nit = i + 1
res.success = res.lowest_optimization_result.success
return res
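
# A minimal sketch, assuming the standard Metropolis rule; this is not the
# SciPy implementation itself, only an illustration of the default accept test
# wired in above: downhill moves are always accepted, and an uphill move of
# size dE is accepted with probability exp(-dE / T). At T == 0 every uphill
# move is rejected, which is the Monotonic Basin-Hopping limit mentioned in
# the docstring.
import numpy as np


def metropolis_accept_sketch(e_new, e_old, T, rng=None):
    """Return True if a trial energy e_new is accepted against e_old."""
    if e_new <= e_old:
        return True   # downhill (or equal) moves are always accepted
    if T == 0:
        return False  # T == 0: reject every uphill move
    if rng is None:
        rng = np.random.default_rng()
    return rng.uniform() < np.exp(-(e_new - e_old) / T)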
|
30,145 |
def fetch_production(zone_key='AR', session=None, target_datetime=None, logger=logging.getLogger(__name__)):
"""
Requests the last known production mix (in MW) of a given country
Arguments:
zone_key (optional) -- used in case a parser is able to fetch multiple countries
    target_datetime: if we want to parse a specific time and not the latest
logger: where to log useful information
Return:
A dictionary in the form:
{
'zoneKey': 'FR',
'datetime': '2017-01-01T00:00:00Z',
'production': {
'biomass': 0.0,
'coal': 0.0,
'gas': 0.0,
'hydro': 0.0,
'nuclear': null,
'oil': 0.0,
'solar': 0.0,
'wind': 0.0,
'geothermal': 0.0,
'unknown': 0.0
},
'storage': {
'hydro': -10.0,
},
'source': 'mysource.com'
}
"""
if target_datetime is not None:
raise NotImplementedError('This parser is not yet able to parse past dates')
gdt = get_datetime(session=None)
thermal = get_thermal(session, logger)
hydro = get_hydro_and_renewables(session, logger)
# discharging is given positive value in data, opposite to EM
hydro_storage = hydro.pop('hydro_storage')
if hydro_storage == 0.0:
em_hydro_storage = hydro_storage
else:
em_hydro_storage = hydro_storage*-1
unknown = thermal.pop('unknown') + hydro.pop('unknown')
production = {**hydro, **thermal}
production['unknown'] = unknown
production_mix = {
'zoneKey': zone_key,
'datetime': gdt['datetime'],
'production': production,
'storage': {
'hydro': em_hydro_storage,
},
'source': 'portalweb.cammesa.com'
}
return production_mix
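
# Illustration of the sign convention handled above (hypothetical value): if
# the source reports hydro_storage = 10.0 (discharging, positive in the data),
# the value exported under 'storage': {'hydro': ...} becomes -10.0, matching
# the convention shown in the docstring example.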
|
def fetch_production(zone_key='AR', session=None, target_datetime=None, logger=logging.getLogger(__name__)):
"""
Requests the last known production mix (in MW) of a given country
Arguments:
zone_key (optional) -- used in case a parser is able to fetch multiple countries
    target_datetime: if we want to parse a specific time and not the latest
logger: where to log useful information
Return:
A dictionary in the form:
{
'zoneKey': 'FR',
'datetime': '2017-01-01T00:00:00Z',
'production': {
'biomass': 0.0,
'coal': 0.0,
'gas': 0.0,
'hydro': 0.0,
'nuclear': null,
'oil': 0.0,
'solar': 0.0,
'wind': 0.0,
'geothermal': 0.0,
'unknown': 0.0
},
'storage': {
'hydro': -10.0,
},
'source': 'mysource.com'
}
"""
if target_datetime is not None:
raise NotImplementedError('This parser is not yet able to parse past dates')
gdt = get_datetime(session=None)
thermal = get_thermal(session, logger)
hydro = get_hydro_and_renewables(session, logger)
# discharging is given positive value in data, opposite to EM
em_hydro_storage = -1 * hydro.pop('hydro_storage')
if hydro_storage == 0.0:
em_hydro_storage = hydro_storage
else:
em_hydro_storage = hydro_storage*-1
unknown = thermal.pop('unknown') + hydro.pop('unknown')
production = {**hydro, **thermal}
production['unknown'] = unknown
production_mix = {
'zoneKey': zone_key,
'datetime': gdt['datetime'],
'production': production,
'storage': {
'hydro': em_hydro_storage,
},
'source': 'portalweb.cammesa.com'
}
return production_mix
|
28,048 |
def __get_report_hashes(f: TextIOWrapper) -> List[str]:
""" Get report hashes from the given file. """
return [h for h in f.read().split('\n') if h]
|
def __get_report_hashes(f: TextIOWrapper) -> List[str]:
""" Get report hashes from the given file. """
return [h for h in f.readlines() if h]
|
30,535 |
def find_indicators_with_limit(indicator_query: str, limit: int, offset: int) -> list:
"""
Finds indicators using demisto.searchIndicators
"""
# calculate the starting page (each page holds 200 entries)
if offset:
next_page = int(offset / 200)
# set the offset from the starting page
parsed_offset = offset - (200 * next_page)
else:
next_page = 0
parsed_offset = 0
iocs, _ = find_indicators_with_limit_loop(indicator_query, limit, next_page=next_page)
return iocs[parsed_offset:limit + parsed_offset]
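
# Worked example of the paging arithmetic above (hypothetical numbers): with
# offset=450 and pages of 200 entries, next_page = int(450 / 200) = 2 and
# parsed_offset = 450 - 200 * 2 = 50, so fetching starts at page 2 and the
# first 50 entries of the fetched block are skipped before the limit applies.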
|
def find_indicators_with_limit(indicator_query: str, limit: int, offset: int) -> list:
"""
Finds indicators using demisto.searchIndicators
"""
# calculate the starting page (each page holds 200 entries)
if offset:
next_page = int(offset / PAGE_SIZE)
# set the offset from the starting page
parsed_offset = offset - (200 * next_page)
else:
next_page = 0
parsed_offset = 0
iocs, _ = find_indicators_with_limit_loop(indicator_query, limit, next_page=next_page)
return iocs[parsed_offset:limit + parsed_offset]
|
30,354 |
def get_branch_command():
args = demisto.args()
branch_name = args.get('branch_name')
response = get_branch(branch_name)
commit = response.get('commit', {})
author = commit.get('author', {})
parents = commit.get('parents', [])
ec_object = {
'Name': response.get('name'),
'CommitSHA': commit.get('sha'),
'CommitNodeID': commit.get('node_id'),
'CommitAuthorID': author.get('id'),
'CommitAuthorLogin': author.get('login'),
'CommitParentSHA': [parent.get('sha') for parent in parents],
'Protected': response.get('protected')
}
ec = {
'GitHub.Branch(val.Name === obj.name && val.CommitSHA === obj.CommitSHA)': ec_object
}
human_readable = tableToMarkdown('Branch', ec_object, removeNull=True)
return_outputs(readable_output=human_readable, outputs=ec, raw_response=response)
|
def get_branch_command():
args = demisto.args()
branch_name = args.get('branch_name')
response = get_branch(branch_name)
commit = response.get('commit', {})
author = commit.get('author', {})
parents = commit.get('parents', [])
ec_object = {
'Name': response.get('name'),
'CommitSHA': commit.get('sha'),
'CommitNodeID': commit.get('node_id'),
'CommitAuthorID': author.get('id'),
'CommitAuthorLogin': author.get('login'),
'CommitParentSHA': [parent.get('sha') for parent in parents],
'Protected': response.get('protected')
}
ec = {
'GitHub.Branch(val.Name === obj.Name && val.CommitSHA === obj.CommitSHA)': ec_object
}
human_readable = tableToMarkdown('Branch', ec_object, removeNull=True)
return_outputs(readable_output=human_readable, outputs=ec, raw_response=response)
|
56,694 |
def update_author(
akey, a=None, handle_redirects=True
) -> Optional[list[SolrUpdateRequest]]:
"""
Get the Solr requests necessary to insert/update/delete an Author in Solr.
:param akey: The author key, e.g. /authors/OL23A
:param dict a: Optional Author
:param bool handle_redirects: If true, remove from Solr all authors that redirect to this one
"""
if akey == '/authors/':
return None
m = re_author_key.match(akey)
if not m:
logger.error('bad key: %s', akey)
assert m
author_id = m.group(1)
if not a:
a = data_provider.get_document(akey)
if a['type']['key'] in ('/type/redirect', '/type/delete') or not a.get(
'name', None
):
return [DeleteRequest([akey])]
try:
assert a['type']['key'] == '/type/author'
except AssertionError:
logger.error("AssertionError: %s", a['type']['key'])
raise
facet_fields = ['subject', 'time', 'person', 'place']
base_url = get_solr_base_url() + '/select'
reply = requests.get(
base_url,
params=[
('wt', 'json'),
('json.nl', 'arrarr'),
('q', 'author_key:%s' % author_id),
('sort', 'edition_count desc'),
('row', 1),
('fl', 'title,subtitle'),
('facet', 'true'),
('facet.mincount', 1),
]
+ [('facet.field', '%s_facet' % field) for field in facet_fields],
).json() # type: ignore
work_count = reply['response']['numFound']
docs = reply['response'].get('docs', [])
top_work = None
if docs and docs[0].get('title', None):
top_work = docs[0]['title']
if docs[0].get('subtitle', None):
top_work += ': ' + docs[0]['subtitle']
all_subjects = []
for f in facet_fields:
for s, num in reply['facet_counts']['facet_fields'][f + '_facet']:
all_subjects.append((num, s))
all_subjects.sort(reverse=True)
top_subjects = [s for num, s in all_subjects[:10]]
d = cast(
SolrDocument,
{
'key': f'/authors/{author_id}',
'type': 'author',
},
)
if a.get('name', None):
d['name'] = a['name']
alternate_names = a.get('alternate_names', [])
if alternate_names:
d['alternate_names'] = alternate_names
if a.get('birth_date', None):
d['birth_date'] = a['birth_date']
if a.get('death_date', None):
d['death_date'] = a['death_date']
if a.get('date', None):
d['date'] = a['date']
if top_work:
d['top_work'] = top_work
d['work_count'] = work_count
d['top_subjects'] = top_subjects
solr_requests: list[SolrUpdateRequest] = []
if handle_redirects:
redirect_keys = data_provider.find_redirects(akey)
# redirects = ''.join('<id>{}</id>'.format(k) for k in redirect_keys)
# q = {'type': '/type/redirect', 'location': akey}
# try:
# redirects = ''.join('<id>%s</id>' % re_author_key.match(r['key']).group(1) for r in query_iter(q))
# except AttributeError:
# logger.error('AssertionError: redirects: %r', [r['key'] for r in query_iter(q)])
# raise
# if redirects:
# solr_requests.append('<delete>' + redirects + '</delete>')
if redirect_keys:
solr_requests.append(DeleteRequest(redirect_keys))
solr_requests.append(AddRequest(d))
return solr_requests
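
# Sketch of the Solr request built above (illustrative; the author key OL23A is
# only an example): the effective query parameters are roughly
#   q=author_key:OL23A, sort=edition_count desc, row=1, fl=title,subtitle,
#   facet=true, facet.mincount=1, plus one facet.field=<field>_facet entry for
#   each of subject, time, person and place; the facet counts feed top_subjects
#   and the single returned doc feeds top_work.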
|
def update_author(
akey, a=None, handle_redirects=True
) -> Optional[list[SolrUpdateRequest]]:
"""
Get the Solr requests necessary to insert/update/delete an Author in Solr.
:param akey: The author key, e.g. /authors/OL23A
:param dict a: Optional Author
:param bool handle_redirects: If true, remove from Solr all authors that redirect to this one
"""
if akey == '/authors/':
return None
m = re_author_key.match(akey)
if not m:
logger.error('bad key: %s', akey)
assert m
author_id = m.group(1)
if not a:
a = data_provider.get_document(akey)
if a['type']['key'] in ('/type/redirect', '/type/delete') or not a.get(
'name', None
):
return [DeleteRequest([akey])]
try:
assert a['type']['key'] == '/type/author'
except AssertionError:
logger.error("AssertionError: %s", a['type']['key'])
raise
facet_fields = ['subject', 'time', 'person', 'place']
base_url = get_solr_base_url() + '/select'
reply = requests.get(
base_url,
params={
'wt': 'json',
'json.nl': 'arrarr',
'q': 'author_key:%s' % author_id,
'sort': 'edition_count desc',
'row': 1,
'fl': 'title,subtitle',
'facet': 'true',
'facet.mincount': 1,
}
+ [('facet.field', '%s_facet' % field) for field in facet_fields],
).json() # type: ignore
work_count = reply['response']['numFound']
docs = reply['response'].get('docs', [])
top_work = None
if docs and docs[0].get('title', None):
top_work = docs[0]['title']
if docs[0].get('subtitle', None):
top_work += ': ' + docs[0]['subtitle']
all_subjects = []
for f in facet_fields:
for s, num in reply['facet_counts']['facet_fields'][f + '_facet']:
all_subjects.append((num, s))
all_subjects.sort(reverse=True)
top_subjects = [s for num, s in all_subjects[:10]]
d = cast(
SolrDocument,
{
'key': f'/authors/{author_id}',
'type': 'author',
},
)
if a.get('name', None):
d['name'] = a['name']
alternate_names = a.get('alternate_names', [])
if alternate_names:
d['alternate_names'] = alternate_names
if a.get('birth_date', None):
d['birth_date'] = a['birth_date']
if a.get('death_date', None):
d['death_date'] = a['death_date']
if a.get('date', None):
d['date'] = a['date']
if top_work:
d['top_work'] = top_work
d['work_count'] = work_count
d['top_subjects'] = top_subjects
solr_requests: list[SolrUpdateRequest] = []
if handle_redirects:
redirect_keys = data_provider.find_redirects(akey)
# redirects = ''.join('<id>{}</id>'.format(k) for k in redirect_keys)
# q = {'type': '/type/redirect', 'location': akey}
# try:
# redirects = ''.join('<id>%s</id>' % re_author_key.match(r['key']).group(1) for r in query_iter(q))
# except AttributeError:
# logger.error('AssertionError: redirects: %r', [r['key'] for r in query_iter(q)])
# raise
# if redirects:
# solr_requests.append('<delete>' + redirects + '</delete>')
if redirect_keys:
solr_requests.append(DeleteRequest(redirect_keys))
solr_requests.append(AddRequest(d))
return solr_requests
|
51,058 |
def list_statistic_ids(
hass: HomeAssistant, statistic_type: str | None = None
) -> list[StatisticMetaData | None]:
"""Return statistic_ids and meta data."""
units = hass.config.units
statistic_ids = {}
with session_scope(hass=hass) as session:
metadata = _get_metadata(hass, session, None, statistic_type)
for meta in metadata.values():
unit = meta["unit_of_measurement"]
if unit is not None:
unit = _configured_unit(unit, units)
meta["unit_of_measurement"] = unit
statistic_ids = {
meta["statistic_id"]: meta["unit_of_measurement"]
for meta in metadata.values()
}
for platform in hass.data[DOMAIN].values():
if not hasattr(platform, "list_statistic_ids"):
continue
platform_statistic_ids = platform.list_statistic_ids(hass, statistic_type)
for statistic_id, unit in platform_statistic_ids.items():
if unit is not None:
unit = _configured_unit(unit, units)
platform_statistic_ids[statistic_id] = unit
statistic_ids = {**platform_statistic_ids, **statistic_ids}
return [
{"statistic_id": _id, "unit_of_measurement": unit}
for _id, unit in statistic_ids.items()
]
|
def list_statistic_ids(
hass: HomeAssistant, statistic_type: str | None = None
) -> list[StatisticMetaData | None]:
"""Return statistic_ids and meta data."""
units = hass.config.units
statistic_ids = {}
with session_scope(hass=hass) as session:
metadata = _get_metadata(hass, session, None, statistic_type)
for meta in metadata.values():
unit = meta["unit_of_measurement"]
if unit is not None:
unit = _configured_unit(unit, units)
meta["unit_of_measurement"] = unit
statistic_ids = {
meta["statistic_id"]: meta["unit_of_measurement"]
for meta in metadata.values()
}
for platform in hass.data[DOMAIN].values():
if not hasattr(platform, "list_statistic_ids"):
continue
platform_statistic_ids = platform.list_statistic_ids(hass, statistic_type)
for statistic_id, unit in platform_statistic_ids.items():
if unit is not None:
unit = _configured_unit(unit, units)
platform_statistic_ids[statistic_id] = unit
for key, value in platform_statistic_ids.items():
statistic_ids.setdefault(key, value)
return [
{"statistic_id": _id, "unit_of_measurement": unit}
for _id, unit in statistic_ids.items()
]
|
8,396 |
def test_template_match_minimal_overlap():
"""
Test template_match when both observed and template spectra have minimal overlap on the wavelength axis
"""
print("minimal overlap test")
# Seed np.random so that results are consistent
np.random.seed(42)
# Create test spectra
spec_axis = np.linspace(0, 50, 50) * u.AA
spec_axis_no_overlap = np.linspace(45, 95, 50) * u.AA
spec = Spectrum1D(spectral_axis=spec_axis,
flux=np.random.randn(50) * u.Jy,
uncertainty=StdDevUncertainty(np.random.sample(50), unit='Jy'))
spec1 = Spectrum1D(spectral_axis=spec_axis_no_overlap,
flux=np.random.randn(50) * u.Jy,
uncertainty=StdDevUncertainty(np.random.sample(50), unit='Jy'))
# Get result from template_match
tm_result = template_comparison.template_match(spec, spec1)
# Create new spectrum for comparison
spec_result = Spectrum1D(spectral_axis=spec_axis,
flux=spec1.flux * template_comparison._normalize_for_template_matching(spec, spec1))
# assert quantity_allclose(tm_result[0].flux, spec_result.flux, atol=0.01*u.Jy)
assert np.isnan(tm_result[1])
|
def test_template_match_minimal_overlap():
"""
Test template_match when both observed and template spectra have minimal overlap on the wavelength axis
"""
print("minimal overlap test")
# Seed np.random so that results are consistent
np.random.seed(42)
# Create test spectra
spec_axis = np.linspace(0, 50, 50) * u.AA
spec_axis_no_overlap = np.linspace(45, 95, 50) * u.AA
spec = Spectrum1D(spectral_axis=spec_axis,
flux=np.random.randn(50) * u.Jy,
uncertainty=StdDevUncertainty(np.random.sample(50), unit='Jy'))
spec1 = Spectrum1D(spectral_axis=spec_axis_min_overlap,
flux=np.random.randn(50) * u.Jy,
uncertainty=StdDevUncertainty(np.random.sample(50), unit='Jy'))
# Get result from template_match
tm_result = template_comparison.template_match(spec, spec1)
# Create new spectrum for comparison
spec_result = Spectrum1D(spectral_axis=spec_axis,
flux=spec1.flux * template_comparison._normalize_for_template_matching(spec, spec1))
# assert quantity_allclose(tm_result[0].flux, spec_result.flux, atol=0.01*u.Jy)
assert np.isnan(tm_result[1])
|
36,357 |
def make_dataclass(cls_name, fields, *, bases=(), namespace=None, init=True,
repr=True, eq=True, order=False, unsafe_hash=False,
frozen=False, module=None, qualname=None):
"""Return a new dynamically created dataclass.
The dataclass name will be 'cls_name'. 'fields' is an iterable
of either (name), (name, type) or (name, type, Field) objects. If type is
omitted, use the string 'typing.Any'. Field objects are created by
the equivalent of calling 'field(name, type [, Field-info])'.
C = make_dataclass('C', ['x', ('y', int), ('z', int, field(init=False))], bases=(Base,))
is equivalent to:
@dataclass
class C(Base):
x: 'typing.Any'
y: int
z: int = field(init=False)
For the bases and namespace parameters, see the builtin type() function.
'module' should be set to the module this class is being created in; if it
is not set, an attempt to find that module will be made, but if it fails the
class will not be picklable.
'qualname' should be set to the actual location this call can be found in
its module; by default it is set to the global scope. If this is not correct,
pickle will fail in some circumstances.
The parameters init, repr, eq, order, unsafe_hash, and frozen are passed to
dataclass().
"""
if namespace is None:
namespace = {}
else:
# Copy namespace since we're going to mutate it.
namespace = namespace.copy()
# While we're looking through the field names, validate that they
# are identifiers, are not keywords, and not duplicates.
seen = set()
anns = {}
for item in fields:
if isinstance(item, str):
name = item
tp = 'typing.Any'
elif len(item) == 2:
name, tp, = item
elif len(item) == 3:
name, tp, spec = item
namespace[name] = spec
else:
raise TypeError(f'Invalid field: {item!r}')
if not isinstance(name, str) or not name.isidentifier():
            raise TypeError(f'Field names must be valid identifiers: {name!r}')
if keyword.iskeyword(name):
raise TypeError(f'Field names must not be keywords: {name!r}')
if name in seen:
raise TypeError(f'Field name duplicated: {name!r}')
seen.add(name)
anns[name] = tp
namespace['__annotations__'] = anns
# We use `types.new_class()` instead of simply `type()` to allow dynamic creation
    # of generic dataclasses.
cls = types.new_class(cls_name, bases, {}, lambda ns: ns.update(namespace))
# TODO: this hack is the same that can be found in enum.py and should be
# removed if there ever is a way to get the caller module.
if module is None:
try:
module = sys._getframe(1).f_globals['__name__']
except (AttributeError, ValueError):
pass
if module is None:
_make_class_unpicklable(cls)
else:
cls.__module__ = module
if qualname is not None:
cls.__qualname__ = qualname
return dataclass(cls, init=init, repr=repr, eq=eq, order=order,
unsafe_hash=unsafe_hash, frozen=frozen)
|
def make_dataclass(cls_name, fields, *, bases=(), namespace=None, init=True,
repr=True, eq=True, order=False, unsafe_hash=False,
frozen=False, module=None, qualname=None):
"""Return a new dynamically created dataclass.
The dataclass name will be 'cls_name'. 'fields' is an iterable
of either (name), (name, type) or (name, type, Field) objects. If type is
omitted, use the string 'typing.Any'. Field objects are created by
the equivalent of calling 'field(name, type [, Field-info])'.
C = make_dataclass('C', ['x', ('y', int), ('z', int, field(init=False))], bases=(Base,))
is equivalent to:
@dataclass
class C(Base):
x: 'typing.Any'
y: int
z: int = field(init=False)
For the bases and namespace parameters, see the builtin type() function.
'module' should be set to the module this class is being created in; if it
is not set, an attempt to find that module will be made, but if it fails the
class will not be picklable.
'qualname' should be set to the actual location this call can be found in
its module; by default it is set to the global scope. If this is not correct,
pickle will fail in some circumstances.
The parameters init, repr, eq, order, unsafe_hash, and frozen are passed to
dataclass().
"""
if namespace is None:
namespace = {}
else:
# Copy namespace since we're going to mutate it.
namespace = namespace.copy()
# While we're looking through the field names, validate that they
# are identifiers, are not keywords, and not duplicates.
seen = set()
anns = {}
for item in fields:
if isinstance(item, str):
name = item
tp = 'typing.Any'
elif len(item) == 2:
name, tp, = item
elif len(item) == 3:
name, tp, spec = item
namespace[name] = spec
else:
raise TypeError(f'Invalid field: {item!r}')
if not isinstance(name, str) or not name.isidentifier():
            raise TypeError(f'Field names must be valid identifiers: {name!r}')
if keyword.iskeyword(name):
raise TypeError(f'Field names must not be keywords: {name!r}')
if name in seen:
raise TypeError(f'Field name duplicated: {name!r}')
seen.add(name)
anns[name] = tp
namespace['__annotations__'] = anns
# We use `types.new_class()` instead of simply `type()` to allow dynamic creation
# of generic dataclassses.
cls = types.new_class(cls_name, bases, {}, lambda ns: ns.update(namespace))
# TODO: this hack is the same that can be found in enum.py and should be
# removed if there ever is a way to get the caller module.
if module is None:
try:
module = sys._getframe(1).f_globals['__name__']
except (AttributeError, ValueError):
pass
if module is None:
_make_class_unpicklable(cls)
else:
cls.__module__ = module
if qualname is not None:
cls.__qualname__ = qualname
return dataclass(cls, init=init, repr=repr, eq=eq, order=order,
unsafe_hash=unsafe_hash, frozen=frozen)
|
7,044 |
def process_plugins(fpath):
# Load Rose Vars, if a ``rose-workflow.conf`` file is present.
extra_vars = {
'env': {},
'template_variables': {},
'templating_detected': None
}
for entry_point in pkg_resources.iter_entry_points(
'cylc.pre_configure'
):
try:
plugin_result = entry_point.resolve()(srcdir=fpath)
except Exception as exc:
# NOTE: except Exception (purposefully vague)
# this is to separate plugin from core Cylc errors
raise PluginError(
'cylc.pre_configure',
entry_point.name,
exc
) from None
for section in ['env', 'template_variables']:
if section in plugin_result and plugin_result[section] is not None:
# Raise error if multiple plugins try to update the same keys.
section_update = plugin_result.get(section, {})
keys_collision = (
extra_vars[section].keys() & section_update.keys()
)
if keys_collision:
raise ParsecError(
f"{entry_point.name} is trying to alter "
f"[{section}]{', '.join(sorted(keys_collision))}."
)
extra_vars[section].update(section_update)
if (
'templating_detected' in plugin_result and
plugin_result['templating_detected'] is not None and
extra_vars['templating_detected'] is not None and
extra_vars['templating_detected'] !=
plugin_result['templating_detected']
):
# Don't allow subsequent plugins with different templating_detected
raise ParsecError(
"Can't merge templating languages "
f"{extra_vars['templating_detected']} and "
f"{plugin_result['templating_detected']}"
)
elif(
'templating_detected' in plugin_result and
plugin_result['templating_detected'] is not None
):
extra_vars['templating_detected'] = plugin_result[
'templating_detected'
]
return extra_vars
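
# Illustration of the collision check above (hypothetical plugin results): if
# two `cylc.pre_configure` plugins both return template_variables containing
# the key "FOO", e.g. {'FOO': 1} from the first and {'FOO': 2} from the second,
# keys_collision is non-empty and ParsecError is raised instead of silently
# letting the second plugin overwrite the first.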
|
def process_plugins(fpath):
# Load Rose Vars, if a ``rose-suite.conf`` file is present.
extra_vars = {
'env': {},
'template_variables': {},
'templating_detected': None
}
for entry_point in pkg_resources.iter_entry_points(
'cylc.pre_configure'
):
try:
plugin_result = entry_point.resolve()(srcdir=fpath)
except Exception as exc:
# NOTE: except Exception (purposefully vague)
# this is to separate plugin from core Cylc errors
raise PluginError(
'cylc.pre_configure',
entry_point.name,
exc
) from None
for section in ['env', 'template_variables']:
if section in plugin_result and plugin_result[section] is not None:
# Raise error if multiple plugins try to update the same keys.
section_update = plugin_result.get(section, {})
keys_collision = (
extra_vars[section].keys() & section_update.keys()
)
if keys_collision:
raise ParsecError(
f"{entry_point.name} is trying to alter "
f"[{section}]{', '.join(sorted(keys_collision))}."
)
extra_vars[section].update(section_update)
if (
'templating_detected' in plugin_result and
plugin_result['templating_detected'] is not None and
extra_vars['templating_detected'] is not None and
extra_vars['templating_detected'] !=
plugin_result['templating_detected']
):
# Don't allow subsequent plugins with different templating_detected
raise ParsecError(
"Can't merge templating languages "
f"{extra_vars['templating_detected']} and "
f"{plugin_result['templating_detected']}"
)
elif(
'templating_detected' in plugin_result and
plugin_result['templating_detected'] is not None
):
extra_vars['templating_detected'] = plugin_result[
'templating_detected'
]
return extra_vars
|
42,822 |
def move_git_repo(source_path, new_path):
"""
Moves git folder and .gitignore to the new backup directory.
"""
if os.path.exists(os.path.join(new_path, '.git')) or os.path.exists(os.path.join(new_path, '.gitignore')):
print_red_bold("Git repository already exists new path ({})".format(new_path))
print_red_bold("Please choose a different directory")
sys.exit()
git_dir = os.path.join(source_path, '.git')
git_ignore_file = os.path.join(source_path, '.gitignore')
try:
move(git_dir, new_path)
move(git_ignore_file, new_path)
print_blue_bold("Moving git repo to new location.")
except FileNotFoundError:
pass
|
def move_git_repo(source_path, new_path):
"""
Moves git folder and .gitignore to the new backup directory.
"""
if os.path.exists(os.path.join(new_path, '.git')) or os.path.exists(os.path.join(new_path, '.gitignore')):
print_red_bold("Git repository already exists new path ({})".format(new_path))
print_red_bold("Please choose a different backup path.")
sys.exit()
git_dir = os.path.join(source_path, '.git')
git_ignore_file = os.path.join(source_path, '.gitignore')
try:
move(git_dir, new_path)
move(git_ignore_file, new_path)
print_blue_bold("Moving git repo to new location.")
except FileNotFoundError:
pass
|
1,573 |
def load_wine(return_X_y=False, as_frame=False):
"""Load and return the wine dataset (classification).
.. versionadded:: 0.18
The wine dataset is a classic and very easy multi-class classification
dataset.
================= ==============
Classes 3
Samples per class [59,71,48]
Samples total 178
Dimensionality 13
Features real, positive
================= ==============
Read more in the :ref:`User Guide <wine_dataset>`.
Parameters
----------
return_X_y : boolean, default=False.
If True, returns ``(data, target)`` instead of a Bunch object.
See below for more information about the `data` and `target` object.
as_frame : boolean, default=False
If True, the data is a pandas DataFrame including columns with
appropriate dtypes (numeric, string or categorical). The target is
a pandas DataFrame or Series depending on the number of target_columns.
.. versionadded:: 0.23
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are: 'data', the
data to learn, 'target', the classification labels, 'target_names', the
meaning of the labels, 'feature_names', the meaning of the features,
and 'DESCR', the full description of the dataset.
(data, target) : tuple if ``return_X_y`` is True
frame : pandas DataFrame
Only present when `as_frame=True`. DataFrame with ``data`` and
``target``.
.. versionadded:: 0.23
The copy of UCI ML Wine Data Set dataset is downloaded and modified to fit
standard format from:
https://archive.ics.uci.edu/ml/machine-learning-databases/wine/wine.data
Examples
--------
Let's say you are interested in the samples 10, 80, and 140, and want to
know their class name.
>>> from sklearn.datasets import load_wine
>>> data = load_wine()
>>> data.target[[10, 80, 140]]
array([0, 1, 2])
>>> list(data.target_names)
['class_0', 'class_1', 'class_2']
"""
module_path = dirname(__file__)
data, target, target_names = load_data(module_path, 'wine_data.csv')
with open(join(module_path, 'descr', 'wine_data.rst')) as rst_file:
fdescr = rst_file.read()
feature_names = ['alcohol',
'malic_acid',
'ash',
'alcalinity_of_ash',
'magnesium',
'total_phenols',
'flavanoids',
'nonflavanoid_phenols',
'proanthocyanins',
'color_intensity',
'hue',
'od280/od315_of_diluted_wines',
'proline']
frame = None
target_columns = ['target', ]
if as_frame:
frame, data, target = _convert_data_dataframe("load_wine",
data,
target,
feature_names,
target_columns)
if return_X_y:
return data, target
return Bunch(data=data,
target=target,
frame=frame,
target_names=target_names,
DESCR=fdescr,
feature_names=feature_names)
|
def load_wine(return_X_y=False, as_frame=False):
"""Load and return the wine dataset (classification).
.. versionadded:: 0.18
The wine dataset is a classic and very easy multi-class classification
dataset.
================= ==============
Classes 3
Samples per class [59,71,48]
Samples total 178
Dimensionality 13
Features real, positive
================= ==============
Read more in the :ref:`User Guide <wine_dataset>`.
Parameters
----------
return_X_y : boolean, default=False.
If True, returns ``(data, target)`` instead of a Bunch object.
See below for more information about the `data` and `target` object.
as_frame : bool, default=False
If True, the data is a pandas DataFrame including columns with
appropriate dtypes (numeric, string or categorical). The target is
a pandas DataFrame or Series depending on the number of target_columns.
.. versionadded:: 0.23
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are: 'data', the
data to learn, 'target', the classification labels, 'target_names', the
meaning of the labels, 'feature_names', the meaning of the features,
and 'DESCR', the full description of the dataset.
(data, target) : tuple if ``return_X_y`` is True
frame : pandas DataFrame
Only present when `as_frame=True`. DataFrame with ``data`` and
``target``.
.. versionadded:: 0.23
The copy of UCI ML Wine Data Set dataset is downloaded and modified to fit
standard format from:
https://archive.ics.uci.edu/ml/machine-learning-databases/wine/wine.data
Examples
--------
Let's say you are interested in the samples 10, 80, and 140, and want to
know their class name.
>>> from sklearn.datasets import load_wine
>>> data = load_wine()
>>> data.target[[10, 80, 140]]
array([0, 1, 2])
>>> list(data.target_names)
['class_0', 'class_1', 'class_2']
"""
module_path = dirname(__file__)
data, target, target_names = load_data(module_path, 'wine_data.csv')
with open(join(module_path, 'descr', 'wine_data.rst')) as rst_file:
fdescr = rst_file.read()
feature_names = ['alcohol',
'malic_acid',
'ash',
'alcalinity_of_ash',
'magnesium',
'total_phenols',
'flavanoids',
'nonflavanoid_phenols',
'proanthocyanins',
'color_intensity',
'hue',
'od280/od315_of_diluted_wines',
'proline']
frame = None
target_columns = ['target', ]
if as_frame:
frame, data, target = _convert_data_dataframe("load_wine",
data,
target,
feature_names,
target_columns)
if return_X_y:
return data, target
return Bunch(data=data,
target=target,
frame=frame,
target_names=target_names,
DESCR=fdescr,
feature_names=feature_names)
|
41,908 |
def _get_contour_plot(study: Study, params: Optional[List[str]] = None) -> "go.Figure":
layout = go.Layout(title="Contour Plot")
trials = [trial for trial in study.trials if trial.state == TrialState.COMPLETE]
if len(trials) == 0:
_logger.warning("Your study does not have any completed trials.")
return go.Figure(data=[], layout=layout)
all_params = {p_name for t in trials for p_name in t.params.keys()}
if params is None:
sorted_params = sorted(list(all_params))
elif len(params) <= 1:
_logger.warning("The length of params must be greater than 1.")
return go.Figure(data=[], layout=layout)
else:
for input_p_name in params:
if input_p_name not in all_params:
raise ValueError("Parameter {} does not exist in your study.".format(input_p_name))
sorted_params = sorted(list(set(params)))
padding_ratio = 0.05
param_values_range = {}
update_category_axes = {}
for p_name in sorted_params:
values = [t.params[p_name] for t in trials if p_name in t.params]
min_value = min(values)
max_value = max(values)
if _is_log_scale(trials, p_name):
padding = (math.log10(max_value) - math.log10(min_value)) * padding_ratio
min_value = math.pow(10, math.log10(min_value) - padding)
max_value = math.pow(10, math.log10(max_value) + padding)
elif _is_categorical(trials, p_name):
update_category_axes[p_name] = any([str(v).isnumeric() for v in set(values)])
else:
padding = (max_value - min_value) * padding_ratio
min_value = min_value - padding
max_value = max_value + padding
param_values_range[p_name] = (min_value, max_value)
if len(sorted_params) == 2:
x_param = sorted_params[0]
y_param = sorted_params[1]
sub_plots = _generate_contour_subplot(
trials, x_param, y_param, study.direction, param_values_range
)
figure = go.Figure(data=sub_plots, layout=layout)
figure.update_xaxes(title_text=x_param, range=param_values_range[x_param])
figure.update_yaxes(title_text=y_param, range=param_values_range[y_param])
if _is_categorical(trials, x_param) and update_category_axes[x_param]:
figure.update_xaxes(type="category")
if _is_categorical(trials, y_param) and update_category_axes[y_param]:
figure.update_yaxes(type="category")
if _is_log_scale(trials, x_param):
log_range = [math.log10(p) for p in param_values_range[x_param]]
figure.update_xaxes(range=log_range, type="log")
if _is_log_scale(trials, y_param):
log_range = [math.log10(p) for p in param_values_range[y_param]]
figure.update_yaxes(range=log_range, type="log")
else:
figure = make_subplots(
rows=len(sorted_params), cols=len(sorted_params), shared_xaxes=True, shared_yaxes=True
)
figure.update_layout(layout)
showscale = True # showscale option only needs to be specified once
for x_i, x_param in enumerate(sorted_params):
for y_i, y_param in enumerate(sorted_params):
if x_param == y_param:
figure.add_trace(go.Scatter(), row=y_i + 1, col=x_i + 1)
else:
sub_plots = _generate_contour_subplot(
trials, x_param, y_param, study.direction, param_values_range
)
contour = sub_plots[0]
scatter = sub_plots[1]
contour.update(showscale=showscale) # showscale's default is True
if showscale:
showscale = False
figure.add_trace(contour, row=y_i + 1, col=x_i + 1)
figure.add_trace(scatter, row=y_i + 1, col=x_i + 1)
figure.update_xaxes(range=param_values_range[x_param], row=y_i + 1, col=x_i + 1)
figure.update_yaxes(range=param_values_range[y_param], row=y_i + 1, col=x_i + 1)
if _is_categorical(trials, x_param) and update_category_axes[x_param]:
figure.update_xaxes(type="category", row=y_i + 1, col=x_i + 1)
if _is_categorical(trials, y_param) and update_category_axes[y_param]:
figure.update_yaxes(type="category", row=y_i + 1, col=x_i + 1)
if _is_log_scale(trials, x_param):
log_range = [math.log10(p) for p in param_values_range[x_param]]
figure.update_xaxes(range=log_range, type="log", row=y_i + 1, col=x_i + 1)
if _is_log_scale(trials, y_param):
log_range = [math.log10(p) for p in param_values_range[y_param]]
figure.update_yaxes(range=log_range, type="log", row=y_i + 1, col=x_i + 1)
if x_i == 0:
figure.update_yaxes(title_text=y_param, row=y_i + 1, col=x_i + 1)
if y_i == len(sorted_params) - 1:
figure.update_xaxes(title_text=x_param, row=y_i + 1, col=x_i + 1)
return figure
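
# Worked example of the 5% padding above (hypothetical values): for a log-scale
# parameter spanning 1 to 100, padding = (log10(100) - log10(1)) * 0.05 = 0.1,
# so the plotted range becomes (10**-0.1, 10**2.1), roughly (0.79, 125.9); for
# a linear parameter with the same values the range is simply
# (1 - 4.95, 100 + 4.95) = (-3.95, 104.95).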
|
def _get_contour_plot(study: Study, params: Optional[List[str]] = None) -> "go.Figure":
layout = go.Layout(title="Contour Plot")
trials = [trial for trial in study.trials if trial.state == TrialState.COMPLETE]
if len(trials) == 0:
_logger.warning("Your study does not have any completed trials.")
return go.Figure(data=[], layout=layout)
all_params = {p_name for t in trials for p_name in t.params.keys()}
if params is None:
sorted_params = sorted(list(all_params))
elif len(params) <= 1:
_logger.warning("The length of params must be greater than 1.")
return go.Figure(data=[], layout=layout)
else:
for input_p_name in params:
if input_p_name not in all_params:
raise ValueError("Parameter {} does not exist in your study.".format(input_p_name))
sorted_params = sorted(list(set(params)))
padding_ratio = 0.05
param_values_range = {}
update_category_axes = {}
for p_name in sorted_params:
values = [t.params[p_name] for t in trials if p_name in t.params]
min_value = min(values)
max_value = max(values)
if _is_log_scale(trials, p_name):
padding = (math.log10(max_value) - math.log10(min_value)) * padding_ratio
min_value = math.pow(10, math.log10(min_value) - padding)
max_value = math.pow(10, math.log10(max_value) + padding)
elif _is_categorical(trials, p_name):
update_category_axes[p_name] = any([str(v).isnumeric() for v in set(values)])
else:
padding = (max_value - min_value) * padding_ratio
min_value = min_value - padding
max_value = max_value + padding
param_values_range[p_name] = (min_value, max_value)
if len(sorted_params) == 2:
x_param = sorted_params[0]
y_param = sorted_params[1]
sub_plots = _generate_contour_subplot(
trials, x_param, y_param, study.direction, param_values_range
)
figure = go.Figure(data=sub_plots, layout=layout)
figure.update_xaxes(title_text=x_param, range=param_values_range[x_param])
figure.update_yaxes(title_text=y_param, range=param_values_range[y_param])
if _is_categorical(trials, x_param) and update_category_axes[x_param]:
figure.update_xaxes(type="category")
if _is_categorical(trials, y_param) and update_category_axes[y_param]:
figure.update_yaxes(type="category")
if _is_log_scale(trials, x_param):
log_range = [math.log10(p) for p in param_values_range[x_param]]
figure.update_xaxes(range=log_range, type="log")
if _is_log_scale(trials, y_param):
log_range = [math.log10(p) for p in param_values_range[y_param]]
figure.update_yaxes(range=log_range, type="log")
else:
figure = make_subplots(
rows=len(sorted_params), cols=len(sorted_params), shared_xaxes=True, shared_yaxes=True
)
figure.update_layout(layout)
showscale = True # showscale option only needs to be specified once
for x_i, x_param in enumerate(sorted_params):
for y_i, y_param in enumerate(sorted_params):
if x_param == y_param:
figure.add_trace(go.Scatter(), row=y_i + 1, col=x_i + 1)
else:
sub_plots = _generate_contour_subplot(
trials, x_param, y_param, study.direction, param_values_range
)
contour = sub_plots[0]
scatter = sub_plots[1]
contour.update(showscale=showscale) # showscale's default is True
if showscale:
showscale = False
figure.add_trace(contour, row=y_i + 1, col=x_i + 1)
figure.add_trace(scatter, row=y_i + 1, col=x_i + 1)
figure.update_xaxes(range=param_values_range[x_param], row=y_i + 1, col=x_i + 1)
figure.update_yaxes(range=param_values_range[y_param], row=y_i + 1, col=x_i + 1)
if _is_categorical(trials, x_param) and update_category_axes[x_param]:
figure.update_xaxes(type="category", row=y_i + 1, col=x_i + 1)
if update_category_axes.get(y_param, False):
figure.update_yaxes(type="category", row=y_i + 1, col=x_i + 1)
if _is_log_scale(trials, x_param):
log_range = [math.log10(p) for p in param_values_range[x_param]]
figure.update_xaxes(range=log_range, type="log", row=y_i + 1, col=x_i + 1)
if _is_log_scale(trials, y_param):
log_range = [math.log10(p) for p in param_values_range[y_param]]
figure.update_yaxes(range=log_range, type="log", row=y_i + 1, col=x_i + 1)
if x_i == 0:
figure.update_yaxes(title_text=y_param, row=y_i + 1, col=x_i + 1)
if y_i == len(sorted_params) - 1:
figure.update_xaxes(title_text=x_param, row=y_i + 1, col=x_i + 1)
return figure
|
34,369 |
def _get_available_endpoints(endpoints_path: Optional[Text]) -> AvailableEndpoints:
"""Get `AvailableEndpoints` object from specified path.
Args:
endpoints_path: Path of the endpoints file to be read. If `None` the
default path for that file is used ('endpoints.yml').
Returns:
`AvailableEndpoints` object read from endpoints file.
"""
endpoints_config_path = cli_utils.get_validated_path(
endpoints_path, "endpoints", DEFAULT_ENDPOINTS_PATH, True
)
return AvailableEndpoints.read_endpoints(endpoints_config_path)
|
def _get_available_endpoints(endpoints_path: Optional[Text]) -> AvailableEndpoints:
"""Get `AvailableEndpoints` object from specified path.
Args:
endpoints_path: Path of the endpoints file to be read. If `None` the
default path for that file is used (`endpoints.yml`).
Returns:
`AvailableEndpoints` object read from endpoints file.
"""
endpoints_config_path = cli_utils.get_validated_path(
endpoints_path, "endpoints", DEFAULT_ENDPOINTS_PATH, True
)
return AvailableEndpoints.read_endpoints(endpoints_config_path)
|
46,549 |
def get_valid_custody_chunk_response(spec, state, chunk_challenge, challenge_index,
block_length_or_custody_data,
invalid_chunk_data=False):
if isinstance(block_length_or_custody_data, int):
custody_data = get_custody_test_vector(block_length_or_custody_data)
else:
custody_data = block_length_or_custody_data
custody_data_block = ByteList[spec.MAX_SHARD_BLOCK_SIZE](custody_data)
chunks = custody_chunkify(spec, custody_data_block)
chunk_index = chunk_challenge.chunk_index
leaf_index = chunk_index + 2**spec.CUSTODY_RESPONSE_DEPTH
serialized_length = (len(custody_data_block)).to_bytes(32, 'little')
data_branch = build_proof(custody_data_block.get_backing().get_left(), leaf_index) + [serialized_length]
return spec.CustodyChunkResponse(
challenge_index=challenge_index,
chunk_index=chunk_index,
chunk=chunks[chunk_index],
branch=data_branch,
)
|
def get_valid_custody_chunk_response(spec, state, chunk_challenge, challenge_index,
block_length_or_custody_data,
invalid_chunk_data=False):
if isinstance(block_length_or_custody_data, int):
custody_data = get_custody_test_vector(block_length_or_custody_data)
else:
custody_data = block_length_or_custody_data
custody_data_block = ByteList[spec.MAX_SHARD_BLOCK_SIZE](custody_data)
chunks = custody_chunkify(spec, custody_data_block)
chunk_index = chunk_challenge.chunk_index
leaf_index = chunk_index + 2**spec.CUSTODY_RESPONSE_DEPTH
serialized_length = len(custody_data_block).to_bytes(32, 'little')
data_branch = build_proof(custody_data_block.get_backing().get_left(), leaf_index) + [serialized_length]
return spec.CustodyChunkResponse(
challenge_index=challenge_index,
chunk_index=chunk_index,
chunk=chunks[chunk_index],
branch=data_branch,
)
|
6,070 |
def getQueuesResolved(siteDict):
"""
Get the list of queue descriptions merging site/ce/queue parameters and adding some
derived parameters.
:param dict siteDict: dictionary with configuration data as returned by Resources.getQueues() method
:return: S_OK/S_ERROR, Value dictionary per queue with configuration data updated, e.g. for SiteDirector
"""
queueDict = {}
for site in siteDict:
for ce in siteDict[site]:
ceDict = siteDict[site][ce]
qDict = ceDict.pop('Queues')
for queue in qDict:
queueName = '%s_%s' % (ce, queue)
queueDict[queueName] = qDict[queue]
queueDict[queueName] = qDict[queue]
queueDict[queueName]['Queue'] = queue
queueDict[queueName]['Site'] = site
# Evaluate the CPU limit of the queue according to the Glue convention
# To Do: should be a utility
if "maxCPUTime" in queueDict[queueName] and \
"SI00" in queueDict[queueName]:
maxCPUTime = float(queueDict[queueName]['maxCPUTime'])
# For some sites there are crazy values in the CS
maxCPUTime = max(maxCPUTime, 0)
maxCPUTime = min(maxCPUTime, 86400 * 12.5)
si00 = float(queueDict[queueName]['SI00'])
queueCPUTime = 60. / 250. * maxCPUTime * si00
queueDict[queueName]['CPUTime'] = int(queueCPUTime)
# Tags & RequiredTags defined on the Queue level and on the CE level are concatenated
# This also converts them from a string to a list if required.
for tagFieldName in ('Tag', 'RequiredTag'):
ceTags = ceDict.get(tagFieldName, [])
if isinstance(ceTags, basestring):
ceTags = fromChar(ceTags)
queueTags = queueDict[queueName].get(tagFieldName)
if queueTags and isinstance(queueTags, basestring):
queueTags = fromChar(queueTags)
queueDict[queueName][tagFieldName] = queueTags
if ceTags:
if queueTags:
allTags = list(set(ceTags + queueTags))
queueDict[queueName][tagFieldName] = allTags
else:
queueDict[queueName][tagFieldName] = ceTags
# Some parameters can be defined on the CE level and are inherited by all Queues
for parameter in ['MaxRAM', 'NumberOfProcessors', 'WholeNode']:
queueParameter = queueDict[queueName].get(parameter)
ceParameter = ceDict.get(parameter)
if ceParameter or queueParameter:
queueDict[queueName][parameter] = ceParameter if not queueParameter \
else queueParameter
# If we have a multi-core queue add MultiProcessor tag
if queueDict[queueName].get('NumberOfProcessors', 1) > 1:
queueDict[queueName].setdefault('Tag', []).append('MultiProcessor')
queueDict[queueName]['CEName'] = ce
queueDict[queueName]['GridCE'] = ce
queueDict[queueName]['CEType'] = ceDict['CEType']
queueDict[queueName]['GridMiddleware'] = ceDict['CEType']
queueDict[queueName]['QueueName'] = queue
platform = ''
if "Platform" in queueDict[queueName]:
platform = queueDict[queueName]['Platform']
elif "Platform" in ceDict:
platform = ceDict['Platform']
elif "OS" in ceDict:
architecture = ceDict.get('architecture', 'x86_64')
platform = '_'.join([architecture, ceDict['OS']])
queueDict[queueName]['Platform'] = platform
if "Platform" not in queueDict[queueName] and platform:
result = getDIRACPlatform(platform)
if result['OK']:
queueDict[queueName]['Platform'] = result['Value'][0]
return S_OK(queueDict)
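
# Worked example of the Glue-convention CPU limit above (hypothetical values):
# with maxCPUTime = 2880 and SI00 = 2500, queueCPUTime = 60. / 250. * 2880 *
# 2500 = 1728000.0, so the queue's CPUTime is set to 1728000.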
|
def getQueuesResolved(siteDict):
"""
Get the list of queue descriptions merging site/ce/queue parameters and adding some
derived parameters.
:param dict siteDict: dictionary with configuration data as returned by Resources.getQueues() method
:return: S_OK/S_ERROR, Value dictionary per queue with configuration data updated, e.g. for SiteDirector
"""
queueDict = {}
for site in siteDict:
for ce in siteDict[site]:
ceDict = siteDict[site][ce]
qDict = ceDict.pop('Queues')
for queue in qDict:
queueName = '%s_%s' % (ce, queue)
queueDict[queueName] = qDict[queue]
queueDict[queueName] = qDict[queue]
queueDict[queueName]['Queue'] = queue
queueDict[queueName]['Site'] = site
# Evaluate the CPU limit of the queue according to the Glue convention
# To Do: should be a utility
if "maxCPUTime" in queueDict[queueName] and \
"SI00" in queueDict[queueName]:
maxCPUTime = float(queueDict[queueName]['maxCPUTime'])
# For some sites there are crazy values in the CS
maxCPUTime = max(maxCPUTime, 0)
maxCPUTime = min(maxCPUTime, 86400 * 12.5)
si00 = float(queueDict[queueName]['SI00'])
queueCPUTime = 60 / 250 * maxCPUTime * si00
queueDict[queueName]['CPUTime'] = int(queueCPUTime)
# Tags & RequiredTags defined on the Queue level and on the CE level are concatenated
# This also converts them from a string to a list if required.
for tagFieldName in ('Tag', 'RequiredTag'):
ceTags = ceDict.get(tagFieldName, [])
if isinstance(ceTags, basestring):
ceTags = fromChar(ceTags)
queueTags = queueDict[queueName].get(tagFieldName)
if queueTags and isinstance(queueTags, basestring):
queueTags = fromChar(queueTags)
queueDict[queueName][tagFieldName] = queueTags
if ceTags:
if queueTags:
allTags = list(set(ceTags + queueTags))
queueDict[queueName][tagFieldName] = allTags
else:
queueDict[queueName][tagFieldName] = ceTags
# Some parameters can be defined on the CE level and are inherited by all Queues
for parameter in ['MaxRAM', 'NumberOfProcessors', 'WholeNode']:
queueParameter = queueDict[queueName].get(parameter)
ceParameter = ceDict.get(parameter)
if ceParameter or queueParameter:
queueDict[queueName][parameter] = ceParameter if not queueParameter \
else queueParameter
# If we have a multi-core queue add MultiProcessor tag
if queueDict[queueName].get('NumberOfProcessors', 1) > 1:
queueDict[queueName].setdefault('Tag', []).append('MultiProcessor')
queueDict[queueName]['CEName'] = ce
queueDict[queueName]['GridCE'] = ce
queueDict[queueName]['CEType'] = ceDict['CEType']
queueDict[queueName]['GridMiddleware'] = ceDict['CEType']
queueDict[queueName]['QueueName'] = queue
platform = ''
if "Platform" in queueDict[queueName]:
platform = queueDict[queueName]['Platform']
elif "Platform" in ceDict:
platform = ceDict['Platform']
elif "OS" in ceDict:
architecture = ceDict.get('architecture', 'x86_64')
platform = '_'.join([architecture, ceDict['OS']])
queueDict[queueName]['Platform'] = platform
if "Platform" not in queueDict[queueName] and platform:
result = getDIRACPlatform(platform)
if result['OK']:
queueDict[queueName]['Platform'] = result['Value'][0]
return S_OK(queueDict)
|
50,370 |
def switch_map(
mapper: Optional[Mapper[_T1, Observable[_T2]]] = None
) -> Callable[[Observable[_T1]], Observable[_T2]]:
"""
The switch_map operator.
Project each element of an observable sequence into a new observable.
.. marble::
:alt: switch_map
---1---2---3--->
[ switch_map(i: of(i, i ** 2, i ** 3)) ]
---1---1---1---2---4---8---3---9---27--->
Example:
>>> switch_map(lambda value: of(value, value // 2))
Args:
mapper: A transform function to apply to each source element.
Returns:
A partially applied operator function that takes an observable
source and returns an observable sequence whose elements are
each element of the result of invoking the transform function
on each element of the source.
"""
from ._switch_map import switch_map_
return switch_map_(mapper)
|
def switch_map(
mapper: Optional[Mapper[_T1, Observable[_T2]]] = None
) -> Callable[[Observable[_T1]], Observable[_T2]]:
"""
The switch_map operator.
Project each element of an observable sequence into a new observable.
.. marble::
:alt: switch_map
---1---2---3--->
[ switch_map(i: of(i, i ** 2, i ** 3)) ]
---1---1---1---2---4---8---3---9---27--->
Example:
>>> switch_map(lambda value: of(value, value // 2))
Args:
mapper: A transform function to apply to each source element.
Returns:
A partially applied operator function that takes an observable
source and returns an observable sequence whose elements are
each element of the result of invoking the transform function
on each element of the source.
"""
mapper_: typing.Mapper[_T1, Union[Future[_T2], Observable[_T2]]] = mapper or cast(
Callable[[_T1], Observable[_T2]], of
)
return compose(
map(mapper_),
switch_latest(),
)
|
28,621 |
def from_beanmachine(
sampler=None,
*,
coords=None,
dims=None,
):
"""Convert Bean Machine MonteCarloSamples object into an InferenceData object.
For a usage example read the
:ref:`Creating InferenceData section on from_beanmachine <creating_InferenceData>`
Parameters
----------
sampler : bm.MonteCarloSamples
Fitted MonteCarloSamples object from Bean Machine
coords : dict[str] -> list[str]
Map of dimensions to coordinates
dims : dict[str] -> list[str]
Map variable names to their coordinates
"""
return BMConverter(
sampler=sampler,
coords=coords,
dims=dims,
).to_inference_data()
|
def from_beanmachine(
sampler=None,
*,
coords=None,
dims=None,
):
"""Convert Bean Machine MonteCarloSamples object into an InferenceData object.
For a usage example read the
:ref:`Creating InferenceData section on from_beanmachine <creating_InferenceData>`
Parameters
----------
sampler : bm.MonteCarloSamples
Fitted MonteCarloSamples object from Bean Machine
coords : dict[str] -> list[str]
Map of dimensions to coordinates
dims : dict of {str : list of str}
Map variable names to their coordinates
"""
return BMConverter(
sampler=sampler,
coords=coords,
dims=dims,
).to_inference_data()
|
6,961 |
def fetch_details_from_tag(_tag: str) -> Tuple[str, str, str]:
app_tag = _tag.split("@")
org_repo = app_tag[0].split("/")
try:
repo, tag = app_tag
except ValueError:
repo, tag = app_tag + [None]
try:
org, repo = org_repo
except Exception:
org, repo = find_org(org_repo)
return org, repo, tag
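
# Illustrative trace (hypothetical tags): for _tag = "acme/tools@1.0.0",
# app_tag = ["acme/tools", "1.0.0"] and org_repo = ["acme", "tools"], so the
# function returns ("acme", "tools", "1.0.0"); for a bare _tag such as "tools"
# the tag unpacking falls back to tag = None and find_org() is consulted to
# resolve the organization.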
|
def fetch_details_from_tag(_tag: str) -> Tuple[str, str, str]:
app_tag = _tag.split("@")
org_repo = app_tag[0].split("/")
try:
repo, tag = app_tag
except ValueError:
repo, tag = app_tag + [None]
try:
org, repo = org_repo
except Exception:
org, repo = find_org(org_repo[0])
return org, repo, tag
|
17,753 |
def imshow(
img,
zmin=None,
zmax=None,
origin=None,
labels={},
x=None,
y=None,
animation_frame=None,
facet_col=None,
facet_col_wrap=None,
color_continuous_scale=None,
color_continuous_midpoint=None,
range_color=None,
title=None,
template=None,
width=None,
height=None,
aspect=None,
contrast_rescaling=None,
binary_string=None,
binary_backend="auto",
binary_compression_level=4,
binary_format="png",
):
"""
Display an image, i.e. data on a 2D regular raster.
Parameters
----------
img: array-like image, or xarray
The image data. Supported array shapes are
- (M, N): an image with scalar data. The data is visualized
using a colormap.
- (M, N, 3): an image with RGB values.
- (M, N, 4): an image with RGBA values, i.e. including transparency.
zmin, zmax : scalar or iterable, optional
zmin and zmax define the scalar range that the colormap covers. By default,
zmin and zmax correspond to the min and max values of the datatype for integer
datatypes (ie [0-255] for uint8 images, [0, 65535] for uint16 images, etc.). For
a multichannel image of floats, the max of the image is computed and zmax is the
smallest power of 256 (1, 255, 65535) greater than this max value,
with a 5% tolerance. For a single-channel image, the max of the image is used.
Overridden by range_color.
origin : str, 'upper' or 'lower' (default 'upper')
position of the [0, 0] pixel of the image array, in the upper left or lower left
corner. The convention 'upper' is typically used for matrices and images.
labels : dict with str keys and str values (default `{}`)
Sets names used in the figure for axis titles (keys ``x`` and ``y``),
colorbar title and hoverlabel (key ``color``). The values should correspond
to the desired label to be displayed. If ``img`` is an xarray, dimension
names are used for axis titles, and long name for the colorbar title
(unless overridden in ``labels``). Possible keys are: x, y, and color.
x, y: list-like, optional
x and y are used to label the axes of single-channel heatmap visualizations and
their lengths must match the lengths of the second and first dimensions of the
img argument. They are auto-populated if the input is an xarray.
facet_col: int, optional (default None)
        axis number along which the image array is sliced to create a facetted plot.
facet_col_wrap: int
Maximum number of facet columns. Wraps the column variable at this width,
so that the column facets span multiple rows.
Ignored if `facet_col` is None.
color_continuous_scale : str or list of str
colormap used to map scalar data to colors (for a 2D image). This parameter is
not used for RGB or RGBA images. If a string is provided, it should be the name
of a known color scale, and if a list is provided, it should be a list of CSS-
compatible colors.
color_continuous_midpoint : number
If set, computes the bounds of the continuous color scale to have the desired
midpoint. Overridden by range_color or zmin and zmax.
range_color : list of two numbers
If provided, overrides auto-scaling on the continuous color scale, including
overriding `color_continuous_midpoint`. Also overrides zmin and zmax. Used only
for single-channel images.
title : str
The figure title.
template : str or dict or plotly.graph_objects.layout.Template instance
The figure template name or definition.
width : number
The figure width in pixels.
height: number
The figure height in pixels.
aspect: 'equal', 'auto', or None
        - 'equal': Ensures an aspect ratio of 1 for pixels (square pixels)
- 'auto': The axes is kept fixed and the aspect ratio of pixels is
adjusted so that the data fit in the axes. In general, this will
result in non-square pixels.
- if None, 'equal' is used for numpy arrays and 'auto' for xarrays
(which have typically heterogeneous coordinates)
contrast_rescaling: 'minmax', 'infer', or None
how to determine data values corresponding to the bounds of the color
range, when zmin or zmax are not passed. If `minmax`, the min and max
values of the image are used. If `infer`, a heuristic based on the image
data type is used.
binary_string: bool, default None
if True, the image data are first rescaled and encoded as uint8 and
then passed to plotly.js as a b64 PNG string. If False, data are passed
unchanged as a numerical array. Setting to True may lead to performance
gains, at the cost of a loss of precision depending on the original data
type. If None, use_binary_string is set to True for multichannel (eg) RGB
arrays, and to False for single-channel (2D) arrays. 2D arrays are
represented as grayscale and with no colorbar if use_binary_string is
True.
binary_backend: str, 'auto' (default), 'pil' or 'pypng'
Third-party package for the transformation of numpy arrays to
png b64 strings. If 'auto', Pillow is used if installed, otherwise
pypng.
binary_compression_level: int, between 0 and 9 (default 4)
png compression level to be passed to the backend when transforming an
array to a png b64 string. Increasing `binary_compression` decreases the
size of the png string, but the compression step takes more time. For most
images it is not worth using levels greater than 5, but it's possible to
test `len(fig.data[0].source)` and to time the execution of `imshow` to
tune the level of compression. 0 means no compression (not recommended).
binary_format: str, 'png' (default) or 'jpg'
compression format used to generate b64 string. 'png' is recommended
since it uses lossless compression, but 'jpg' (lossy) compression can
        result in smaller binary strings for natural images.
Returns
-------
fig : graph_objects.Figure containing the displayed image
See also
--------
plotly.graph_objects.Image : image trace
plotly.graph_objects.Heatmap : heatmap trace
Notes
-----
In order to update and customize the returned figure, use
`go.Figure.update_traces` or `go.Figure.update_layout`.
If an xarray is passed, dimensions names and coordinates are used for
axes labels and ticks.
"""
args = locals()
apply_default_cascade(args)
labels = labels.copy()
nslices = 1
if facet_col is not None:
if isinstance(facet_col, str):
facet_col = img.dims.index(facet_col)
nslices = img.shape[facet_col]
ncols = int(facet_col_wrap) if facet_col_wrap is not None else nslices
nrows = nslices // ncols + 1 if nslices % ncols else nslices // ncols
else:
nrows = 1
ncols = 1
if animation_frame is not None:
if isinstance(animation_frame, str):
animation_frame = img.dims.index(animation_frame)
nslices = img.shape[animation_frame]
slice_through = (facet_col is not None) or (animation_frame is not None)
slice_label = None
slices = range(nslices)
# ----- Define x and y, set labels if img is an xarray -------------------
if xarray_imported and isinstance(img, xarray.DataArray):
# if binary_string:
# raise ValueError(
# "It is not possible to use binary image strings for xarrays."
# "Please pass your data as a numpy array instead using"
# "`img.values`"
# )
dims = list(img.dims)
if slice_through:
slice_index = facet_col if facet_col is not None else animation_frame
slices = img.coords[img.dims[slice_index]].values
_ = dims.pop(slice_index)
slice_label = img.dims[slice_index]
y_label, x_label = dims[0], dims[1]
# np.datetime64 is not handled correctly by go.Heatmap
for ax in [x_label, y_label]:
if np.issubdtype(img.coords[ax].dtype, np.datetime64):
img.coords[ax] = img.coords[ax].astype(str)
if x is None:
x = img.coords[x_label]
if y is None:
y = img.coords[y_label]
if aspect is None:
aspect = "auto"
if labels.get("x", None) is None:
labels["x"] = x_label
if labels.get("y", None) is None:
labels["y"] = y_label
if labels.get("slice", None) is None:
labels["slice"] = slice_label
if labels.get("color", None) is None:
labels["color"] = xarray.plot.utils.label_from_attrs(img)
labels["color"] = labels["color"].replace("\n", "<br>")
else:
if hasattr(img, "columns") and hasattr(img.columns, "__len__"):
if x is None:
x = img.columns
if labels.get("x", None) is None and hasattr(img.columns, "name"):
labels["x"] = img.columns.name or ""
if hasattr(img, "index") and hasattr(img.index, "__len__"):
if y is None:
y = img.index
if labels.get("y", None) is None and hasattr(img.index, "name"):
labels["y"] = img.index.name or ""
if labels.get("x", None) is None:
labels["x"] = ""
if labels.get("y", None) is None:
labels["y"] = ""
if labels.get("color", None) is None:
labels["color"] = ""
if aspect is None:
aspect = "equal"
# --- Set the value of binary_string (forbidden for pandas)
if isinstance(img, pd.DataFrame):
if binary_string:
raise ValueError("Binary strings cannot be used with pandas arrays")
is_dataframe = True
else:
is_dataframe = False
# --------------- Starting from here img is always a numpy array --------
img = np.asanyarray(img)
if facet_col is not None:
img = np.moveaxis(img, facet_col, 0)
facet_col = True
if animation_frame is not None:
img = np.moveaxis(img, animation_frame, 0)
animation_frame = True
args["animation_frame"] = (
"slice" if labels.get("slice") is None else labels["slice"]
)
# Default behaviour of binary_string: True for RGB images, False for 2D
if binary_string is None:
if slice_through:
binary_string = img.ndim >= 4 and not is_dataframe
else:
binary_string = img.ndim >= 3 and not is_dataframe
# Cast bools to uint8 (also one byte)
if img.dtype == np.bool:
img = 255 * img.astype(np.uint8)
if range_color is not None:
zmin = range_color[0]
zmax = range_color[1]
# -------- Contrast rescaling: either minmax or infer ------------------
if contrast_rescaling is None:
contrast_rescaling = (
"minmax"
if (img.ndim == 2 or (img.ndim == 3 and slice_through))
else "infer"
)
# We try to set zmin and zmax only if necessary, because traces have good defaults
if contrast_rescaling == "minmax":
# When using binary_string and minmax we need to set zmin and zmax to rescale the image
if (zmin is not None or binary_string) and zmax is None:
zmax = img.max()
if (zmax is not None or binary_string) and zmin is None:
zmin = img.min()
else:
# For uint8 data and infer we let zmin and zmax to be None if passed as None
if zmax is None and img.dtype != np.uint8:
zmax = _infer_zmax_from_type(img)
if zmin is None and zmax is not None:
zmin = 0
# For 2d data, use Heatmap trace, unless binary_string is True
if (img.ndim == 2 or (img.ndim == 3 and slice_through)) and not binary_string:
y_index = 1 if slice_through else 0
if y is not None and img.shape[y_index] != len(y):
raise ValueError(
"The length of the y vector must match the length of the first "
+ "dimension of the img matrix."
)
x_index = 2 if slice_through else 1
if x is not None and img.shape[x_index] != len(x):
raise ValueError(
"The length of the x vector must match the length of the second "
+ "dimension of the img matrix."
)
if slice_through:
traces = [
go.Heatmap(x=x, y=y, z=img_slice, coloraxis="coloraxis1", name=str(i))
for i, img_slice in enumerate(img)
]
else:
traces = [go.Heatmap(x=x, y=y, z=img, coloraxis="coloraxis1")]
autorange = True if origin == "lower" else "reversed"
layout = dict(yaxis=dict(autorange=autorange))
if aspect == "equal":
layout["xaxis"] = dict(scaleanchor="y", constrain="domain")
layout["yaxis"]["constrain"] = "domain"
colorscale_validator = ColorscaleValidator("colorscale", "imshow")
layout["coloraxis1"] = dict(
colorscale=colorscale_validator.validate_coerce(
args["color_continuous_scale"]
),
cmid=color_continuous_midpoint,
cmin=zmin,
cmax=zmax,
)
if labels["color"]:
layout["coloraxis1"]["colorbar"] = dict(title_text=labels["color"])
# For 2D+RGB data, use Image trace
elif (
img.ndim >= 3 and (img.shape[-1] in [3, 4] or slice_through and binary_string)
) or (img.ndim == 2 and binary_string):
rescale_image = True # to check whether image has been modified
if zmin is not None and zmax is not None:
zmin, zmax = (
_vectorize_zvalue(zmin, mode="min"),
_vectorize_zvalue(zmax, mode="max"),
)
if binary_string:
if zmin is None and zmax is None: # no rescaling, faster
img_rescaled = img
rescale_image = False
elif img.ndim == 2 or (img.ndim == 3 and slice_through):
img_rescaled = rescale_intensity(
img, in_range=(zmin[0], zmax[0]), out_range=np.uint8
)
else:
img_rescaled = np.stack(
[
rescale_intensity(
img[..., ch],
in_range=(zmin[ch], zmax[ch]),
out_range=np.uint8,
)
for ch in range(img.shape[-1])
],
axis=-1,
)
if slice_through:
img_str = [
_array_to_b64str(
img_rescaled_slice,
backend=binary_backend,
compression=binary_compression_level,
ext=binary_format,
)
for img_rescaled_slice in img_rescaled
]
else:
img_str = [
_array_to_b64str(
img_rescaled,
backend=binary_backend,
compression=binary_compression_level,
ext=binary_format,
)
]
traces = [
go.Image(source=img_str_slice, name=str(i))
for i, img_str_slice in enumerate(img_str)
]
else:
colormodel = "rgb" if img.shape[-1] == 3 else "rgba256"
if slice_through:
traces = [
go.Image(z=img_slice, zmin=zmin, zmax=zmax, colormodel=colormodel)
for img_slice in img
]
else:
traces = [go.Image(z=img, zmin=zmin, zmax=zmax, colormodel=colormodel)]
layout = {}
if origin == "lower":
layout["yaxis"] = dict(autorange=True)
else:
raise ValueError(
"px.imshow only accepts 2D single-channel, RGB or RGBA images. "
"An image of shape %s was provided"
"Alternatively, 3-D single or multichannel datasets can be"
"visualized using the `facet_col` or `animation_frame` arguments."
% str(img.shape)
)
# Now build figure
col_labels = []
if facet_col is not None:
slice_label = "slice" if labels.get("slice") is None else labels["slice"]
if slices is None:
slices = range(nslices)
col_labels = ["%s = %d" % (slice_label, i) for i in slices]
fig = init_figure(args, "xy", [], nrows, ncols, col_labels, [])
layout_patch = dict()
for attr_name in ["height", "width"]:
if args[attr_name]:
layout_patch[attr_name] = args[attr_name]
if args["title"]:
layout_patch["title_text"] = args["title"]
elif args["template"].layout.margin.t is None:
layout_patch["margin"] = {"t": 60}
frame_list = []
for index, (slice_index, trace) in enumerate(zip(slices, traces)):
if facet_col or index == 0:
fig.add_trace(trace, row=nrows - index // ncols, col=index % ncols + 1)
if animation_frame:
frame_list.append(dict(data=trace, layout=layout, name=str(slice_index)))
if animation_frame:
fig.frames = frame_list
fig.update_layout(layout)
fig.update_layout(layout_patch)
# Hover name, z or color
if binary_string and rescale_image and not np.all(img == img_rescaled):
# we rescaled the image, hence z is not displayed in hover since it does
# not correspond to img values
hovertemplate = "%s: %%{x}<br>%s: %%{y}<extra></extra>" % (
labels["x"] or "x",
labels["y"] or "y",
)
else:
if trace["type"] == "heatmap":
hover_name = "%{z}"
elif img.ndim == 2:
hover_name = "%{z[0]}"
elif img.ndim == 3 and img.shape[-1] == 3:
hover_name = "[%{z[0]}, %{z[1]}, %{z[2]}]"
else:
hover_name = "%{z}"
hovertemplate = "%s: %%{x}<br>%s: %%{y}<br>%s: %s<extra></extra>" % (
labels["x"] or "x",
labels["y"] or "y",
labels["color"] or "color",
hover_name,
)
fig.update_traces(hovertemplate=hovertemplate)
if labels["x"]:
fig.update_xaxes(title_text=labels["x"])
if labels["y"]:
fig.update_yaxes(title_text=labels["y"])
configure_animation_controls(args, go.Image, fig)
# fig.update_layout(template=args["template"], overwrite=True)
return fig
|
def imshow(
img,
zmin=None,
zmax=None,
origin=None,
labels={},
x=None,
y=None,
animation_frame=None,
facet_col=None,
facet_col_wrap=None,
color_continuous_scale=None,
color_continuous_midpoint=None,
range_color=None,
title=None,
template=None,
width=None,
height=None,
aspect=None,
contrast_rescaling=None,
binary_string=None,
binary_backend="auto",
binary_compression_level=4,
binary_format="png",
):
"""
Display an image, i.e. data on a 2D regular raster.
Parameters
----------
img: array-like image, or xarray
The image data. Supported array shapes are
- (M, N): an image with scalar data. The data is visualized
using a colormap.
- (M, N, 3): an image with RGB values.
- (M, N, 4): an image with RGBA values, i.e. including transparency.
zmin, zmax : scalar or iterable, optional
zmin and zmax define the scalar range that the colormap covers. By default,
zmin and zmax correspond to the min and max values of the datatype for integer
datatypes (ie [0-255] for uint8 images, [0, 65535] for uint16 images, etc.). For
a multichannel image of floats, the max of the image is computed and zmax is the
smallest power of 256 (1, 255, 65535) greater than this max value,
with a 5% tolerance. For a single-channel image, the max of the image is used.
Overridden by range_color.
origin : str, 'upper' or 'lower' (default 'upper')
position of the [0, 0] pixel of the image array, in the upper left or lower left
corner. The convention 'upper' is typically used for matrices and images.
labels : dict with str keys and str values (default `{}`)
Sets names used in the figure for axis titles (keys ``x`` and ``y``),
colorbar title and hoverlabel (key ``color``). The values should correspond
to the desired label to be displayed. If ``img`` is an xarray, dimension
names are used for axis titles, and long name for the colorbar title
(unless overridden in ``labels``). Possible keys are: x, y, and color.
x, y: list-like, optional
x and y are used to label the axes of single-channel heatmap visualizations and
their lengths must match the lengths of the second and first dimensions of the
img argument. They are auto-populated if the input is an xarray.
facet_col: int, optional (default None)
        axis number along which the image array is sliced to create a facetted plot.
facet_col_wrap: int
Maximum number of facet columns. Wraps the column variable at this width,
so that the column facets span multiple rows.
Ignored if `facet_col` is None.
color_continuous_scale : str or list of str
colormap used to map scalar data to colors (for a 2D image). This parameter is
not used for RGB or RGBA images. If a string is provided, it should be the name
of a known color scale, and if a list is provided, it should be a list of CSS-
compatible colors.
color_continuous_midpoint : number
If set, computes the bounds of the continuous color scale to have the desired
midpoint. Overridden by range_color or zmin and zmax.
range_color : list of two numbers
If provided, overrides auto-scaling on the continuous color scale, including
overriding `color_continuous_midpoint`. Also overrides zmin and zmax. Used only
for single-channel images.
title : str
The figure title.
template : str or dict or plotly.graph_objects.layout.Template instance
The figure template name or definition.
width : number
The figure width in pixels.
height: number
The figure height in pixels.
aspect: 'equal', 'auto', or None
        - 'equal': Ensures an aspect ratio of 1 for pixels (square pixels)
- 'auto': The axes is kept fixed and the aspect ratio of pixels is
adjusted so that the data fit in the axes. In general, this will
result in non-square pixels.
- if None, 'equal' is used for numpy arrays and 'auto' for xarrays
(which have typically heterogeneous coordinates)
contrast_rescaling: 'minmax', 'infer', or None
how to determine data values corresponding to the bounds of the color
range, when zmin or zmax are not passed. If `minmax`, the min and max
values of the image are used. If `infer`, a heuristic based on the image
data type is used.
binary_string: bool, default None
if True, the image data are first rescaled and encoded as uint8 and
then passed to plotly.js as a b64 PNG string. If False, data are passed
unchanged as a numerical array. Setting to True may lead to performance
gains, at the cost of a loss of precision depending on the original data
type. If None, use_binary_string is set to True for multichannel (eg) RGB
arrays, and to False for single-channel (2D) arrays. 2D arrays are
represented as grayscale and with no colorbar if use_binary_string is
True.
binary_backend: str, 'auto' (default), 'pil' or 'pypng'
Third-party package for the transformation of numpy arrays to
png b64 strings. If 'auto', Pillow is used if installed, otherwise
pypng.
binary_compression_level: int, between 0 and 9 (default 4)
png compression level to be passed to the backend when transforming an
array to a png b64 string. Increasing `binary_compression` decreases the
size of the png string, but the compression step takes more time. For most
images it is not worth using levels greater than 5, but it's possible to
test `len(fig.data[0].source)` and to time the execution of `imshow` to
tune the level of compression. 0 means no compression (not recommended).
binary_format: str, 'png' (default) or 'jpg'
compression format used to generate b64 string. 'png' is recommended
since it uses lossless compression, but 'jpg' (lossy) compression can
        result in smaller binary strings for natural images.
Returns
-------
fig : graph_objects.Figure containing the displayed image
See also
--------
plotly.graph_objects.Image : image trace
plotly.graph_objects.Heatmap : heatmap trace
Notes
-----
In order to update and customize the returned figure, use
`go.Figure.update_traces` or `go.Figure.update_layout`.
If an xarray is passed, dimensions names and coordinates are used for
axes labels and ticks.
"""
args = locals()
apply_default_cascade(args)
labels = labels.copy()
nslices = 1
if facet_col is not None:
if isinstance(facet_col, str):
facet_col = img.dims.index(facet_col)
nslices = img.shape[facet_col]
ncols = int(facet_col_wrap) if facet_col_wrap is not None else nslices
nrows = nslices // ncols + 1 if nslices % ncols else nslices // ncols
else:
nrows = 1
ncols = 1
if animation_frame is not None:
if isinstance(animation_frame, str):
animation_frame = img.dims.index(animation_frame)
nslices = img.shape[animation_frame]
slice_through = (facet_col is not None) or (animation_frame is not None)
slice_label = None
slices = range(nslices)
# ----- Define x and y, set labels if img is an xarray -------------------
if xarray_imported and isinstance(img, xarray.DataArray):
# if binary_string:
# raise ValueError(
# "It is not possible to use binary image strings for xarrays."
# "Please pass your data as a numpy array instead using"
# "`img.values`"
# )
dims = list(img.dims)
if slice_through:
slice_index = facet_col if facet_col is not None else animation_frame
slices = img.coords[img.dims[slice_index]].values
_ = dims.pop(slice_index)
slice_label = img.dims[slice_index]
y_label, x_label = dims[0], dims[1]
# np.datetime64 is not handled correctly by go.Heatmap
for ax in [x_label, y_label]:
if np.issubdtype(img.coords[ax].dtype, np.datetime64):
img.coords[ax] = img.coords[ax].astype(str)
if x is None:
x = img.coords[x_label]
if y is None:
y = img.coords[y_label]
if aspect is None:
aspect = "auto"
if labels.get("x", None) is None:
labels["x"] = x_label
if labels.get("y", None) is None:
labels["y"] = y_label
if labels.get("slice", None) is None:
labels["slice"] = slice_label
if labels.get("color", None) is None:
labels["color"] = xarray.plot.utils.label_from_attrs(img)
labels["color"] = labels["color"].replace("\n", "<br>")
else:
if hasattr(img, "columns") and hasattr(img.columns, "__len__"):
if x is None:
x = img.columns
if labels.get("x", None) is None and hasattr(img.columns, "name"):
labels["x"] = img.columns.name or ""
if hasattr(img, "index") and hasattr(img.index, "__len__"):
if y is None:
y = img.index
if labels.get("y", None) is None and hasattr(img.index, "name"):
labels["y"] = img.index.name or ""
if labels.get("x", None) is None:
labels["x"] = ""
if labels.get("y", None) is None:
labels["y"] = ""
if labels.get("color", None) is None:
labels["color"] = ""
if aspect is None:
aspect = "equal"
# --- Set the value of binary_string (forbidden for pandas)
if isinstance(img, pd.DataFrame):
if binary_string:
raise ValueError("Binary strings cannot be used with pandas arrays")
is_dataframe = True
else:
is_dataframe = False
# --------------- Starting from here img is always a numpy array --------
img = np.asanyarray(img)
if facet_col is not None:
img = np.moveaxis(img, facet_col, 0)
facet_col = True
if animation_frame is not None:
img = np.moveaxis(img, animation_frame, 0)
animation_frame = True
args["animation_frame"] = (
"slice" if labels.get("slice") is None else labels["slice"]
)
# Default behaviour of binary_string: True for RGB images, False for 2D
if binary_string is None:
if slice_through:
binary_string = img.ndim >= 4 and not is_dataframe
else:
binary_string = img.ndim >= 3 and not is_dataframe
# Cast bools to uint8 (also one byte)
if img.dtype == np.bool:
img = 255 * img.astype(np.uint8)
if range_color is not None:
zmin = range_color[0]
zmax = range_color[1]
# -------- Contrast rescaling: either minmax or infer ------------------
if contrast_rescaling is None:
contrast_rescaling = (
"minmax"
if (img.ndim == 2 or (img.ndim == 3 and slice_through))
else "infer"
)
# We try to set zmin and zmax only if necessary, because traces have good defaults
if contrast_rescaling == "minmax":
# When using binary_string and minmax we need to set zmin and zmax to rescale the image
if (zmin is not None or binary_string) and zmax is None:
zmax = img.max()
if (zmax is not None or binary_string) and zmin is None:
zmin = img.min()
else:
# For uint8 data and infer we let zmin and zmax to be None if passed as None
if zmax is None and img.dtype != np.uint8:
zmax = _infer_zmax_from_type(img)
if zmin is None and zmax is not None:
zmin = 0
# For 2d data, use Heatmap trace, unless binary_string is True
if (img.ndim == 2 or (img.ndim == 3 and slice_through)) and not binary_string:
y_index = 1 if slice_through else 0
if y is not None and img.shape[y_index] != len(y):
raise ValueError(
"The length of the y vector must match the length of the first "
+ "dimension of the img matrix."
)
x_index = 2 if slice_through else 1
if x is not None and img.shape[x_index] != len(x):
raise ValueError(
"The length of the x vector must match the length of the second "
+ "dimension of the img matrix."
)
if slice_through:
traces = [
go.Heatmap(x=x, y=y, z=img_slice, coloraxis="coloraxis1", name=str(i))
for i, img_slice in enumerate(img)
]
else:
traces = [go.Heatmap(x=x, y=y, z=img, coloraxis="coloraxis1")]
autorange = True if origin == "lower" else "reversed"
layout = dict(yaxis=dict(autorange=autorange))
if aspect == "equal":
layout["xaxis"] = dict(scaleanchor="y", constrain="domain")
layout["yaxis"]["constrain"] = "domain"
colorscale_validator = ColorscaleValidator("colorscale", "imshow")
layout["coloraxis1"] = dict(
colorscale=colorscale_validator.validate_coerce(
args["color_continuous_scale"]
),
cmid=color_continuous_midpoint,
cmin=zmin,
cmax=zmax,
)
if labels["color"]:
layout["coloraxis1"]["colorbar"] = dict(title_text=labels["color"])
# For 2D+RGB data, use Image trace
elif (
img.ndim >= 3 and (img.shape[-1] in [3, 4] or slice_through and binary_string)
) or (img.ndim == 2 and binary_string):
rescale_image = True # to check whether image has been modified
if zmin is not None and zmax is not None:
zmin, zmax = (
_vectorize_zvalue(zmin, mode="min"),
_vectorize_zvalue(zmax, mode="max"),
)
if binary_string:
if zmin is None and zmax is None: # no rescaling, faster
img_rescaled = img
rescale_image = False
elif img.ndim == 2 or (img.ndim == 3 and slice_through):
img_rescaled = rescale_intensity(
img, in_range=(zmin[0], zmax[0]), out_range=np.uint8
)
else:
img_rescaled = np.stack(
[
rescale_intensity(
img[..., ch],
in_range=(zmin[ch], zmax[ch]),
out_range=np.uint8,
)
for ch in range(img.shape[-1])
],
axis=-1,
)
if slice_through:
img_str = [
_array_to_b64str(
img_rescaled_slice,
backend=binary_backend,
compression=binary_compression_level,
ext=binary_format,
)
for img_rescaled_slice in img_rescaled
]
else:
img_str = [
_array_to_b64str(
img_rescaled,
backend=binary_backend,
compression=binary_compression_level,
ext=binary_format,
)
]
traces = [
go.Image(source=img_str_slice, name=str(i))
for i, img_str_slice in enumerate(img_str)
]
else:
colormodel = "rgb" if img.shape[-1] == 3 else "rgba256"
if slice_through:
traces = [
go.Image(z=img_slice, zmin=zmin, zmax=zmax, colormodel=colormodel)
for img_slice in img
]
else:
traces = [go.Image(z=img, zmin=zmin, zmax=zmax, colormodel=colormodel)]
layout = {}
if origin == "lower":
layout["yaxis"] = dict(autorange=True)
else:
raise ValueError(
"px.imshow only accepts 2D single-channel, RGB or RGBA images. "
"An image of shape %s was provided."
"Alternatively, 3-D single or multichannel datasets can be"
"visualized using the `facet_col` or `animation_frame` arguments."
% str(img.shape)
)
# Now build figure
col_labels = []
if facet_col is not None:
slice_label = "slice" if labels.get("slice") is None else labels["slice"]
if slices is None:
slices = range(nslices)
col_labels = ["%s = %d" % (slice_label, i) for i in slices]
fig = init_figure(args, "xy", [], nrows, ncols, col_labels, [])
layout_patch = dict()
for attr_name in ["height", "width"]:
if args[attr_name]:
layout_patch[attr_name] = args[attr_name]
if args["title"]:
layout_patch["title_text"] = args["title"]
elif args["template"].layout.margin.t is None:
layout_patch["margin"] = {"t": 60}
frame_list = []
for index, (slice_index, trace) in enumerate(zip(slices, traces)):
if facet_col or index == 0:
fig.add_trace(trace, row=nrows - index // ncols, col=index % ncols + 1)
if animation_frame:
frame_list.append(dict(data=trace, layout=layout, name=str(slice_index)))
if animation_frame:
fig.frames = frame_list
fig.update_layout(layout)
fig.update_layout(layout_patch)
# Hover name, z or color
if binary_string and rescale_image and not np.all(img == img_rescaled):
# we rescaled the image, hence z is not displayed in hover since it does
# not correspond to img values
hovertemplate = "%s: %%{x}<br>%s: %%{y}<extra></extra>" % (
labels["x"] or "x",
labels["y"] or "y",
)
else:
if trace["type"] == "heatmap":
hover_name = "%{z}"
elif img.ndim == 2:
hover_name = "%{z[0]}"
elif img.ndim == 3 and img.shape[-1] == 3:
hover_name = "[%{z[0]}, %{z[1]}, %{z[2]}]"
else:
hover_name = "%{z}"
hovertemplate = "%s: %%{x}<br>%s: %%{y}<br>%s: %s<extra></extra>" % (
labels["x"] or "x",
labels["y"] or "y",
labels["color"] or "color",
hover_name,
)
fig.update_traces(hovertemplate=hovertemplate)
if labels["x"]:
fig.update_xaxes(title_text=labels["x"])
if labels["y"]:
fig.update_yaxes(title_text=labels["y"])
configure_animation_controls(args, go.Image, fig)
# fig.update_layout(template=args["template"], overwrite=True)
return fig
|
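A minimal usage sketch for the faceting behaviour documented above, assuming a plotly version whose plotly.express.imshow matches the signature shown and that numpy is installed; the image volume is synthetic random data.
import numpy as np
import plotly.express as px

# four synthetic grayscale slices stacked along axis 0
volume = np.random.randint(0, 255, size=(4, 64, 64)).astype(np.uint8)
fig = px.imshow(
    volume,
    facet_col=0,            # one facet per slice along axis 0
    facet_col_wrap=2,       # wrap facets into 2 columns
    color_continuous_scale="gray",
    labels={"color": "intensity"},
)
# fig.show()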
33,726 |
def trial_progress_str(trials,
metric_columns,
parameter_columns=None,
total_samples=0,
fmt="psql",
max_rows=None):
"""Returns a human readable message for printing to the console.
This contains a table where each row represents a trial, its parameters
and the current values of its metrics.
Args:
trials (list[Trial]): List of trials to get progress string for.
metric_columns (dict[str, str]|list[str]): Names of metrics to include.
If this is a dict, the keys are metric names and the values are
the names to use in the message. If this is a list, the metric
name is used in the message directly.
parameter_columns (dict[str, str]|list[str]): Names of parameters to
include. If this is a dict, the keys are parameter names and the
values are the names to use in the message. If this is a list,
the parameter name is used in the message directly. If this is
empty, all parameters are used in the message.
total_samples (int): Total number of trials that will be generated.
fmt (str): Output format (see tablefmt in tabulate API).
max_rows (int): Maximum number of rows in the trial table. Defaults to
unlimited.
"""
messages = []
delim = "<br>" if fmt == "html" else "\n"
if len(trials) < 1:
return delim.join(messages)
num_trials = len(trials)
trials_by_state = collections.defaultdict(list)
for t in trials:
trials_by_state[t.status].append(t)
for local_dir in sorted({t.local_dir for t in trials}):
messages.append("Result logdir: {}".format(local_dir))
num_trials_strs = [
"{} {}".format(len(trials_by_state[state]), state)
for state in sorted(trials_by_state)
]
state_tbl_oder = [
Trial.RUNNING, Trial.PAUSED, Trial.PENDING, Trial.TERMINATED,
Trial.ERROR
]
max_rows = max_rows or float("inf")
if num_trials > max_rows:
# TODO(ujvl): suggestion for users to view more rows.
trials_by_state_trunc = _fair_filter_trials(trials_by_state, max_rows)
trials = []
overflow_strs = []
for state in state_tbl_oder:
if state not in trials_by_state:
continue
trials += trials_by_state_trunc[state]
num = len(trials_by_state[state]) - len(
trials_by_state_trunc[state])
if num > 0:
overflow_strs.append("{} {}".format(num, state))
# Build overflow string.
overflow = num_trials - max_rows
overflow_str = ", ".join(overflow_strs)
else:
overflow = False
trials = []
for state in state_tbl_oder:
if state not in trials_by_state:
continue
trials += trials_by_state[state]
if total_samples and total_samples >= sys.maxsize:
total_samples = "infinite"
messages.append("Number of trials: {}{} ({})".format(
num_trials, f"/{total_samples}"
if total_samples else "", ", ".join(num_trials_strs)))
# Pre-process trials to figure out what columns to show.
if isinstance(metric_columns, Mapping):
metric_keys = list(metric_columns.keys())
else:
metric_keys = metric_columns
metric_keys = [
k for k in metric_keys if any(
t.last_result.get(k) is not None for t in trials)
]
if not parameter_columns:
parameter_keys = sorted(
set().union(*[t.evaluated_params for t in trials]))
elif isinstance(parameter_columns, Mapping):
parameter_keys = list(parameter_columns.keys())
else:
parameter_keys = parameter_columns
# Build trial rows.
trial_table = [
_get_trial_info(trial, parameter_keys, metric_keys) for trial in trials
]
# Format column headings
if isinstance(metric_columns, Mapping):
formatted_metric_columns = [metric_columns[k] for k in metric_keys]
else:
formatted_metric_columns = metric_keys
if isinstance(parameter_columns, Mapping):
formatted_parameter_columns = [
parameter_columns[k] for k in parameter_keys
]
else:
formatted_parameter_columns = parameter_keys
columns = (["Trial name", "status", "loc"] + formatted_parameter_columns +
formatted_metric_columns)
# Tabulate.
messages.append(
tabulate(trial_table, headers=columns, tablefmt=fmt, showindex=False))
if overflow:
messages.append("... {} more trials not shown ({})".format(
overflow, overflow_str))
return delim.join(messages)
|
def trial_progress_str(trials,
metric_columns,
parameter_columns=None,
total_samples=0,
fmt="psql",
max_rows=None):
"""Returns a human readable message for printing to the console.
This contains a table where each row represents a trial, its parameters
and the current values of its metrics.
Args:
trials (list[Trial]): List of trials to get progress string for.
metric_columns (dict[str, str]|list[str]): Names of metrics to include.
If this is a dict, the keys are metric names and the values are
the names to use in the message. If this is a list, the metric
name is used in the message directly.
parameter_columns (dict[str, str]|list[str]): Names of parameters to
include. If this is a dict, the keys are parameter names and the
values are the names to use in the message. If this is a list,
the parameter name is used in the message directly. If this is
empty, all parameters are used in the message.
total_samples (int): Total number of trials that will be generated.
fmt (str): Output format (see tablefmt in tabulate API).
max_rows (int): Maximum number of rows in the trial table. Defaults to
unlimited.
"""
messages = []
delim = "<br>" if fmt == "html" else "\n"
if len(trials) < 1:
return delim.join(messages)
num_trials = len(trials)
trials_by_state = collections.defaultdict(list)
for t in trials:
trials_by_state[t.status].append(t)
for local_dir in sorted({t.local_dir for t in trials}):
messages.append("Result logdir: {}".format(local_dir))
num_trials_strs = [
"{} {}".format(len(trials_by_state[state]), state)
for state in sorted(trials_by_state)
]
state_tbl_order = [
Trial.RUNNING, Trial.PAUSED, Trial.PENDING, Trial.TERMINATED,
Trial.ERROR
]
max_rows = max_rows or float("inf")
if num_trials > max_rows:
# TODO(ujvl): suggestion for users to view more rows.
trials_by_state_trunc = _fair_filter_trials(trials_by_state, max_rows)
trials = []
overflow_strs = []
        for state in state_tbl_order:
if state not in trials_by_state:
continue
trials += trials_by_state_trunc[state]
num = len(trials_by_state[state]) - len(
trials_by_state_trunc[state])
if num > 0:
overflow_strs.append("{} {}".format(num, state))
# Build overflow string.
overflow = num_trials - max_rows
overflow_str = ", ".join(overflow_strs)
else:
overflow = False
trials = []
        for state in state_tbl_order:
if state not in trials_by_state:
continue
trials += trials_by_state[state]
if total_samples and total_samples >= sys.maxsize:
total_samples = "infinite"
messages.append("Number of trials: {}{} ({})".format(
num_trials, f"/{total_samples}"
if total_samples else "", ", ".join(num_trials_strs)))
# Pre-process trials to figure out what columns to show.
if isinstance(metric_columns, Mapping):
metric_keys = list(metric_columns.keys())
else:
metric_keys = metric_columns
metric_keys = [
k for k in metric_keys if any(
t.last_result.get(k) is not None for t in trials)
]
if not parameter_columns:
parameter_keys = sorted(
set().union(*[t.evaluated_params for t in trials]))
elif isinstance(parameter_columns, Mapping):
parameter_keys = list(parameter_columns.keys())
else:
parameter_keys = parameter_columns
# Build trial rows.
trial_table = [
_get_trial_info(trial, parameter_keys, metric_keys) for trial in trials
]
# Format column headings
if isinstance(metric_columns, Mapping):
formatted_metric_columns = [metric_columns[k] for k in metric_keys]
else:
formatted_metric_columns = metric_keys
if isinstance(parameter_columns, Mapping):
formatted_parameter_columns = [
parameter_columns[k] for k in parameter_keys
]
else:
formatted_parameter_columns = parameter_keys
columns = (["Trial name", "status", "loc"] + formatted_parameter_columns +
formatted_metric_columns)
# Tabulate.
messages.append(
tabulate(trial_table, headers=columns, tablefmt=fmt, showindex=False))
if overflow:
messages.append("... {} more trials not shown ({})".format(
overflow, overflow_str))
return delim.join(messages)
|
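The table itself comes from tabulate, as in the final messages.append(...) above; a stand-alone sketch with made-up trial data shows what the default "psql" format produces.
from tabulate import tabulate

# hypothetical trial rows; real rows come from _get_trial_info(...)
columns = ["Trial name", "status", "loc", "lr", "reward"]
rows = [
    ["train_fn_00000", "RUNNING", "10.0.0.1:4321", 0.01, 41.7],
    ["train_fn_00001", "TERMINATED", "10.0.0.1:4322", 0.001, 55.2],
]
print(tabulate(rows, headers=columns, tablefmt="psql", showindex=False))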
3,201 |
def __get_range_chunked(issue_ids, start, stop, rollup):
combined = {}
for chunk in chunked(issue_ids, GET_RANGE_BATCH_SIZE):
combined.update(tsdb.get_range(tsdb.models.group, list(chunk), start, stop, rollup=rollup))
return combined
|
def _get_range_chunked(issue_ids, start, stop, rollup):
combined = {}
for chunk in chunked(issue_ids, GET_RANGE_BATCH_SIZE):
combined.update(tsdb.get_range(tsdb.models.group, list(chunk), start, stop, rollup=rollup))
return combined
|
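The helper above simply batches the ids and merges the dicts returned per batch; a generic sketch of that chunk-and-merge pattern, with a local chunked() and a stand-in fetch function in place of Sentry's tsdb.get_range.
from itertools import islice

def chunked(iterable, size):
    # local helper for the sketch; not necessarily the chunked() imported above
    it = iter(iterable)
    while True:
        batch = list(islice(it, size))
        if not batch:
            return
        yield batch

def fetch_range(ids):
    # stand-in for tsdb.get_range(...): one (empty) series per issue id
    return {issue_id: [] for issue_id in ids}

combined = {}
for chunk in chunked(range(25), 10):
    combined.update(fetch_range(chunk))
print(len(combined))  # 25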
39,753 |
def numpy_img2d_color_std(img, seg, means=None):
""" compute color STD by numpy
:param ndarray img: input RGB image
    :param ndarray seg: segmentation of the image
:param ndarray means: precomputed feature means
:return: np.array<nb_lbs, 3> matrix features per segment
.. seealso:: :func:`imsegm.descriptors.cython_img2d_color_std`
>>> image = np.zeros((2, 10, 3))
>>> image[:, 2:6, 0] = 1
>>> image[:, 3:8, 1] = 3
>>> image[:, 4:9, 2] = 2
>>> segm = np.array([[0, 0, 0, 0, 0, 1, 1, 1, 1, 1],
... [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]])
>>> numpy_img2d_color_std(image, segm)
array([[ 0.48989795, 1.46969385, 0.8 ],
[ 0.4 , 1.46969385, 0.8 ]])
"""
logging.debug('computing Colour STD for image %r & segm %r with'
' %i segments', img.shape, seg.shape, np.max(seg))
_check_color_image_segm(img, seg)
if means is None:
means = numpy_img2d_color_mean(img, seg)
nb_labels = np.max(seg) + 1
if len(means) < nb_labels:
raise ValueError('number of means (%i) should be equal to number of labels (%i)' % (len(means), nb_labels))
variations = np.zeros((nb_labels, 3))
counts = np.zeros(nb_labels)
for i in range(seg.shape[0]):
for j in range(seg.shape[1]):
lb = seg[i, j]
variations[lb, :] += (img[i, j, :] - means[lb, :])**2
counts[lb] += 1
# prevent dividing by 0
counts[counts == 0] = -1
variations = (variations / np.tile(counts, (3, 1)).T.astype(float))
# preventing negative zeros
variations[variations == 0] = 0
stds = np.sqrt(variations)
return stds
|
def numpy_img2d_color_std(img, seg, means=None):
""" compute color STD by numpy
:param ndarray img: input RGB image
    :param ndarray seg: segmentation of the image
:param ndarray means: precomputed feature means
:return: np.array<nb_lbs, 3> matrix features per segment
.. seealso:: :func:`imsegm.descriptors.cython_img2d_color_std`
>>> image = np.zeros((2, 10, 3))
>>> image[:, 2:6, 0] = 1
>>> image[:, 3:8, 1] = 3
>>> image[:, 4:9, 2] = 2
>>> segm = np.array([[0, 0, 0, 0, 0, 1, 1, 1, 1, 1],
... [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]])
>>> numpy_img2d_color_std(image, segm)
array([[ 0.48989795, 1.46969385, 0.8 ],
[ 0.4 , 1.46969385, 0.8 ]])
"""
logging.debug('computing Colour STD for image %r & segm %r with %i segments', img.shape, seg.shape, np.max(seg))
_check_color_image_segm(img, seg)
if means is None:
means = numpy_img2d_color_mean(img, seg)
nb_labels = np.max(seg) + 1
if len(means) < nb_labels:
raise ValueError('number of means (%i) should be equal to number of labels (%i)' % (len(means), nb_labels))
variations = np.zeros((nb_labels, 3))
counts = np.zeros(nb_labels)
for i in range(seg.shape[0]):
for j in range(seg.shape[1]):
lb = seg[i, j]
variations[lb, :] += (img[i, j, :] - means[lb, :])**2
counts[lb] += 1
# prevent dividing by 0
counts[counts == 0] = -1
variations = (variations / np.tile(counts, (3, 1)).T.astype(float))
# preventing negative zeros
variations[variations == 0] = 0
stds = np.sqrt(variations)
return stds
|
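For comparison only (this is not part of the library above), the same per-segment colour standard deviation can be sketched without the explicit pixel loops using np.add.at; the toy image and segmentation mirror the doctest, so the printed values should match it.
import numpy as np

image = np.zeros((2, 10, 3))
image[:, 2:6, 0] = 1
image[:, 3:8, 1] = 3
image[:, 4:9, 2] = 2
segm = np.array([[0, 0, 0, 0, 0, 1, 1, 1, 1, 1],
                 [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]])

labels = segm.ravel()                 # per-pixel segment label
pixels = image.reshape(-1, 3)         # per-pixel colour
nb_labels = segm.max() + 1
counts = np.bincount(labels, minlength=nb_labels).astype(float)

sums = np.zeros((nb_labels, 3))
np.add.at(sums, labels, pixels)       # accumulate colour sums per segment
means = sums / counts[:, None]

sq_dev = np.zeros((nb_labels, 3))
np.add.at(sq_dev, labels, (pixels - means[labels]) ** 2)
stds = np.sqrt(sq_dev / counts[:, None])
print(stds)  # matches the doctest values above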
2,972 |
def load(fh, encoding=None, is_verbose: bool = False):
"""
Load a pickle, with a provided encoding,
Parameters
----------
fh : a filelike object
encoding : an optional encoding
is_verbose : show exception output
"""
try:
fh.seek(0)
if encoding is not None:
up = Unpickler(fh, encoding=encoding)
else:
up = Unpickler(fh)
up.is_verbose = is_verbose
return up.load()
except (ValueError, TypeError):
raise
|
def load(fh, encoding: Optional[str] = None, is_verbose: bool = False):
"""
Load a pickle, with a provided encoding,
Parameters
----------
fh : a filelike object
encoding : an optional encoding
is_verbose : show exception output
"""
try:
fh.seek(0)
if encoding is not None:
up = Unpickler(fh, encoding=encoding)
else:
up = Unpickler(fh)
up.is_verbose = is_verbose
return up.load()
except (ValueError, TypeError):
raise
|
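A round-trip sketch of the pattern the helper wraps (seek to the start, build an Unpickler, call load); here the standard-library pickle.Unpickler stands in for the custom Unpickler imported by the real module.
import io
import pickle

buf = io.BytesIO()
pickle.dump({"a": 1, "b": [2, 3]}, buf)

buf.seek(0)
obj = pickle.Unpickler(buf).load()
print(obj)  # {'a': 1, 'b': [2, 3]}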
55,675 |
def adjust_hue(img: np.ndarray, hue_factor: float) -> np.ndarray:
"""Adjust hue of an image. The image hue is adjusted by converting the
image to HSV and cyclically shifting the intensities in the hue channel
(H). The image is then converted back to original image mode. `hue_factor`
is the amount of shift in H channel and must be in the interval `[-0.5,
0.5]`.
Args:
img (ndarray): Image to be adjusted.
hue_factor (float): How much to shift the hue channel. Should be in
[-0.5, 0.5]. 0.5 and -0.5 give complete reversal of hue channel in
HSV space in positive and negative direction respectively.
0 means no shift. Therefore, both -0.5 and 0.5 will give an image
with complementary colors while 0 gives the original image.
Returns:
ndarray: Hue adjusted image.
"""
if not (-0.5 <= hue_factor <= 0.5):
raise ValueError(f'hue_factor:{hue_factor} is not in [-0.5, 0.5].')
if not (isinstance(img, np.ndarray) and (img.ndim in {2, 3})):
raise TypeError('img should be ndarray with dim=[2 or 3].')
dtype = img.dtype
img = img.astype(np.uint8)
hsv_img = cv2.cvtColor(img, cv2.COLOR_RGB2HSV_FULL)
h, s, v = cv2.split(hsv_img)
h = h.astype(np.uint8)
    # uint8 addition takes care of rotation across boundaries
with np.errstate(over='ignore'):
h += np.uint8(hue_factor * 255)
hsv_img = cv2.merge([h, s, v])
return cv2.cvtColor(hsv_img, cv2.COLOR_HSV2RGB_FULL).astype(dtype)
|
def adjust_hue(img: np.ndarray, hue_factor: float) -> np.ndarray:
"""Adjust hue of an image.
The image hue is adjusted by converting the image to HSV and cyclically
shifting the intensities in the hue channel (H). The image is then converted
back to original image mode.
`hue_factor` is the amount of shift in H channel and must be in the interval `[-0.5,
0.5]`.
Args:
img (ndarray): Image to be adjusted.
hue_factor (float): How much to shift the hue channel. Should be in
[-0.5, 0.5]. 0.5 and -0.5 give complete reversal of hue channel in
HSV space in positive and negative direction respectively.
0 means no shift. Therefore, both -0.5 and 0.5 will give an image
with complementary colors while 0 gives the original image.
Returns:
ndarray: Hue adjusted image.
"""
if not (-0.5 <= hue_factor <= 0.5):
raise ValueError(f'hue_factor:{hue_factor} is not in [-0.5, 0.5].')
if not (isinstance(img, np.ndarray) and (img.ndim in {2, 3})):
raise TypeError('img should be ndarray with dim=[2 or 3].')
dtype = img.dtype
img = img.astype(np.uint8)
hsv_img = cv2.cvtColor(img, cv2.COLOR_RGB2HSV_FULL)
h, s, v = cv2.split(hsv_img)
h = h.astype(np.uint8)
    # uint8 addition takes care of rotation across boundaries
with np.errstate(over='ignore'):
h += np.uint8(hue_factor * 255)
hsv_img = cv2.merge([h, s, v])
return cv2.cvtColor(hsv_img, cv2.COLOR_HSV2RGB_FULL).astype(dtype)
|
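A quick synthetic check of the function above (numpy and cv2 assumed installed, as in the surrounding module): shifting pure red by hue_factor=0.5 should land near its complementary colour.
import numpy as np

red = np.zeros((4, 4, 3), dtype=np.uint8)
red[..., 0] = 255                  # solid red, RGB channel order as the docstring assumes
shifted = adjust_hue(red, 0.5)
print(shifted[0, 0])               # approximately [0, 255, 255], i.e. cyan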
18,497 |
def make_argument_parser(**kwargs):
    """Create a basic argument parser without any subcommands added."""
parser = SpackArgumentParser(
formatter_class=SpackHelpFormatter, add_help=False,
description=(
"A flexible package manager that supports multiple versions,\n"
"configurations, platforms, and compilers."),
**kwargs)
# stat names in groups of 7, for nice wrapping.
stat_lines = list(zip(*(iter(stat_names),) * 7))
parser.add_argument(
'-h', '--help',
dest='help', action='store_const', const='short', default=None,
help="show this help message and exit")
parser.add_argument(
'-H', '--all-help',
dest='help', action='store_const', const='long', default=None,
help="show help for all commands (same as spack help --all)")
parser.add_argument(
'--color', action='store', default='auto',
choices=('always', 'never', 'auto'),
help="when to colorize output (default: auto)")
parser.add_argument(
'-C', '--config-scope', dest='config_scopes', action='append',
metavar='DIR', help="add a custom configuration scope")
parser.add_argument(
'-d', '--debug', action='store_true',
help="write out basic debug messages")
parser.add_argument(
'-dd', action='store_true',
help="write out basic and standard debug messages")
parser.add_argument(
'-ddd', action='store_true',
help="write out basic, standard and detailed debug messages")
parser.add_argument(
'-dddd', action='store_true', help="write out all debug messages")
parser.add_argument(
'--timestamp', action='store_true',
help="Add a timestamp to tty output")
parser.add_argument(
'--pdb', action='store_true',
help="run spack under the pdb debugger")
env_group = parser.add_mutually_exclusive_group()
env_group.add_argument(
'-e', '--env', dest='env', metavar='ENV', action='store',
help="run with a specific environment (see spack env)")
env_group.add_argument(
'-D', '--env-dir', dest='env_dir', metavar='DIR', action='store',
help="run with an environment directory (ignore named environments)")
env_group.add_argument(
'-E', '--no-env', dest='no_env', action='store_true',
help="run without any environments activated (see spack env)")
parser.add_argument(
'--use-env-repo', action='store_true',
help="when running in an environment, use its package repository")
parser.add_argument(
'-k', '--insecure', action='store_true',
help="do not check ssl certificates when downloading")
parser.add_argument(
'-l', '--enable-locks', action='store_true', dest='locks',
default=None, help="use filesystem locking (default)")
parser.add_argument(
'-L', '--disable-locks', action='store_false', dest='locks',
help="do not use filesystem locking (unsafe)")
parser.add_argument(
'-m', '--mock', action='store_true',
help="use mock packages instead of real ones")
parser.add_argument(
'-p', '--profile', action='store_true', dest='spack_profile',
help="profile execution using cProfile")
parser.add_argument(
'--sorted-profile', default=None, metavar="STAT",
help="profile and sort by one or more of:\n[%s]" %
',\n '.join([', '.join(line) for line in stat_lines]))
parser.add_argument(
'--lines', default=20, action='store',
help="lines of profile output or 'all' (default: 20)")
parser.add_argument(
'-v', '--verbose', action='store_true',
help="print additional, verbose output")
parser.add_argument(
'--stacktrace', action='store_true',
help="add stacktraces to all printed statements")
parser.add_argument(
'-V', '--version', action='store_true',
help='show version number and exit')
parser.add_argument(
'--print-shell-vars', action='store',
help="print info needed by setup-env.[c]sh")
return parser
|
def make_argument_parser(**kwargs):
    """Create a basic argument parser without any subcommands added."""
parser = SpackArgumentParser(
formatter_class=SpackHelpFormatter, add_help=False,
description=(
"A flexible package manager that supports multiple versions,\n"
"configurations, platforms, and compilers."),
**kwargs)
# stat names in groups of 7, for nice wrapping.
stat_lines = list(zip(*(iter(stat_names),) * 7))
parser.add_argument(
'-h', '--help',
dest='help', action='store_const', const='short', default=None,
help="show this help message and exit")
parser.add_argument(
'-H', '--all-help',
dest='help', action='store_const', const='long', default=None,
help="show help for all commands (same as spack help --all)")
parser.add_argument(
'--color', action='store', default='auto',
choices=('always', 'never', 'auto'),
help="when to colorize output (default: auto)")
parser.add_argument(
'-C', '--config-scope', dest='config_scopes', action='append',
metavar='DIR', help="add a custom configuration scope")
parser.add_argument(
'-d', '--debug', action='store_true',
help="write out basic debug messages")
parser.add_argument(
'-dd', action='store_true',
help="write out basic and standard debug messages")
parser.add_argument(
'-ddd', action='store_true',
help="write out basic, standard and detailed debug messages")
parser.add_argument(
'-dddd', action='store_true', help="write out all debug messages")
parser.add_argument(
'--timestamp', action='store_true',
help="Add a timestamp to tty output")
parser.add_argument(
'--pdb', action='store_true',
help="run spack under the pdb debugger")
env_group = parser.add_mutually_exclusive_group()
env_group.add_argument(
'-e', '--env', dest='env', metavar='ENV', action='store',
help="run with a specific environment (see spack env)")
env_group.add_argument(
'-D', '--env-dir', dest='env_dir', metavar='DIR', action='store',
help="run with an environment directory (ignore named environments)")
env_group.add_argument(
'-E', '--no-env', dest='no_env', action='store_true',
help="run without any environments activated (see spack env)")
parser.add_argument(
'--use-env-repo', action='store_true',
help="when running in an environment, use its package repository")
parser.add_argument(
'-k', '--insecure', action='store_true',
help="do not check ssl certificates when downloading")
parser.add_argument(
'-l', '--enable-locks', action='store_true', dest='locks',
default=None, help="use filesystem locking (default)")
parser.add_argument(
'-L', '--disable-locks', action='store_false', dest='locks',
help="do not use filesystem locking (unsafe)")
parser.add_argument(
'-m', '--mock', action='store_true',
help="use mock packages instead of real ones")
parser.add_argument(
'-p', '--profile', action='store_true', dest='spack_profile',
help="profile execution using cProfile")
parser.add_argument(
'--sorted-profile', default=None, metavar="STAT",
help="profile and sort by one or more of:\n[%s]" %
',\n '.join([', '.join(line) for line in stat_lines]))
parser.add_argument(
'--lines', default=20, action='store',
help="lines of profile output or 'all' (default: 20)")
parser.add_argument(
'-v', '--verbose', action='store_true',
help="tee build system output to stdout")
parser.add_argument(
'--stacktrace', action='store_true',
help="add stacktraces to all printed statements")
parser.add_argument(
'-V', '--version', action='store_true',
help='show version number and exit')
parser.add_argument(
'--print-shell-vars', action='store',
help="print info needed by setup-env.[c]sh")
return parser
|
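The environment options above form a standard argparse mutually-exclusive group; a stand-alone sketch of just that piece, using plain argparse rather than Spack's SpackArgumentParser subclass.
import argparse

parser = argparse.ArgumentParser(prog="spack", add_help=False)
env_group = parser.add_mutually_exclusive_group()
env_group.add_argument('-e', '--env', dest='env', metavar='ENV', action='store')
env_group.add_argument('-D', '--env-dir', dest='env_dir', metavar='DIR', action='store')
env_group.add_argument('-E', '--no-env', dest='no_env', action='store_true')

print(parser.parse_args(['-e', 'myenv']))
# parser.parse_args(['-e', 'myenv', '-E'])  # would exit with an error: the options are exclusive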
44,322 |
def validate_algorithm_spec(algorithm_settings: list[api_pb2.AlgorithmSetting]) -> (bool, str):
for s in algorithm_settings:
try:
if s.name == "num_epochs":
if not int(s.value) > 0:
return False, "{} should be greate than zero".format(s.name)
# Validate learning rate
if s.name in ["w_lr", "w_lr_min", "alpha_lr"]:
if not float(s.value) >= 0.0:
return False, "{} should be greate or equal than zero".format(s.name)
# Validate weight decay
if s.name in ["w_weight_decay", "alpha_weight_decay"]:
if not float(s.value) >= 0.0:
return False, "{} should be greate or equal than zero".format(s.name)
# Validate w_momentum and w_grad_clip
if s.name in ["w_momentum", "w_grad_clip"]:
if not float(s.value) >= 0.0:
return False, "{} should be greate or equal than zero".format(s.name)
if s.name == "batch_size":
if s.value is not "None":
if not int(s.value) >= 1:
return False, "batch_size should be greate or equal than one"
if s.name == "num_workers":
if not int(s.value) >= 0:
return False, "num_workers should be greate or equal than zero"
# Validate "init_channels", "print_step", "num_nodes" and "stem_multiplier"
if s.name in ["init_channels", "print_step", "num_nodes", "stem_multiplier"]:
if not int(s.value) >= 1:
return False, "{} should be greate or equal than one".format(s.name)
except Exception as e:
return False, "failed to validate {name}({value}): {exception}".format(name=s.name, value=s.value,
exception=e)
return True, ""
|
def validate_algorithm_spec(algorithm_settings: list[api_pb2.AlgorithmSetting]) -> (bool, str):
for s in algorithm_settings:
try:
if s.name == "num_epochs":
if not int(s.value) > 0:
return False, "{} should be greater than zero".format(s.name)
# Validate learning rate
if s.name in ["w_lr", "w_lr_min", "alpha_lr"]:
if not float(s.value) >= 0.0:
                    return False, "{} should be greater than or equal to zero".format(s.name)
# Validate weight decay
if s.name in ["w_weight_decay", "alpha_weight_decay"]:
if not float(s.value) >= 0.0:
                    return False, "{} should be greater than or equal to zero".format(s.name)
# Validate w_momentum and w_grad_clip
if s.name in ["w_momentum", "w_grad_clip"]:
if not float(s.value) >= 0.0:
                    return False, "{} should be greater than or equal to zero".format(s.name)
            if s.name == "batch_size":
                if s.value != "None":
                    if not int(s.value) >= 1:
                        return False, "batch_size should be greater than or equal to one"
if s.name == "num_workers":
if not int(s.value) >= 0:
                    return False, "num_workers should be greater than or equal to zero"
# Validate "init_channels", "print_step", "num_nodes" and "stem_multiplier"
if s.name in ["init_channels", "print_step", "num_nodes", "stem_multiplier"]:
if not int(s.value) >= 1:
                    return False, "{} should be greater than or equal to one".format(s.name)
except Exception as e:
return False, "failed to validate {name}({value}): {exception}".format(name=s.name, value=s.value,
exception=e)
return True, ""
|
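A sketch of how the validator above might be exercised; AlgorithmSetting is replaced by a namedtuple stand-in because api_pb2 is generated protobuf code that is not part of this excerpt, and the setting names and values are illustrative.
from collections import namedtuple

# stand-in for api_pb2.AlgorithmSetting: only .name and .value are accessed
Setting = namedtuple("Setting", ["name", "value"])
settings = [
    Setting("num_epochs", "50"),
    Setting("w_lr", "0.025"),
    Setting("batch_size", "64"),
]
# ok, msg = validate_algorithm_spec(settings)
# -> (True, "") for the values above; a non-numeric value is reported via the except branch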
9,627 |
def main():
""" This section is for arguments parsing """
state_map = dict(
present='vrouter-interface-ip-add',
absent='vrouter-interface-ip-remove'
)
module = AnsibleModule(
argument_spec=dict(
pn_cliswitch=dict(required=False, type='str'),
state=dict(required=True, type='str',
choices=state_map.keys()),
pn_bd=dict(required=False, type='str'),
pn_netmask=dict(required=False, type='str'),
pn_vnet=dict(required=False, type='str'),
pn_ip=dict(required=False, type='str'),
pn_nic=dict(required=False, type='str'),
pn_vrouter_name=dict(required=False, type='str'),
),
required_if=(
["state", "present", ["pn_vrouter_name", "pn_nic", "pn_ip", "pn_netmask"]],
["state", "absent", ["pn_vrouter_name", "pn_nic", "pn_ip"]]
),
)
# Accessing the arguments
cliswitch = module.params['pn_cliswitch']
state = module.params['state']
bd = module.params['pn_bd']
netmask = module.params['pn_netmask']
vnet = module.params['pn_vnet']
ip = module.params['pn_ip']
nic = module.params['pn_nic']
vrouter_name = module.params['pn_vrouter_name']
command = state_map[state]
# Building the CLI command string
cli = pn_cli(module, cliswitch)
VROUTER_EXISTS, INTERFACE_EXISTS, NIC_EXISTS = check_cli(module, cli)
cli += ' %s vrouter-name %s ' % (command, vrouter_name)
if command == 'vrouter-interface-ip-add':
if VROUTER_EXISTS is False:
module.fail_json(
failed=True,
msg='vRouter %s does not exist' % vrouter_name
)
if NIC_EXISTS is False:
module.fail_json(
failed=True,
msg='vRouter with nic %s does not exist' % nic
)
if INTERFACE_EXISTS is True:
module.exit_json(
skipped=True,
                msg='vrouter with interface %s exists' % ip
)
cli += ' nic %s ip %s ' % (nic, ip)
if bd:
cli += ' bd ' + bd
if netmask:
cli += ' netmask ' + netmask
if vnet:
cli += ' vnet ' + vnet
if command == 'vrouter-interface-ip-remove':
if VROUTER_EXISTS is False:
module.fail_json(
failed=True,
msg='vRouter %s does not exist' % vrouter_name
)
if NIC_EXISTS is False:
module.fail_json(
failed=True,
msg='vRouter with nic %s does not exist' % nic
)
if INTERFACE_EXISTS is False:
module.exit_json(
skipped=True,
msg='vrouter with interface %s doesnt exist' % ip
)
if nic:
cli += ' nic %s ' % nic
if ip:
cli += ' ip %s ' % ip.split('/')[0]
run_cli(module, cli, state_map)
|
def main():
""" This section is for arguments parsing """
state_map = dict(
present='vrouter-interface-ip-add',
absent='vrouter-interface-ip-remove'
)
module = AnsibleModule(
argument_spec=dict(
pn_cliswitch=dict(required=False, type='str'),
state=dict(required=True, type='str',
choices=state_map.keys()),
pn_bd=dict(required=False, type='str'),
pn_netmask=dict(required=False, type='str'),
pn_vnet=dict(required=False, type='str'),
pn_ip=dict(required=False, type='str'),
pn_nic=dict(required=False, type='str'),
pn_vrouter_name=dict(required=False, type='str'),
),
required_if=(
["state", "present", ["pn_vrouter_name", "pn_nic", "pn_ip", "pn_netmask"]],
["state", "absent", ["pn_vrouter_name", "pn_nic", "pn_ip"]]
),
)
# Accessing the arguments
cliswitch = module.params['pn_cliswitch']
state = module.params['state']
bd = module.params['pn_bd']
netmask = module.params['pn_netmask']
vnet = module.params['pn_vnet']
ip = module.params['pn_ip']
nic = module.params['pn_nic']
vrouter_name = module.params['pn_vrouter_name']
command = state_map[state]
# Building the CLI command string
cli = pn_cli(module, cliswitch)
VROUTER_EXISTS, INTERFACE_EXISTS, NIC_EXISTS = check_cli(module, cli)
cli += ' %s vrouter-name %s ' % (command, vrouter_name)
if command == 'vrouter-interface-ip-add':
if VROUTER_EXISTS is False:
module.fail_json(
failed=True,
msg='vRouter %s does not exist' % vrouter_name
)
if NIC_EXISTS is False:
module.fail_json(
failed=True,
msg='vRouter with nic %s does not exist' % nic
)
if INTERFACE_EXISTS is True:
module.exit_json(
skipped=True,
                msg='vrouter with interface %s exists' % ip
)
cli += ' nic %s ip %s ' % (nic, ip)
if bd:
cli += ' bd ' + bd
if netmask:
cli += ' netmask ' + netmask
if vnet:
cli += ' vnet ' + vnet
if command == 'vrouter-interface-ip-remove':
if VROUTER_EXISTS is False:
module.fail_json(
failed=True,
msg='vRouter %s does not exist' % vrouter_name
)
if NIC_EXISTS is False:
module.fail_json(
failed=True,
msg='vRouter with nic %s does not exist' % nic
)
if INTERFACE_EXISTS is False:
module.exit_json(
skipped=True,
msg='vRouter with interface %s does not exist' % ip
)
if nic:
cli += ' nic %s ' % nic
if ip:
cli += ' ip %s ' % ip.split('/')[0]
run_cli(module, cli, state_map)
|
48,564 |
def test_init_as_points_from_list():
points = [[0.0, 0.0, 0.0],
[0, 1, 0],
[0, 0, 1]]
mesh = pyvista.PolyData(points)
assert np.allclose(mesh.points, points)
|
def test_init_as_points_from_list():
points = [[0.0, 0.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 0.0, 1.0]]
mesh = pyvista.PolyData(points)
assert np.allclose(mesh.points, points)
|
27,391 |
def generate_streamlines(
topology_file_path: str, trajectory_file_path: str,
grid_spacing: float, MDA_selection: str,
start_frame: int, end_frame: int,
xmin: float, xmax: float, ymin: float, ymax: float,
maximum_delta_magnitude: float,
num_cores: int = 'maximum'
) -> Tuple[np.ndarray, np.ndarray, float, float]:
r"""Produce the x and y components of a 2D streamplot data set.
Parameters
----------
topology_file_path : str
Absolute path to the topology file
trajectory_file_path : str
Absolute path to the trajectory file. It will normally be desirable
to filter the trajectory with a tool such as GROMACS
:program:`g_filter` (see :cite:p:`a-Chavent2014`)
grid_spacing : float
The spacing between grid lines (angstroms)
MDA_selection : str
MDAnalysis selection string
start_frame : int
First frame number to parse
end_frame : int
Last frame number to parse
xmin : float
Minimum coordinate boundary for x-axis (angstroms)
xmax : float
Maximum coordinate boundary for x-axis (angstroms)
ymin : float
Minimum coordinate boundary for y-axis (angstroms)
ymax : float
Maximum coordinate boundary for y-axis (angstroms)
maximum_delta_magnitude : float
Absolute value of the largest displacement tolerated for the
centroid of a group of particles ( angstroms). Values above this
displacement will not count in the streamplot (treated as
excessively large displacements crossing the periodic boundary)
num_cores : int or 'maximum' (optional)
The number of cores to use. (Default 'maximum' uses all available
cores)
Returns
-------
dx_array : array of floats
An array object containing the displacements in the x direction
dy_array : array of floats
An array object containing the displacements in the y direction
average_displacement : float
:math:`\frac{\sum\sqrt[]{dx^2 + dy^2}}{N}`
standard_deviation_of_displacement : float
standard deviation of :math:`\sqrt[]{dx^2 + dy^2}`
Examples
--------
Generate 2D streamlines and plot::
import matplotlib, matplotlib.pyplot, np
import MDAnalysis, MDAnalysis.visualization.streamlines
u1, v1, average_displacement, standard_deviation_of_displacement =
MDAnalysis.visualization.streamlines.generate_streamlines('testing.gro', 'testing_filtered.xtc',
grid_spacing=20, MDA_selection='name PO4', start_frame=2, end_frame=3,
xmin=-8.73000049591, xmax= 1225.96008301,
ymin= -12.5799999237, ymax=1224.34008789,
maximum_delta_magnitude=1.0, num_cores=16)
x = np.linspace(0, 1200, 61)
y = np.linspace(0, 1200, 61)
speed = np.sqrt(u1*u1 + v1*v1)
fig = matplotlib.pyplot.figure()
ax = fig.add_subplot(111, aspect='equal')
ax.set_xlabel('x ($\AA$)')
ax.set_ylabel('y ($\AA$)')
ax.streamplot(x, y, u1, v1, density=(10,10), color=speed, linewidth=3*speed/speed.max())
fig.savefig('testing_streamline.png',dpi=300)
.. image:: testing_streamline.png
References
.. bibliography::
:filter: False
:style: MDA
:keyprefix: a-
:labelprefix: ᵃ
Chavent2014
See Also
--------
MDAnalysis.visualization.streamlines_3D.generate_streamlines_3d
"""
# work out the number of cores to use:
if num_cores == 'maximum':
num_cores = multiprocessing.cpu_count() # use all available cores
else:
num_cores = num_cores # use the value specified by the user
#assert isinstance(num_cores,(int,long)), "The number of specified cores must (of course) be an integer."
np.seterr(all='warn', over='raise')
parent_list_deltas = [] # collect all data from child processes here
def log_result_to_parent(delta_array):
parent_list_deltas.extend(delta_array)
tuple_of_limits = (xmin, xmax, ymin, ymax)
grid = produce_grid(tuple_of_limits=tuple_of_limits, grid_spacing=grid_spacing)
list_square_vertex_arrays_per_core, list_parent_index_values, total_rows, total_columns = \
split_grid(grid=grid,
num_cores=num_cores)
pool = multiprocessing.Pool(num_cores)
for vertex_sublist, index_sublist in zip(list_square_vertex_arrays_per_core, list_parent_index_values):
pool.apply_async(per_core_work, args=(
topology_file_path, trajectory_file_path, vertex_sublist, MDA_selection, start_frame, end_frame,
index_sublist, maximum_delta_magnitude), callback=log_result_to_parent)
pool.close()
pool.join()
dx_array = np.zeros((total_rows, total_columns))
dy_array = np.zeros((total_rows, total_columns))
#the parent_list_deltas is shaped like this: [ ([row_index,column_index],[dx,dy]), ... (...),...,]
for index_array, delta_array in parent_list_deltas: # go through the list in the parent process and assign to the
# appropriate positions in the dx and dy matrices:
#build in a filter to replace all values at the cap (currently between -8,8) with 0 to match Matthieu's code
# (I think eventually we'll reduce the cap to a narrower boundary though)
index_1 = index_array.tolist()[0]
index_2 = index_array.tolist()[1]
if abs(delta_array[0]) == maximum_delta_magnitude:
dx_array[index_1, index_2] = 0
else:
dx_array[index_1, index_2] = delta_array[0]
if abs(delta_array[1]) == maximum_delta_magnitude:
dy_array[index_1, index_2] = 0
else:
dy_array[index_1, index_2] = delta_array[1]
#at Matthieu's request, we now want to calculate the average and standard deviation of the displacement values:
displacement_array = np.sqrt(dx_array ** 2 + dy_array ** 2)
average_displacement = np.average(displacement_array)
standard_deviation_of_displacement = np.std(displacement_array)
return (dx_array, dy_array, average_displacement, standard_deviation_of_displacement)
|
def generate_streamlines(
topology_file_path: str, trajectory_file_path: str,
grid_spacing: float, MDA_selection: str,
start_frame: int, end_frame: int,
xmin: float, xmax: float, ymin: float, ymax: float,
maximum_delta_magnitude: float,
num_cores: Union[int, str] = 'maximum'
) -> Tuple[np.ndarray, np.ndarray, float, float]:
r"""Produce the x and y components of a 2D streamplot data set.
Parameters
----------
topology_file_path : str
Absolute path to the topology file
trajectory_file_path : str
Absolute path to the trajectory file. It will normally be desirable
to filter the trajectory with a tool such as GROMACS
:program:`g_filter` (see :cite:p:`a-Chavent2014`)
grid_spacing : float
The spacing between grid lines (angstroms)
MDA_selection : str
MDAnalysis selection string
start_frame : int
First frame number to parse
end_frame : int
Last frame number to parse
xmin : float
Minimum coordinate boundary for x-axis (angstroms)
xmax : float
Maximum coordinate boundary for x-axis (angstroms)
ymin : float
Minimum coordinate boundary for y-axis (angstroms)
ymax : float
Maximum coordinate boundary for y-axis (angstroms)
maximum_delta_magnitude : float
Absolute value of the largest displacement tolerated for the
centroid of a group of particles ( angstroms). Values above this
displacement will not count in the streamplot (treated as
excessively large displacements crossing the periodic boundary)
num_cores : int or 'maximum' (optional)
The number of cores to use. (Default 'maximum' uses all available
cores)
Returns
-------
dx_array : array of floats
An array object containing the displacements in the x direction
dy_array : array of floats
An array object containing the displacements in the y direction
average_displacement : float
:math:`\frac{\sum\sqrt[]{dx^2 + dy^2}}{N}`
standard_deviation_of_displacement : float
standard deviation of :math:`\sqrt[]{dx^2 + dy^2}`
Examples
--------
Generate 2D streamlines and plot::
import matplotlib, matplotlib.pyplot, np
import MDAnalysis, MDAnalysis.visualization.streamlines
u1, v1, average_displacement, standard_deviation_of_displacement =
MDAnalysis.visualization.streamlines.generate_streamlines('testing.gro', 'testing_filtered.xtc',
grid_spacing=20, MDA_selection='name PO4', start_frame=2, end_frame=3,
xmin=-8.73000049591, xmax= 1225.96008301,
ymin= -12.5799999237, ymax=1224.34008789,
maximum_delta_magnitude=1.0, num_cores=16)
x = np.linspace(0, 1200, 61)
y = np.linspace(0, 1200, 61)
speed = np.sqrt(u1*u1 + v1*v1)
fig = matplotlib.pyplot.figure()
ax = fig.add_subplot(111, aspect='equal')
ax.set_xlabel('x ($\AA$)')
ax.set_ylabel('y ($\AA$)')
ax.streamplot(x, y, u1, v1, density=(10,10), color=speed, linewidth=3*speed/speed.max())
fig.savefig('testing_streamline.png',dpi=300)
.. image:: testing_streamline.png
References
.. bibliography::
:filter: False
:style: MDA
:keyprefix: a-
:labelprefix: ᵃ
Chavent2014
See Also
--------
MDAnalysis.visualization.streamlines_3D.generate_streamlines_3d
"""
# work out the number of cores to use:
if num_cores == 'maximum':
num_cores = multiprocessing.cpu_count() # use all available cores
else:
num_cores = num_cores # use the value specified by the user
#assert isinstance(num_cores,(int,long)), "The number of specified cores must (of course) be an integer."
np.seterr(all='warn', over='raise')
parent_list_deltas = [] # collect all data from child processes here
def log_result_to_parent(delta_array):
parent_list_deltas.extend(delta_array)
tuple_of_limits = (xmin, xmax, ymin, ymax)
grid = produce_grid(tuple_of_limits=tuple_of_limits, grid_spacing=grid_spacing)
list_square_vertex_arrays_per_core, list_parent_index_values, total_rows, total_columns = \
split_grid(grid=grid,
num_cores=num_cores)
pool = multiprocessing.Pool(num_cores)
for vertex_sublist, index_sublist in zip(list_square_vertex_arrays_per_core, list_parent_index_values):
pool.apply_async(per_core_work, args=(
topology_file_path, trajectory_file_path, vertex_sublist, MDA_selection, start_frame, end_frame,
index_sublist, maximum_delta_magnitude), callback=log_result_to_parent)
pool.close()
pool.join()
dx_array = np.zeros((total_rows, total_columns))
dy_array = np.zeros((total_rows, total_columns))
#the parent_list_deltas is shaped like this: [ ([row_index,column_index],[dx,dy]), ... (...),...,]
for index_array, delta_array in parent_list_deltas: # go through the list in the parent process and assign to the
# appropriate positions in the dx and dy matrices:
#build in a filter to replace all values at the cap (currently between -8,8) with 0 to match Matthieu's code
# (I think eventually we'll reduce the cap to a narrower boundary though)
index_1 = index_array.tolist()[0]
index_2 = index_array.tolist()[1]
if abs(delta_array[0]) == maximum_delta_magnitude:
dx_array[index_1, index_2] = 0
else:
dx_array[index_1, index_2] = delta_array[0]
if abs(delta_array[1]) == maximum_delta_magnitude:
dy_array[index_1, index_2] = 0
else:
dy_array[index_1, index_2] = delta_array[1]
#at Matthieu's request, we now want to calculate the average and standard deviation of the displacement values:
displacement_array = np.sqrt(dx_array ** 2 + dy_array ** 2)
average_displacement = np.average(displacement_array)
standard_deviation_of_displacement = np.std(displacement_array)
return (dx_array, dy_array, average_displacement, standard_deviation_of_displacement)
|
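The modified generate_streamlines above aggregates per-core results through a Pool.apply_async callback that extends a list living in the parent process. A minimal, self-contained sketch of that aggregation pattern, with a hypothetical squaring worker standing in for per_core_work, is:
import multiprocessing

def _square(x):
    # hypothetical stand-in for per_core_work; returns a list so the
    # callback can extend the shared result list in one call
    return [(x, x * x)]

def collect_squares(values):
    results = []  # lives in the parent process, like parent_list_deltas above

    def log_result_to_parent(chunk):
        # apply_async callbacks run in the parent (in a result-handler thread)
        results.extend(chunk)

    pool = multiprocessing.Pool(2)
    for v in values:
        pool.apply_async(_square, args=(v,), callback=log_result_to_parent)
    pool.close()
    pool.join()
    return dict(results)

if __name__ == '__main__':
    print(collect_squares([1, 2, 3]))  # {1: 1, 2: 4, 3: 9}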
30,966 |
def main():
credentials_json = json.loads(demisto.params().get('credentials_json', {}))
project = demisto.params().get('project_id', '')
region = demisto.params().get('region')
region = set_default_region(region)
proxy = demisto.params().get('proxy', False)
insecure = demisto.params().get('insecure', False)
scopes = ['https://www.googleapis.com/auth/cloud-platform']
client = GoogleClient('cloudfunctions', 'v1', credentials_json, scopes, proxy, insecure, project=project,
region=region)
commands = {
'google-cloud-functions-list': functions_list_command,
'google-cloud-function-regions-list': region_list_command,
'google-cloud-function-get-by-name': get_function_by_name_command,
'google-cloud-function-execute': execute_function_command,
}
'''EXECUTION CODE'''
cmd_func = demisto.command()
LOG(f'Command being called is {cmd_func}')
try:
if cmd_func == 'test-module':
functions_list_command(client, {})
demisto.results('ok')
else:
hr, outputs, raw = commands[cmd_func](client, demisto.args())
return_outputs(hr, outputs, raw)
except Exception as e:
return_error(f"Failed to execute {cmd_func} command. Error: {e}")
raise
|
def main():
credentials_json = json.loads(demisto.params().get('credentials_json', {}))
project = demisto.params().get('project_id', '')
region = demisto.params().get('region', '-')
proxy = demisto.params().get('proxy', False)
insecure = demisto.params().get('insecure', False)
scopes = ['https://www.googleapis.com/auth/cloud-platform']
client = GoogleClient('cloudfunctions', 'v1', credentials_json, scopes, proxy, insecure, project=project,
region=region)
commands = {
'google-cloud-functions-list': functions_list_command,
'google-cloud-function-regions-list': region_list_command,
'google-cloud-function-get-by-name': get_function_by_name_command,
'google-cloud-function-execute': execute_function_command,
}
'''EXECUTION CODE'''
cmd_func = demisto.command()
LOG(f'Command being called is {cmd_func}')
try:
if cmd_func == 'test-module':
functions_list_command(client, {})
demisto.results('ok')
else:
hr, outputs, raw = commands[cmd_func](client, demisto.args())
return_outputs(hr, outputs, raw)
except Exception as e:
return_error(f"Failed to execute {cmd_func} command. Error: {e}")
raise
|
31,758 |
def update_group_command(client, args):
scim = verify_and_load_scim_data(args.get('scim'))
group_id = scim.get('id')
group_name = scim.get('displayName')
if not group_id:
return_error("You must supply 'id' in the scim data")
member_ids_to_add = args.get('memberIdsToAdd')
member_ids_to_delete = args.get('memberIdsToDelete')
if member_ids_to_add:
if type(member_ids_to_add) != list:
member_ids_to_add = json.loads(member_ids_to_add)
for member_id in member_ids_to_add:
operation = {
"op": "add",
"path": "members",
"value": [{"value": member_id}]
}
group_input = {'schemas': [patchSchema], 'Operations': [operation]}
res = client.update_group(group_id, group_input)
if res.status_code != 204:
res_json = res.json()
generic_iam_context = OutputContext(success=False, displayName=group_name, iden=member_id,
errorCode=res_json.get('code'),
errorMessage=res_json.get('message'), details=res_json)
readable_output = tableToMarkdown('AWS Update Group:', generic_iam_context.data, removeNull=True)
return CommandResults(
raw_response=generic_iam_context.data,
outputs_prefix=generic_iam_context.command,
outputs_key_field='id',
outputs=generic_iam_context.data,
readable_output=readable_output
)
if member_ids_to_delete:
if type(member_ids_to_delete) is not list:
member_ids_to_delete = json.loads(member_ids_to_delete)
for member_id in member_ids_to_delete:
operation = {
"op": "remove",
"path": "members",
"value": [{"value": member_id}]
}
group_input = {'schemas': [patchSchema], 'Operations': [operation]}
res = client.update_group(group_id, group_input)
if res.status_code != 204:
res_json = res.json()
generic_iam_context = OutputContext(success=False, displayName=group_name, iden=member_id,
errorCode=res_json.get('code'),
errorMessage=res_json.get('message'), details=res_json)
readable_output = tableToMarkdown('AWS Update Group:', generic_iam_context.data, removeNull=True)
return CommandResults(
raw_response=generic_iam_context.data,
outputs_prefix=generic_iam_context.command,
outputs_key_field='id',
outputs=generic_iam_context.data,
readable_output=readable_output
)
if res.status_code == 204:
res_json = res.headers
generic_iam_context = OutputContext(success=True, iden=group_id, displayName=group_name, details=str(res_json))
elif res.status_code == 404:
res_json = res.json()
generic_iam_context = OutputContext(success=False, iden=group_id, displayName=group_name, errorCode=404,
errorMessage="Group/User Not Found or User not a member of group",
details=res_json)
else:
res_json = res.json()
generic_iam_context = OutputContext(success=False, iden=group_id, displayName=group_name,
errorCode=res_json.get('code'), errorMessage=res_json.get('message'),
details=res_json)
readable_output = tableToMarkdown('AWS Update Group:', generic_iam_context.data, removeNull=True)
return CommandResults(
raw_response=generic_iam_context.data,
outputs_prefix=generic_iam_context.command,
outputs_key_field='id',
outputs=generic_iam_context.data,
readable_output=readable_output
)
|
def update_group_command(client, args):
scim = verify_and_load_scim_data(args.get('scim'))
group_id = scim.get('id')
group_name = scim.get('displayName')
if not group_id:
return_error("You must supply 'id' in the scim data")
member_ids_to_add = args.get('memberIdsToAdd')
member_ids_to_delete = args.get('memberIdsToDelete')
if member_ids_to_add:
if type(member_ids_to_add) != list:
member_ids_to_add = json.loads(member_ids_to_add)
for member_id in member_ids_to_add:
operation = {
"op": "add",
"path": "members",
"value": [{"value": member_id}]
}
group_input = {'schemas': [patchSchema], 'Operations': [operation]}
res = client.update_group(group_id, group_input)
if res.status_code != 204:
res_json = res.json()
generic_iam_context = OutputContext(success=False, displayName=group_name, iden=member_id,
errorCode=res_json.get('code'),
errorMessage=res_json.get('message'), details=res_json)
readable_output = tableToMarkdown('AWS Update Group:', generic_iam_context.data, removeNull=True)
return CommandResults(
raw_response=generic_iam_context.data,
outputs_prefix=generic_iam_context.command,
outputs_key_field='id',
outputs=generic_iam_context.data,
readable_output=readable_output
)
if member_ids_to_delete:
if not isinstance(member_ids_to_delete, list):
member_ids_to_delete = json.loads(member_ids_to_delete)
for member_id in member_ids_to_delete:
operation = {
"op": "remove",
"path": "members",
"value": [{"value": member_id}]
}
group_input = {'schemas': [patchSchema], 'Operations': [operation]}
res = client.update_group(group_id, group_input)
if res.status_code != 204:
res_json = res.json()
generic_iam_context = OutputContext(success=False, displayName=group_name, iden=member_id,
errorCode=res_json.get('code'),
errorMessage=res_json.get('message'), details=res_json)
readable_output = tableToMarkdown('AWS Update Group:', generic_iam_context.data, removeNull=True)
return CommandResults(
raw_response=generic_iam_context.data,
outputs_prefix=generic_iam_context.command,
outputs_key_field='id',
outputs=generic_iam_context.data,
readable_output=readable_output
)
if res.status_code == 204:
res_json = res.headers
generic_iam_context = OutputContext(success=True, iden=group_id, displayName=group_name, details=str(res_json))
elif res.status_code == 404:
res_json = res.json()
generic_iam_context = OutputContext(success=False, iden=group_id, displayName=group_name, errorCode=404,
errorMessage="Group/User Not Found or User not a member of group",
details=res_json)
else:
res_json = res.json()
generic_iam_context = OutputContext(success=False, iden=group_id, displayName=group_name,
errorCode=res_json.get('code'), errorMessage=res_json.get('message'),
details=res_json)
readable_output = tableToMarkdown('AWS Update Group:', generic_iam_context.data, removeNull=True)
return CommandResults(
raw_response=generic_iam_context.data,
outputs_prefix=generic_iam_context.command,
outputs_key_field='id',
outputs=generic_iam_context.data,
readable_output=readable_output
)
|
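Both branches of update_group_command above send one SCIM PATCH request per member. Assuming patchSchema holds the standard SCIM 2.0 PatchOp URN from RFC 7644 (an assumption, since its definition is outside this excerpt), the per-member body the code builds looks like this sketch:
import json

PATCH_SCHEMA = 'urn:ietf:params:scim:api:messages:2.0:PatchOp'  # assumed value of patchSchema

def build_member_patch(op, member_id):
    # mirrors the group_input dict built above for a single add/remove operation
    return {
        'schemas': [PATCH_SCHEMA],
        'Operations': [{
            'op': op,  # 'add' or 'remove'
            'path': 'members',
            'value': [{'value': member_id}],
        }],
    }

print(json.dumps(build_member_patch('add', 'user-123'), indent=2))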
34,456 |
def _collect_action_executed_predictions(
processor: "MessageProcessor",
partial_tracker: DialogueStateTracker,
event: ActionExecuted,
fail_on_prediction_errors: bool,
circuit_breaker_tripped: bool,
) -> Tuple[EvaluationStore, Optional[Text], Optional[float]]:
from rasa.core.policies.form_policy import FormPolicy
from rasa.core.policies.rule_policy import RulePolicy
action_executed_eval_store = EvaluationStore()
gold = event.action_name
if circuit_breaker_tripped:
predicted = "circuit breaker tripped"
policy = None
confidence = None
else:
action, policy, confidence = processor.predict_next_action(partial_tracker)
predicted = action.name()
if (
policy
and predicted != gold
and _form_might_have_been_rejected(
processor.domain, partial_tracker, predicted
)
):
# Wrong policy was predicted,
# but it might be Ok if form action is rejected.
_emulate_form_rejection(partial_tracker)
# try again
action, policy, confidence = processor.predict_next_action(partial_tracker)
if action.name() == gold:
predicted = action.name()
else:
_undo_emulating_form_rejection(partial_tracker)
action_executed_eval_store.add_to_store(
action_predictions=predicted, action_targets=gold
)
if action_executed_eval_store.has_prediction_target_mismatch():
partial_tracker.update(
WronglyPredictedAction(
gold, predicted, event.policy, event.confidence, event.timestamp
)
)
if fail_on_prediction_errors:
error_msg = (
"Model predicted a wrong action. Failed Story: "
"\n\n{}".format(partial_tracker.export_stories())
)
if FormPolicy.__name__ in policy:
error_msg += (
"FormAction is not run during "
"evaluation therefore it is impossible to know "
"if validation failed or this story is wrong. "
"If the story is correct, add it to the "
"training stories and retrain."
)
raise ValueError(error_msg)
else:
partial_tracker.update(event)
return action_executed_eval_store, policy, confidence
|
def _collect_action_executed_predictions(
processor: "MessageProcessor",
partial_tracker: DialogueStateTracker,
event: ActionExecuted,
fail_on_prediction_errors: bool,
circuit_breaker_tripped: bool,
) -> Tuple[EvaluationStore, Optional[Text], Optional[float]]:
from rasa.core.policies.form_policy import FormPolicy
from rasa.core.policies.rule_policy import RulePolicy
action_executed_eval_store = EvaluationStore()
gold = event.action_name
if circuit_breaker_tripped:
predicted = "circuit breaker tripped"
policy = None
confidence = None
else:
action, policy, confidence = processor.predict_next_action(partial_tracker)
predicted = action.name()
if (
policy
and predicted != gold
and _form_might_have_been_rejected(
processor.domain, partial_tracker, predicted
)
):
# Wrong action was predicted,
# but it might be Ok if form action is rejected.
_emulate_form_rejection(partial_tracker)
# try again
action, policy, confidence = processor.predict_next_action(partial_tracker)
if action.name() == gold:
predicted = action.name()
else:
_undo_emulating_form_rejection(partial_tracker)
action_executed_eval_store.add_to_store(
action_predictions=predicted, action_targets=gold
)
if action_executed_eval_store.has_prediction_target_mismatch():
partial_tracker.update(
WronglyPredictedAction(
gold, predicted, event.policy, event.confidence, event.timestamp
)
)
if fail_on_prediction_errors:
error_msg = (
"Model predicted a wrong action. Failed Story: "
"\n\n{}".format(partial_tracker.export_stories())
)
if FormPolicy.__name__ in policy:
error_msg += (
"FormAction is not run during "
"evaluation therefore it is impossible to know "
"if validation failed or this story is wrong. "
"If the story is correct, add it to the "
"training stories and retrain."
)
raise ValueError(error_msg)
else:
partial_tracker.update(event)
return action_executed_eval_store, policy, confidence
|
1,577 |
def make_multilabel_classification(n_samples=100, n_features=20, n_classes=5,
n_labels=2, length=50, allow_unlabeled=True,
sparse=False, return_indicator='dense',
return_distributions=False,
random_state=None):
"""Generate a random multilabel classification problem.
For each sample, the generative process is:
- pick the number of labels: n ~ Poisson(n_labels)
- n times, choose a class c: c ~ Multinomial(theta)
- pick the document length: k ~ Poisson(length)
- k times, choose a word: w ~ Multinomial(theta_c)
In the above process, rejection sampling is used to make sure that
n is never zero or more than `n_classes`, and that the document length
is never zero. Likewise, we reject classes which have already been chosen.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=20)
The total number of features.
n_classes : int, optional (default=5)
The number of classes of the classification problem.
n_labels : int, optional (default=2)
The average number of labels per instance. More precisely, the number
of labels per sample is drawn from a Poisson distribution with
``n_labels`` as its expected value, but samples are bounded (using
rejection sampling) by ``n_classes``, and must be nonzero if
``allow_unlabeled`` is False.
length : int, optional (default=50)
The sum of the features (number of words if documents) is drawn from
a Poisson distribution with this expected value.
allow_unlabeled : bool, optional (default=True)
If ``True``, some instances might not belong to any class.
sparse : bool, optional (default=False)
If ``True``, return a sparse feature matrix
.. versionadded:: 0.17
parameter to allow *sparse* output.
return_indicator : 'dense' (default) | 'sparse' | False
If ``dense`` return ``Y`` in the dense binary indicator format. If
``'sparse'`` return ``Y`` in the sparse binary indicator format.
``False`` returns a list of lists of labels.
return_distributions : bool, optional (default=False)
If ``True``, return the prior class probability and conditional
probabilities of features given classes, from which the data was
drawn.
random_state : int, RandomState instance or None (default)
Determines random number generation for dataset creation. Pass an int
for reproducible output across multiple function calls.
See :term:`Glossary <random_state>`.
Returns
-------
X : array of shape [n_samples, n_features]
The generated samples.
Y : array or sparse CSR matrix of shape [n_samples, n_classes]
The label sets.
p_c : array, shape [n_classes]
The probability of each class being drawn. Only returned if
``return_distributions=True``.
p_w_c : array, shape [n_features, n_classes]
The probability of each feature being drawn given each class.
Only returned if ``return_distributions=True``.
"""
# Validation of the arguments
if n_classes == 0 and not allow_unlabeled:
raise ValueError(
"Invalid set of arguments passed: " +
"n_classes = 0 and allow_unlabeled = False"
)
if length == 0:
raise ValueError("Invalid argument passed: length = 0")
generator = check_random_state(random_state)
p_c = generator.rand(n_classes)
p_c /= p_c.sum()
cumulative_p_c = np.cumsum(p_c)
p_w_c = generator.rand(n_features, n_classes)
p_w_c /= np.sum(p_w_c, axis=0)
def sample_example():
_, n_classes = p_w_c.shape
# pick a nonzero number of labels per document by rejection sampling
y_size = n_classes + 1
while (not allow_unlabeled and y_size == 0) or y_size > n_classes:
y_size = generator.poisson(n_labels)
# pick n classes
y = set()
while len(y) != y_size:
# pick a class with probability P(c)
c = np.searchsorted(cumulative_p_c,
generator.rand(y_size - len(y)))
y.update(c)
y = list(y)
# pick a non-zero document length by rejection sampling
n_words = 0
while n_words == 0:
n_words = generator.poisson(length)
# generate a document of length n_words
if len(y) == 0:
# if sample does not belong to any class, generate noise word
words = generator.randint(n_features, size=n_words)
return words, y
# sample words with replacement from selected classes
cumulative_p_w_sample = p_w_c.take(y, axis=1).sum(axis=1).cumsum()
cumulative_p_w_sample /= cumulative_p_w_sample[-1]
words = np.searchsorted(cumulative_p_w_sample, generator.rand(n_words))
return words, y
X_indices = array.array('i')
X_indptr = array.array('i', [0])
Y = []
for i in range(n_samples):
words, y = sample_example()
X_indices.extend(words)
X_indptr.append(len(X_indices))
Y.append(y)
X_data = np.ones(len(X_indices), dtype=np.float64)
X = sp.csr_matrix((X_data, X_indices, X_indptr),
shape=(n_samples, n_features))
X.sum_duplicates()
if not sparse:
X = X.toarray()
# return_indicator can be True due to backward compatibility
if return_indicator in (True, 'sparse', 'dense'):
lb = MultiLabelBinarizer(sparse_output=(return_indicator == 'sparse'))
Y = lb.fit([range(n_classes)]).transform(Y)
elif return_indicator is not False:
raise ValueError("return_indicator must be either 'sparse', 'dense' "
'or False.')
if return_distributions:
return X, Y, p_c, p_w_c
return X, Y
|
def make_multilabel_classification(n_samples=100, n_features=20, n_classes=5,
n_labels=2, length=50, allow_unlabeled=True,
sparse=False, return_indicator='dense',
return_distributions=False,
random_state=None):
"""Generate a random multilabel classification problem.
For each sample, the generative process is:
- pick the number of labels: n ~ Poisson(n_labels)
- n times, choose a class c: c ~ Multinomial(theta)
- pick the document length: k ~ Poisson(length)
- k times, choose a word: w ~ Multinomial(theta_c)
In the above process, rejection sampling is used to make sure that
n is never zero or more than `n_classes`, and that the document length
is never zero. Likewise, we reject classes which have already been chosen.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=20)
The total number of features.
n_classes : int, optional (default=5)
The number of classes of the classification problem.
n_labels : int, optional (default=2)
The average number of labels per instance. More precisely, the number
of labels per sample is drawn from a Poisson distribution with
``n_labels`` as its expected value, but samples are bounded (using
rejection sampling) by ``n_classes``, and must be nonzero if
``allow_unlabeled`` is False.
length : int, optional (default=50)
The sum of the features (number of words if documents) is drawn from
a Poisson distribution with this expected value.
allow_unlabeled : bool, optional (default=True)
If ``True``, some instances might not belong to any class.
sparse : bool, optional (default=False)
If ``True``, return a sparse feature matrix
.. versionadded:: 0.17
parameter to allow *sparse* output.
return_indicator : 'dense' (default) | 'sparse' | False
If ``dense`` return ``Y`` in the dense binary indicator format. If
``'sparse'`` return ``Y`` in the sparse binary indicator format.
``False`` returns a list of lists of labels.
return_distributions : bool, optional (default=False)
If ``True``, return the prior class probability and conditional
probabilities of features given classes, from which the data was
drawn.
random_state : int, RandomState instance or None (default)
Determines random number generation for dataset creation. Pass an int
for reproducible output across multiple function calls.
See :term:`Glossary <random_state>`.
Returns
-------
X : array of shape [n_samples, n_features]
The generated samples.
Y : array or sparse CSR matrix of shape [n_samples, n_classes]
The label sets.
p_c : array, shape [n_classes]
The probability of each class being drawn. Only returned if
``return_distributions=True``.
p_w_c : array, shape [n_features, n_classes]
The probability of each feature being drawn given each class.
Only returned if ``return_distributions=True``.
"""
# Validation of the arguments
if n_classes < 1:
raise ValueError(
"Invalid set of arguments passed: " +
"n_classes = 0 and allow_unlabeled = False"
)
if length == 0:
raise ValueError("Invalid argument passed: length = 0")
generator = check_random_state(random_state)
p_c = generator.rand(n_classes)
p_c /= p_c.sum()
cumulative_p_c = np.cumsum(p_c)
p_w_c = generator.rand(n_features, n_classes)
p_w_c /= np.sum(p_w_c, axis=0)
def sample_example():
_, n_classes = p_w_c.shape
# pick a nonzero number of labels per document by rejection sampling
y_size = n_classes + 1
while (not allow_unlabeled and y_size == 0) or y_size > n_classes:
y_size = generator.poisson(n_labels)
# pick n classes
y = set()
while len(y) != y_size:
# pick a class with probability P(c)
c = np.searchsorted(cumulative_p_c,
generator.rand(y_size - len(y)))
y.update(c)
y = list(y)
# pick a non-zero document length by rejection sampling
n_words = 0
while n_words == 0:
n_words = generator.poisson(length)
# generate a document of length n_words
if len(y) == 0:
# if sample does not belong to any class, generate noise word
words = generator.randint(n_features, size=n_words)
return words, y
# sample words with replacement from selected classes
cumulative_p_w_sample = p_w_c.take(y, axis=1).sum(axis=1).cumsum()
cumulative_p_w_sample /= cumulative_p_w_sample[-1]
words = np.searchsorted(cumulative_p_w_sample, generator.rand(n_words))
return words, y
X_indices = array.array('i')
X_indptr = array.array('i', [0])
Y = []
for i in range(n_samples):
words, y = sample_example()
X_indices.extend(words)
X_indptr.append(len(X_indices))
Y.append(y)
X_data = np.ones(len(X_indices), dtype=np.float64)
X = sp.csr_matrix((X_data, X_indices, X_indptr),
shape=(n_samples, n_features))
X.sum_duplicates()
if not sparse:
X = X.toarray()
# return_indicator can be True due to backward compatibility
if return_indicator in (True, 'sparse', 'dense'):
lb = MultiLabelBinarizer(sparse_output=(return_indicator == 'sparse'))
Y = lb.fit([range(n_classes)]).transform(Y)
elif return_indicator is not False:
raise ValueError("return_indicator must be either 'sparse', 'dense' "
'or False.')
if return_distributions:
return X, Y, p_c, p_w_c
return X, Y
|
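Assuming this generator is scikit-learn's sklearn.datasets.make_multilabel_classification, as the User Guide reference and MultiLabelBinarizer usage suggest, a minimal usage sketch showing the returned shapes is:
from sklearn.datasets import make_multilabel_classification

# values are random; only the shapes and the dense indicator format are fixed
X, Y = make_multilabel_classification(n_samples=10, n_features=5, n_classes=3,
                                      n_labels=2, random_state=0)
print(X.shape)  # (10, 5) feature counts per sample
print(Y.shape)  # (10, 3) binary indicator matrix, one column per class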
36,548 |
def _parse_tz_delta(tz_delta):
match = re.match(
r"(?P<sign>[+-])?(?P<h>\d{1,2})(:(?P<m>\d{2})(:(?P<s>\d{2}))?)?",
tz_delta,
)
# Anything passed to this function should already have hit an equivalent
# regular expression to find the section to parse.
assert match is not None, tz_delta
h, m, s = (
int(v) if v is not None else 0
for v in map(match.group, ("h", "m", "s"))
)
total = h * 3600 + m * 60 + s
if not -86400 < total < 86400:
raise ValueError(
f"Offset must be strictly between -24h and +24h:{tz_delta}"
)
# Yes, +5 maps to an offset of -5h
if match.group("sign") != "-":
total *= -1
return total
|
def _parse_tz_delta(tz_delta):
match = re.match(
r"(?P<sign>[+-])?(?P<h>\d{1,2})(:(?P<m>\d{2})(:(?P<s>\d{2}))?)?",
tz_delta,
)
# Anything passed to this function should already have hit an equivalent
# regular expression to find the section to parse.
assert match is not None, tz_delta
h, m, s = (
int(v) if v is not None else 0
for v in map(match.group, ("h", "m", "s"))
)
total = h * 3600 + m * 60 + s
if not -86400 < total < 86400:
raise ValueError(
f"Offset must be strictly between -24h and +24h: {tz_delta}"
)
# Yes, +5 maps to an offset of -5h
if match.group("sign") != "-":
total *= -1
return total
|
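The trailing sign flip gives _parse_tz_delta POSIX TZ-string semantics, where a positive offset string means west of UTC. A few hand-checked values, written doctest style and assuming the function above is in scope:
>>> _parse_tz_delta('5')       # 'EST5' style: 5 hours west of UTC
-18000
>>> _parse_tz_delta('+5:30')
-19800
>>> _parse_tz_delta('-9')      # 'JST-9' style: 9 hours east of UTC
32400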
44,168 |
def cov_matrix(prob, obs, wires=None, diag_approx=False):
"""Calculate the covariance matrix of a list of commuting observables, given
the joint probability distribution of the system in the shared eigenbasis.
.. note::
This method only works for **commuting observables.**
If the probability distribution is the result of a quantum circuit,
the quantum state must be rotated into the shared
eigenbasis of the list of observables before measurement.
Args:
prob (tensor_like): probability distribution
obs (list[.Observable]): a list of observables for which
to compute the covariance matrix
diag_approx (bool): if True, return the diagonal approximation
wires (.Wires): The wire register of the system. If not provided,
it is assumed that the wires are labelled with consecutive integers.
Returns:
tensor_like: the covariance matrix of size ``(len(obs), len(obs))``
**Example**
Consider the following ansatz and observable list:
>>> obs_list = [qml.PauliX(0) @ qml.PauliZ(1), qml.PauliY(2)]
>>> ansatz = qml.templates.StronglyEntanglingLayers
We can construct a QNode to output the probability distribution in the shared eigenbasis of the
observables:
.. code-block:: python
dev = qml.device("default.qubit", wires=3)
@qml.qnode(dev, interface="autograd")
def circuit(weights):
ansatz(weights, wires=[0, 1, 2])
# rotate into the basis of the observables
for o in obs_list:
o.diagonalizing_gates()
return qml.probs(wires=[0, 1, 2])
We can now compute the covariance matrix:
>>> shape = qml.templates.StronglyEntanglingLayers.shape(n_layers=2, n_wires=3)
>>> weights = np.random.random(shape, requires_grad=True)
>>> cov = qml.math.cov_matrix(circuit(weights), obs_list)
>>> cov
array([[0.98707611, 0.03665537],
[0.03665537, 0.99998377]])
Autodifferentiation is fully supported using all interfaces.
Here we use autograd:
>>> cost_fn = lambda weights: qml.math.cov_matrix(circuit(weights), obs_list)[0, 1]
>>> qml.grad(cost_fn)(weights)[0]
array([[[ 4.94240914e-17, -2.33786398e-01, -1.54193959e-01],
[-3.05414996e-17, 8.40072236e-04, 5.57884080e-04],
[ 3.01859411e-17, 8.60411436e-03, 6.15745204e-04]],
[[ 6.80309533e-04, -1.23162742e-03, 1.08729813e-03],
[-1.53863193e-01, -1.38700657e-02, -1.36243323e-01],
[-1.54665054e-01, -1.89018172e-02, -1.56415558e-01]]])
"""
variances = []
# diagonal variances
for i, o in enumerate(obs):
eigvals = cast(o.eigvals(), dtype=float64)
w = o.wires.labels if wires is None else wires.indices(o.wires)
p = marginal_prob(prob, w)
res = dot(eigvals**2, p) - (dot(eigvals, p)) ** 2
variances.append(res)
cov = diag(variances)
if diag_approx:
return cov
for i, j in itertools.combinations(range(len(obs)), r=2):
o1 = obs[i]
o2 = obs[j]
o1wires = o1.wires.labels if wires is None else wires.indices(o1.wires)
o2wires = o2.wires.labels if wires is None else wires.indices(o2.wires)
shared_wires = set(o1wires + o2wires)
l1 = cast(o1.eigvals(), dtype=float64)
l2 = cast(o2.eigvals(), dtype=float64)
l12 = cast(np.kron(l1, l2), dtype=float64)
p1 = marginal_prob(prob, o1wires)
p2 = marginal_prob(prob, o2wires)
p12 = marginal_prob(prob, shared_wires)
res = dot(l12, p12) - dot(l1, p1) * dot(l2, p2)
cov = scatter_element_add(cov, [i, j], res)
cov = scatter_element_add(cov, [j, i], res)
return cov
|
def cov_matrix(prob, obs, wires=None, diag_approx=False):
"""Calculate the covariance matrix of a list of commuting observables, given
the joint probability distribution of the system in the shared eigenbasis.
.. note::
This method only works for **commuting observables.**
If the probability distribution is the result of a quantum circuit,
the quantum state must be rotated into the shared
eigenbasis of the list of observables before measurement.
Args:
prob (tensor_like): probability distribution
obs (list[.Observable]): a list of observables for which
to compute the covariance matrix
diag_approx (bool): if True, return the diagonal approximation
wires (.Wires): The wire register of the system. If not provided,
it is assumed that the wires are labelled with consecutive integers.
Returns:
tensor_like: the covariance matrix of size ``(len(obs), len(obs))``
**Example**
Consider the following ansatz and observable list:
>>> obs_list = [qml.PauliX(0) @ qml.PauliZ(1), qml.PauliY(2)]
>>> ansatz = qml.templates.StronglyEntanglingLayers
We can construct a QNode to output the probability distribution in the shared eigenbasis of the
observables:
.. code-block:: python
dev = qml.device("default.qubit", wires=3)
@qml.qnode(dev, interface="autograd")
def circuit(weights):
ansatz(weights, wires=[0, 1, 2])
# rotate into the basis of the observables
for o in obs_list:
o.diagonalizing_gates()
return qml.probs(wires=[0, 1, 2])
We can now compute the covariance matrix:
>>> shape = qml.templates.StronglyEntanglingLayers.shape(n_layers=2, n_wires=3)
>>> weights = np.random.random(shape, requires_grad=True)
>>> cov = qml.math.cov_matrix(circuit(weights), obs_list)
>>> cov
array([[0.98707611, 0.03665537],
[0.03665537, 0.99998377]])
Autodifferentiation is fully supported using all interfaces.
Here we use autograd:
>>> cost_fn = lambda weights: qml.math.cov_matrix(circuit(weights), obs_list)[0, 1]
>>> qml.grad(cost_fn)(weights)[0]
array([[[ 4.94240914e-17, -2.33786398e-01, -1.54193959e-01],
[-3.05414996e-17, 8.40072236e-04, 5.57884080e-04],
[ 3.01859411e-17, 8.60411436e-03, 6.15745204e-04]],
[[ 6.80309533e-04, -1.23162742e-03, 1.08729813e-03],
[-1.53863193e-01, -1.38700657e-02, -1.36243323e-01],
[-1.54665054e-01, -1.89018172e-02, -1.56415558e-01]]])
"""
variances = []
# diagonal variances
for i, o in enumerate(obs):
eigvals = cast(o.eigvals(), dtype=float64)
w = o.wires.labels if wires is None else wires.indices(o.wires)
p = marginal_prob(prob, w)
res = dot(eigvals**2, p) - (dot(eigvals, p)) ** 2
variances.append(res)
cov = diag(variances)
if diag_approx:
return cov
for i, j in itertools.combinations(range(len(obs)), r=2):
o1 = obs[i]
o2 = obs[j]
o1wires = o1.wires.labels if wires is None else wires.indices(o1.wires)
o2wires = o2.wires.labels if wires is None else wires.indices(o2.wires)
shared_wires = set(o1wires + o2wires)
l1 = cast(o1.eigvals(), dtype=float64)
l2 = cast(o2.eigvals(), dtype=float64)
l12 = cast(np.kron(l1, l2), dtype=float64)
p1 = marginal_prob(prob, o1wires)
p2 = marginal_prob(prob, o2wires)
p12 = marginal_prob(prob, shared_wires)
res = dot(l12, p12) - dot(l1, p1) * dot(l2, p2)
cov = scatter_element_add(cov, [i, j], res)
cov = scatter_element_add(cov, [j, i], res)
return cov
|
30,809 |
def process_update_command(client, args, old_scim, new_scim, format_pre_text):
parsed_old_scim = map_scim(old_scim)
user_id = parsed_old_scim.get('id')
if not (user_id):
raise Exception('You must provide id of the user')
res = client.get_user('id', user_id)
try:
existing_user = res.json()
except Exception:
existing_user = res
if res.status_code == 200:
map_changes_to_existing_user(existing_user, new_scim)
# custom mapping
envoy_user = client.build_envoy_user(args, existing_user, new_scim, 'update')
# Removing Elements from envoy_user dictionary which was not sent as part of scim
envoy_user = {key: value for key, value in envoy_user.items() if value is not None}
res_update = client.update_user(user_term=user_id, data=envoy_user)
if res_update.status_code == 200:
res_json = res_update.json()
active = res_json.get('active', False)
generic_iam_context = OutputContext(success=True,
iden=user_id,
details=res_json,
active=active)
elif res_update.status_code == 404:
generic_iam_context = OutputContext(success=False,
iden=user_id,
errorCode=res_update.status_code,
errorMessage=USER_NOT_FOUND,
details=res_update.headers.get('status'))
else:
generic_iam_context = OutputContext(success=False,
iden=user_id,
errorCode=res_update.status_code,
errorMessage=res_update.headers.get('status'),
details=res_update.headers.get('status'))
else: # api returns 404, not found for user not found case.
generic_iam_context = OutputContext(success=False, iden=user_id,
errorCode=res.status_code,
errorMessage=res.headers.get('status'), details=str(existing_user))
generic_iam_context_dt = f'{generic_iam_context.command}(val.id == obj.id && val.instanceName == obj.instanceName)'
outputs = {
generic_iam_context_dt: generic_iam_context.data
}
readable_output = tableToMarkdown(name=f'{format_pre_text} Envoy User:',
t=generic_iam_context.data,
headers=["brand", "instanceName", "success", "active", "id",
"username", "email",
"errorCode", "errorMessage", "details"],
removeNull=True)
return (
readable_output,
outputs,
generic_iam_context.data
)
|
def process_update_command(client, args, old_scim, new_scim, command_name):
parsed_old_scim = map_scim(old_scim)
user_id = parsed_old_scim.get('id')
if not (user_id):
raise Exception('You must provide id of the user')
res = client.get_user('id', user_id)
try:
existing_user = res.json()
except Exception:
existing_user = res
if res.status_code == 200:
map_changes_to_existing_user(existing_user, new_scim)
# custom mapping
envoy_user = client.build_envoy_user(args, existing_user, new_scim, 'update')
# Removing Elements from envoy_user dictionary which was not sent as part of scim
envoy_user = {key: value for key, value in envoy_user.items() if value is not None}
res_update = client.update_user(user_term=user_id, data=envoy_user)
if res_update.status_code == 200:
res_json = res_update.json()
active = res_json.get('active', False)
generic_iam_context = OutputContext(success=True,
iden=user_id,
details=res_json,
active=active)
elif res_update.status_code == 404:
generic_iam_context = OutputContext(success=False,
iden=user_id,
errorCode=res_update.status_code,
errorMessage=USER_NOT_FOUND,
details=res_update.headers.get('status'))
else:
generic_iam_context = OutputContext(success=False,
iden=user_id,
errorCode=res_update.status_code,
errorMessage=res_update.headers.get('status'),
details=res_update.headers.get('status'))
else: # api returns 404, not found for user not found case.
generic_iam_context = OutputContext(success=False, iden=user_id,
errorCode=res.status_code,
errorMessage=res.headers.get('status'), details=str(existing_user))
generic_iam_context_dt = f'{generic_iam_context.command}(val.id == obj.id && val.instanceName == obj.instanceName)'
outputs = {
generic_iam_context_dt: generic_iam_context.data
}
    readable_output = tableToMarkdown(name=f'{command_name} Envoy User:',
t=generic_iam_context.data,
headers=["brand", "instanceName", "success", "active", "id",
"username", "email",
"errorCode", "errorMessage", "details"],
removeNull=True)
return (
readable_output,
outputs,
generic_iam_context.data
)
|
58,065 |
def main(): # pragma: no cover
try:
demisto_params = demisto.params() | demisto.args()
last_run = demisto.getLastRun()
last_object_ids = last_run.get('ids')
if 'after' not in last_run:
after = dateparser.parse(demisto_params['after'].strip())
last_run = after.timestamp()
last_run = {LogType[LogType.AUTHENTICATION]: last_run,
LogType[LogType.ADMINISTRATION]: last_run,
LogType[LogType.TELEPHONY]: last_run}
else:
last_run = last_run['after']
request_order = last_run.get('request_order',
[LogType.AUTHENTICATION, LogType.ADMINISTRATION, LogType.TELEPHONY])
demisto_params['params'] = Params(**demisto_params, mintime=last_run)
client = Client(demisto_params)
get_events = GetEvents(client, request_order)
command = demisto.command()
if command == 'test-module':
get_events.aggregated_results()
demisto.results('ok')
elif command == 'duo-get-events' or command == 'fetch-events':
events = get_events.aggregated_results(last_object_ids=last_object_ids)
demisto.setLastRun(get_events.get_last_run(events))
send_events_to_xsiam(events, 'duo', 'duo')
if command == 'duo-get-events':
command_results = CommandResults(
readable_output=tableToMarkdown('Duo Logs', events, headerTransform=pascalToSpace),
raw_response=events,
)
return_results(command_results)
except Exception as e:
return_error(f'Failed to execute {demisto.command()} command. Error: {str(e)}')
|
def main(): # pragma: no cover
try:
demisto_params = demisto.params() | demisto.args()
last_run = demisto.getLastRun()
last_object_ids = last_run.get('ids')
if 'after' not in last_run:
after = dateparser.parse(demisto_params['after'].strip())
last_run = after.timestamp()
last_run = {LogType.AUTHENTICATION.value: last_run,
LogType.ADMINISTRATION.value: last_run,
LogType.TELEPHONY.value: last_run}
else:
last_run = last_run['after']
request_order = last_run.get('request_order',
[LogType.AUTHENTICATION, LogType.ADMINISTRATION, LogType.TELEPHONY])
demisto_params['params'] = Params(**demisto_params, mintime=last_run)
client = Client(demisto_params)
get_events = GetEvents(client, request_order)
command = demisto.command()
if command == 'test-module':
get_events.aggregated_results()
demisto.results('ok')
elif command == 'duo-get-events' or command == 'fetch-events':
events = get_events.aggregated_results(last_object_ids=last_object_ids)
demisto.setLastRun(get_events.get_last_run(events))
send_events_to_xsiam(events, 'duo', 'duo')
if command == 'duo-get-events':
command_results = CommandResults(
readable_output=tableToMarkdown('Duo Logs', events, headerTransform=pascalToSpace),
raw_response=events,
)
return_results(command_results)
except Exception as e:
return_error(f'Failed to execute {demisto.command()} command. Error: {str(e)}')
|
17,713 |
def load_cif(file_or_path=None):
"""Load a CifFile object into memory, return an mbuild.Lattice.
"""
assert isinstance(file_or_path, str) or isinstance(file_or_path, pathlib.Path)
cif_location = pathlib.Path(file_or_path)
reader = garnett.ciffilereader.CifFileReader()
with open(cif_location.absolute(), 'r') as fp:
my_cif = reader.read(fp)
# only need the first frame, not used as a trajectory
frame = my_cif[0]
# convert angstroms to nanometers
lattice_spacing = [frame.Lx, frame.Ly, frame.Lz] / 10
# create lattice_points dictionary
position_dict = defaultdict(list)
for elem_id, coords in zip(frane.typeid, frame.cif_coordinates):
position_dict[frame.types[elem_id]].append(list(coords))
box_vectors = frame.box.get_box_matrix()
return Lattice(lattice_spacing=lattice_spacing,
lattice_vectors=box_vectors,
lattice_points=lattice_points)
|
def load_cif(file_or_path=None):
"""Load a CifFile object into memory, return an mbuild.Lattice.
"""
assert isinstance(file_or_path, str) or isinstance(file_or_path, pathlib.Path)
cif_location = pathlib.Path(file_or_path)
reader = garnett.ciffilereader.CifFileReader()
with open(cif_location.absolute(), 'r') as fp:
my_cif = reader.read(fp)
# only need the first frame, not used as a trajectory
frame = my_cif[0]
# convert angstroms to nanometers
lattice_spacing = [frame.Lx, frame.Ly, frame.Lz] / 10
# create lattice_points dictionary
position_dict = defaultdict(list)
for elem_id, coords in zip(frame.typeid, frame.cif_coordinates):
position_dict[frame.types[elem_id]].append(list(coords))
box_vectors = frame.box.get_box_matrix()
return Lattice(lattice_spacing=lattice_spacing,
lattice_vectors=box_vectors,
                   lattice_points=position_dict)
|
19,092 |
def clean_environment():
# Stuff in here sanitizes the build environment to eliminate
# anything the user has set that may interfere. We apply it immediately
# unlike the other functions so it doesn't overwrite what the modules load.
env = EnvironmentModifications()
# Remove these vars from the environment during build because they
# can affect how some packages find libraries. We want to make
# sure that builds never pull in unintended external dependencies.
env.unset('LD_LIBRARY_PATH')
env.unset('LD_RUN_PATH')
env.unset('DYLD_LIBRARY_PATH')
env.unset('DYLD_FALLBACK_LIBRARY_PATH')
# These vars affect how the compiler finds libraries and include dirs.
env.unset('LIBRARY_PATH')
env.unset('CPATH')
env.unset('C_INCLUDE_PATH')
env.unset('CPLUS_INCLUDE_PATH')
env.unset('OBJC_INCLUDE_PATH')
env.unset('CMAKE_PREFIX_PATH')
# Affects GNU make, can e.g. indirectly inhibit enabling parallel build
env.unset('MAKEFLAGS')
# Avoid that libraries of build dependencies get hijacked.
env.unset('LD_PRELOAD')
env.unset('DYLD_INSERT_LIBRARIES')
# Avoid <packagename>_ROOT user variables overriding spack dependencies
# https://cmake.org/cmake/help/latest/variable/PackageName_ROOT.html
for varname in os.environ.keys():
if '_ROOT' in varname:
env.unset(varname)
# On Cray "cluster" systems, unset CRAY_LD_LIBRARY_PATH to avoid
# interference with Spack dependencies.
# CNL requires these variables to be set (or at least some of them,
# depending on the CNL version).
on_cray, using_cnl = _on_cray()
if on_cray and not using_cnl:
env.unset('CRAY_LD_LIBRARY_PATH')
for varname in os.environ.keys():
if 'PKGCONF' in varname:
env.unset(varname)
# Unset the following variables because they can affect installation of
# Autotools and CMake packages.
build_system_vars = [
'CC', 'CFLAGS', 'CPP', 'CPPFLAGS', # C variables
'CXX', 'CCC', 'CXXFLAGS', 'CXXCPP', # C++ variables
'F77', 'FFLAGS', 'FLIBS', # Fortran77 variables
'FC', 'FCFLAGS', 'FCLIBS', # Fortran variables
'LDFLAGS', 'LIBS' # linker variables
]
for v in build_system_vars:
env.unset(v)
# Unset mpi environment vars. These flags should only be set by
# mpi providers for packages with mpi dependencies
mpi_vars = [
'MPICC', 'MPICXX', 'MPIFC', 'MPIF77', 'MPIF90'
]
for v in mpi_vars:
env.unset(v)
build_lang = spack.config.get('config:build_language')
if build_lang:
# Override language-related variables. This can be used to force
# English compiler messages etc., which allows parse_log_events to
# show useful matches.
env.set('LC_ALL', build_lang)
# Remove any macports installs from the PATH. The macports ld can
# cause conflicts with the built-in linker on el capitan. Solves
# assembler issues, e.g.:
# suffix or operands invalid for `movq'"
path = get_path('PATH')
for p in path:
if '/macports/' in p:
env.remove_path('PATH', p)
env.apply_modifications()
|
def clean_environment():
# Stuff in here sanitizes the build environment to eliminate
# anything the user has set that may interfere. We apply it immediately
# unlike the other functions so it doesn't overwrite what the modules load.
env = EnvironmentModifications()
# Remove these vars from the environment during build because they
# can affect how some packages find libraries. We want to make
# sure that builds never pull in unintended external dependencies.
env.unset('LD_LIBRARY_PATH')
env.unset('LD_RUN_PATH')
env.unset('DYLD_LIBRARY_PATH')
env.unset('DYLD_FALLBACK_LIBRARY_PATH')
# These vars affect how the compiler finds libraries and include dirs.
env.unset('LIBRARY_PATH')
env.unset('CPATH')
env.unset('C_INCLUDE_PATH')
env.unset('CPLUS_INCLUDE_PATH')
env.unset('OBJC_INCLUDE_PATH')
env.unset('CMAKE_PREFIX_PATH')
# Affects GNU make, can e.g. indirectly inhibit enabling parallel build
env.unset('MAKEFLAGS')
# Avoid that libraries of build dependencies get hijacked.
env.unset('LD_PRELOAD')
env.unset('DYLD_INSERT_LIBRARIES')
# Avoid <packagename>_ROOT user variables overriding spack dependencies
# https://cmake.org/cmake/help/latest/variable/PackageName_ROOT.html
for varname in os.environ.keys():
if varname.endswith('_ROOT'):
env.unset(varname)
# On Cray "cluster" systems, unset CRAY_LD_LIBRARY_PATH to avoid
# interference with Spack dependencies.
# CNL requires these variables to be set (or at least some of them,
# depending on the CNL version).
on_cray, using_cnl = _on_cray()
if on_cray and not using_cnl:
env.unset('CRAY_LD_LIBRARY_PATH')
for varname in os.environ.keys():
if 'PKGCONF' in varname:
env.unset(varname)
# Unset the following variables because they can affect installation of
# Autotools and CMake packages.
build_system_vars = [
'CC', 'CFLAGS', 'CPP', 'CPPFLAGS', # C variables
'CXX', 'CCC', 'CXXFLAGS', 'CXXCPP', # C++ variables
'F77', 'FFLAGS', 'FLIBS', # Fortran77 variables
'FC', 'FCFLAGS', 'FCLIBS', # Fortran variables
'LDFLAGS', 'LIBS' # linker variables
]
for v in build_system_vars:
env.unset(v)
# Unset mpi environment vars. These flags should only be set by
# mpi providers for packages with mpi dependencies
mpi_vars = [
'MPICC', 'MPICXX', 'MPIFC', 'MPIF77', 'MPIF90'
]
for v in mpi_vars:
env.unset(v)
build_lang = spack.config.get('config:build_language')
if build_lang:
# Override language-related variables. This can be used to force
# English compiler messages etc., which allows parse_log_events to
# show useful matches.
env.set('LC_ALL', build_lang)
# Remove any macports installs from the PATH. The macports ld can
# cause conflicts with the built-in linker on el capitan. Solves
# assembler issues, e.g.:
# suffix or operands invalid for `movq'"
path = get_path('PATH')
for p in path:
if '/macports/' in p:
env.remove_path('PATH', p)
env.apply_modifications()
|
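The behavioural change in this pair is the '_ROOT' test: the substring check also unsets variables that merely contain '_ROOT', while the endswith check is limited to CMake-style <PackageName>_ROOT hints. A quick illustration with hypothetical variable names:
candidates = ['BOOST_ROOT', 'Z3_ROOT_DIR', 'ROOTSYS', 'CMAKE_PREFIX_PATH']

substring_match = [v for v in candidates if '_ROOT' in v]      # original test
suffix_match = [v for v in candidates if v.endswith('_ROOT')]  # modified test

print(substring_match)  # ['BOOST_ROOT', 'Z3_ROOT_DIR']
print(suffix_match)     # ['BOOST_ROOT']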
34,228 |
def get_component_class(component_name: Text) -> Type["Component"]:
"""Resolve component name to a registered components class."""
if component_name not in registered_components:
if component_name not in old_style_names:
try:
return class_from_module_path(component_name)
except ModuleNotFoundError as e:
# when component_name is a path to a class but that path is invalid
raise Exception(
"Failed to find component class for '{}'.Unknown component name.\n{}".format(
component_name, e.msg
)
)
except AttributeError:
# when component_name is a path to a class but the path does not contain that class
module_name, _, class_name = component_name.rpartition(".")
raise Exception(
"Failed to find component class for '{}'.Unknown component name.\n"
"Cannot find class '{}' in module {}.".format(
component_name, class_name, module_name
)
)
except ImportError:
# when component_name is a class name and not part of old_style_names
raise Exception(
"Failed to find component class for '{0}'.Unknown component name.\n"
"Cannot import class '{0}' from global namespace.".format(
component_name
)
)
else:
# DEPRECATED ensures compatibility, remove in future versions
logger.warning(
"DEPRECATION warning: your nlu config file "
"contains old style component name `{}`, "
"you should change it to its class name: `{}`."
"".format(component_name, old_style_names[component_name])
)
component_name = old_style_names[component_name]
return registered_components[component_name]
|
def get_component_class(component_name: Text) -> Type["Component"]:
"""Resolve component name to a registered components class."""
if component_name not in registered_components:
if component_name not in old_style_names:
try:
return class_from_module_path(component_name)
except ModuleNotFoundError as e:
# when component_name is a path to a class but that path is invalid
raise Exception(
"Failed to find module '{}'. \n{}".format(
component_name, e.msg
)
)
except AttributeError:
# when component_name is a path to a class but the path does not contain that class
module_name, _, class_name = component_name.rpartition(".")
raise Exception(
"Failed to find component class for '{}'.Unknown component name.\n"
"Cannot find class '{}' in module {}.".format(
component_name, class_name, module_name
)
)
except ImportError:
# when component_name is a class name and not part of old_style_names
raise Exception(
"Failed to find component class for '{0}'.Unknown component name.\n"
"Cannot import class '{0}' from global namespace.".format(
component_name
)
)
else:
# DEPRECATED ensures compatibility, remove in future versions
logger.warning(
"DEPRECATION warning: your nlu config file "
"contains old style component name `{}`, "
"you should change it to its class name: `{}`."
"".format(component_name, old_style_names[component_name])
)
component_name = old_style_names[component_name]
return registered_components[component_name]
|
36,025 |
def test_ambiguous_label_uuid(setup_codes):
"""Situation: LABEL of entity_03 is exactly equal to UUID of entity_01.
Verify that using an ambiguous identifier gives precedence to the UUID interpretation
Appending the special ambiguity breaker character will force the identifier to be treated as a LABEL
"""
entity_01, entity_02, entity_03 = setup_codes
param = CodeParamType()
identifier = '{}'.format(entity_03.label)
result = param.convert(identifier, None, None)
assert result.uuid == entity_01.uuid
identifier = '{}{}'.format(entity_03.label, OrmEntityLoader.label_ambiguity_breaker)
result = param.convert(identifier, None, None)
assert result.uuid == entity_03.uuid
|
def test_ambiguous_label_uuid(setup_codes):
"""Situation: LABEL of entity_03 is exactly equal to UUID of entity_01.
Verify that using an ambiguous identifier gives precedence to the UUID interpretation
Appending the special ambiguity breaker character will force the identifier to be treated as a LABEL
"""
entity_01, _, entity_03 = setup_codes
param = CodeParamType()
identifier = '{}'.format(entity_03.label)
result = param.convert(identifier, None, None)
assert result.uuid == entity_01.uuid
identifier = '{}{}'.format(entity_03.label, OrmEntityLoader.label_ambiguity_breaker)
result = param.convert(identifier, None, None)
assert result.uuid == entity_03.uuid
|
3,856 |
def floyd_warshall_numpy(G, nodelist=None, weight="weight"):
"""Find all-pairs shortest path lengths using Floyd's algorithm.
This algorithm for finding shortest paths takes advantage of
matrix representations of a graph and works well for dense
graphs where all-pairs shortest path is desired. The results
are returns in a numpy array with each column and row
representing a node and entries providing the distance along
the shortest path between that row's node and column's node.
If no path exists the distance is Inf.
Parameters
----------
G : NetworkX graph
nodelist : list, optional (default= the order of G.nodes)
The rows and columns are ordered by the nodes in nodelist.
If nodelist is None then the ordering is produced by G.nodes.
Nodelist should include all nodes in G.
weight: string, optional (default= 'weight')
Edge data key corresponding to the edge weight.
Returns
-------
distance : NumPy matrix
A matrix of shortest path distances between nodes.
If there is no path between two nodes the corresponding matrix entry
will be Inf.
Notes
-----
Floyd's algorithm is appropriate for finding shortest paths in
dense graphs or graphs with negative weights when Dijkstra's
algorithm fails. This algorithm can still fail if there are negative
cycles. It has running time $O(n^3)$ with running space of $O(n^2)$.
Raises
------
NetworkXError
If nodelist does not contain all nodes in G.
"""
import numpy as np
if nodelist is not None:
if not (len(nodelist) == len(G) == len(set(nodelist))):
msg = ("nodelist must contain every node in G with no repeats."
"If you wanted a subgraph of G use G.subgraph(nodelist)")
raise nx.NetworkXError(msg)
# To handle cases when an edge has weight=0, we must make sure that
# nonedges are not given the value 0 as well.
A = nx.to_numpy_array(
G, nodelist=nodelist, multigraph_weight=min, weight=weight, nonedge=np.inf
)
n, m = A.shape
np.fill_diagonal(A, 0) # diagonal elements should be zero
for i in range(n):
# The second term has the same shape as A due to broadcasting
A = np.minimum(A, A[i, :][np.newaxis, :] + A[:, i][:, np.newaxis])
return A
|
def floyd_warshall_numpy(G, nodelist=None, weight="weight"):
"""Find all-pairs shortest path lengths using Floyd's algorithm.
This algorithm for finding shortest paths takes advantage of
matrix representations of a graph and works well for dense
graphs where all-pairs shortest path is desired. The results
are returned as a NumPy array with each column and row
representing a node and entries providing the distance along
the shortest path between that row's node and column's node.
If no path exists the distance is Inf.
Parameters
----------
G : NetworkX graph
nodelist : list, optional (default= the order of G.nodes)
The rows and columns are ordered by the nodes in nodelist.
If nodelist is None then the ordering is produced by G.nodes.
Nodelist should include all nodes in G.
weight: string, optional (default= 'weight')
Edge data key corresponding to the edge weight.
Returns
-------
distance : NumPy matrix
A matrix of shortest path distances between nodes.
If there is no path between two nodes the corresponding matrix entry
will be Inf.
Notes
-----
Floyd's algorithm is appropriate for finding shortest paths in
dense graphs or graphs with negative weights when Dijkstra's
algorithm fails. This algorithm can still fail if there are negative
cycles. It has running time $O(n^3)$ with running space of $O(n^2)$.
Raises
------
NetworkXError
If nodelist does not contain all nodes in G.
"""
import numpy as np
if nodelist is not None:
if not (len(nodelist) == len(G) == len(set(nodelist))):
msg = ("nodelist must contain every node in G with no repeats."
"If you wanted a subgraph of G use G.subgraph(nodelist)")
raise nx.NetworkXError(msg)
# To handle cases when an edge has weight=0, we must make sure that
# nonedges are not given the value 0 as well.
A = nx.to_numpy_array(
G, nodelist=nodelist, multigraph_weight=min, weight=weight, nonedge=np.inf
)
n, m = A.shape
np.fill_diagonal(A, 0) # diagonal elements should be zero
for i in range(n):
# The second term has the same shape as A due to broadcasting
A = np.minimum(A, A[i, :][np.newaxis, :] + A[:, i][:, np.newaxis])
return A
|
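A minimal usage sketch for the floyd_warshall_numpy shown above, assuming only that networkx and numpy are installed; the toy graph and its weights are illustrative and not taken from this dataset row:
import networkx as nx

# Three nodes, where the indirect a -> b -> c route (1 + 2) is shorter than the
# direct a -> c edge (5). Distances follow the nodelist order.
G = nx.Graph()
G.add_weighted_edges_from([("a", "b", 1.0), ("b", "c", 2.0), ("a", "c", 5.0)])
D = nx.floyd_warshall_numpy(G, nodelist=["a", "b", "c"])
print(D[0, 2])  # 3.0: the a -> b -> c route beats the direct edge of weight 5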
6,770 |
def get_google_drive_status():
return True if frappe.db.exists("Google Drive", {"enable": 1}) else False
|
def get_google_drive_status():
return frappe.db.exists("Google Drive", {"enable": 1})
|
31,605 |
def create_computer_command(client: Client, expand: List[str], overrides: bool, host_name: str,
display_name: Optional[str], description: Optional[str], group_id: Optional[int],
policy_id: Optional[int], asset_importance_id: Optional[int],
relay_list_id: Optional[int]) -> CommandResults:
"""
Create a new computer inside Trend Micro.
Args:
client (client): The Trend Micro API client.
expand (List[str]): The desired information about the computers.
overrides (bool): Whether to get the overridden properties or not.
host_name (str): The hostname of the computer.
display_name (Optional[str]): The display name of the computer.
description (Optional[str]): The description about the new computer.
group_id (Optional[int]): The computer group ID of the new computer.
policy_id (Optional[int]): The ID of the desired policy to apply to new computer.
asset_importance_id (Optional[int]): The asset importance ID to assign to the new computer.
relay_list_id (Optional[int]): The ID of the relay list to assign to the new computer.
Returns:
CommandResults: Command results with raw response, outputs and readable outputs.
"""
response = client.create_computer(expand=expand, overrides=overrides, host_name=host_name,
display_name=display_name, description=description, group_id=group_id,
policy_id=policy_id, asset_importance_id=asset_importance_id,
relay_list_id=relay_list_id)
markdown = tableToMarkdown(f"Details for the new computer {response['hostName']}", response, removeNull=True,
headers=COMPUTER_TABLE_HEADERS, headerTransform=pascalToSpace)
return CommandResults(outputs_prefix="TrendMicro.Computers", outputs_key_field="ID", outputs=response,
readable_output=markdown, raw_response=response)
|
def create_computer_command(client: Client, expand: List[str], overrides: bool, host_name: str,
display_name: Optional[str], description: Optional[str], group_id: Optional[int],
policy_id: Optional[int], asset_importance_id: Optional[int],
relay_list_id: Optional[int]) -> CommandResults:
"""
Create a new computer inside Trend Micro.
Args:
client (client): The Trend Micro API client.
expand (List[str]): The desired information about the computers.
overrides (bool): Whether to get the overridden properties or not.
host_name (str): The hostname of the computer.
display_name (Optional[str]): The display name of the computer.
description (Optional[str]): The description about the new computer.
group_id (Optional[int]): The computer group ID of the new computer.
policy_id (Optional[int]): The ID of the desired policy to apply to new computer.
asset_importance_id (Optional[int]): The asset importance ID to assign to the new computer.
relay_list_id (Optional[int]): The ID of the relay list to assign to the new computer.
Returns:
CommandResults: Command results with raw response, outputs and readable outputs.
"""
response = client.create_computer(expand=expand, overrides=overrides, host_name=host_name,
display_name=display_name, description=description, group_id=group_id,
policy_id=policy_id, asset_importance_id=asset_importance_id,
relay_list_id=relay_list_id)
markdown = tableToMarkdown(f"Details for the new computer {response.get('hostName')}", response, removeNull=True,
headers=COMPUTER_TABLE_HEADERS, headerTransform=pascalToSpace)
return CommandResults(outputs_prefix="TrendMicro.Computers", outputs_key_field="ID", outputs=response,
readable_output=markdown, raw_response=response)
|
14,702 |
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the access to Netatmo binary sensor."""
home = config.get(CONF_HOME)
timeout = config.get(CONF_TIMEOUT)
if timeout is None:
timeout = DEFAULT_TIMEOUT
module_name = None
import pyatmo
conf = hass.data.get(DATA_NETATMO_CONFIG, {})
try:
data = CameraData(hass, conf, home)
if not data.get_camera_names():
return None
except pyatmo.NoDevice:
return None
welcome_sensors = config.get(
CONF_WELCOME_SENSORS, WELCOME_SENSOR_TYPES)
presence_sensors = config.get(
CONF_PRESENCE_SENSORS, PRESENCE_SENSOR_TYPES)
tag_sensors = config.get(CONF_TAG_SENSORS, TAG_SENSOR_TYPES)
for camera_name in data.get_camera_names():
camera_type = data.get_camera_type(camera=camera_name, home=home)
if camera_type == 'NACamera':
if CONF_CAMERAS in config:
if config[CONF_CAMERAS] != [] and \
camera_name not in config[CONF_CAMERAS]:
continue
for variable in welcome_sensors:
add_entities([NetatmoBinarySensor(
data, camera_name, module_name, home, timeout,
camera_type, variable)], True)
if camera_type == 'NOC':
if CONF_CAMERAS in config:
if config[CONF_CAMERAS] != [] and \
camera_name not in config[CONF_CAMERAS]:
continue
for variable in presence_sensors:
add_entities([NetatmoBinarySensor(
data, camera_name, module_name, home, timeout,
camera_type, variable)], True)
for module_name in data.get_module_names(camera_name):
for variable in tag_sensors:
camera_type = None
add_entities([NetatmoBinarySensor(
data, camera_name, module_name, home, timeout,
camera_type, variable)], True)
|
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the access to Netatmo binary sensor."""
home = config.get(CONF_HOME)
timeout = config.get(CONF_TIMEOUT)
if timeout is None:
timeout = DEFAULT_TIMEOUT
module_name = None
import pyatmo
auth = hass.data.get(DATA_NETATMO_CONFIG, {})
try:
data = CameraData(hass, auth, home)
if not data.get_camera_names():
return None
except pyatmo.NoDevice:
return None
welcome_sensors = config.get(
CONF_WELCOME_SENSORS, WELCOME_SENSOR_TYPES)
presence_sensors = config.get(
CONF_PRESENCE_SENSORS, PRESENCE_SENSOR_TYPES)
tag_sensors = config.get(CONF_TAG_SENSORS, TAG_SENSOR_TYPES)
for camera_name in data.get_camera_names():
camera_type = data.get_camera_type(camera=camera_name, home=home)
if camera_type == 'NACamera':
if CONF_CAMERAS in config:
if config[CONF_CAMERAS] != [] and \
camera_name not in config[CONF_CAMERAS]:
continue
for variable in welcome_sensors:
add_entities([NetatmoBinarySensor(
data, camera_name, module_name, home, timeout,
camera_type, variable)], True)
if camera_type == 'NOC':
if CONF_CAMERAS in config:
if config[CONF_CAMERAS] != [] and \
camera_name not in config[CONF_CAMERAS]:
continue
for variable in presence_sensors:
add_entities([NetatmoBinarySensor(
data, camera_name, module_name, home, timeout,
camera_type, variable)], True)
for module_name in data.get_module_names(camera_name):
for variable in tag_sensors:
camera_type = None
add_entities([NetatmoBinarySensor(
data, camera_name, module_name, home, timeout,
camera_type, variable)], True)
|
58,321 |
def load(path, filename, **kwargs):
"""Load network from file.
Parameters
----------
path: str
Path to directory where to load model from.
filename: str
Name of file to load model from.
Returns
-------
: dict[str, Union[keras.models.Sequential, function]]
A dictionary of objects that constitute the input model. It must
contain the following two keys:
- 'model': keras.models.Sequential
Keras model instance of the network.
- 'val_fn': function
Function that allows evaluating the original model.
"""
import os
from keras import models, metrics
filepath = str(os.path.join(path, filename))
if os.path.exists(filepath + '.json'):
model = models.model_from_json(open(filepath + '.json').read())
try:
model.load_weights(filepath + '.h5')
except Exception: # Allows h5 files without a .h5 extension to be loaded
model.load_weights(filepath)
# With this loading method, optimizer and loss cannot be recovered.
# Could be specified by user, but since they are not really needed
# at inference time, set them to the most common choice.
# TODO: Proper reinstantiation should be doable since Keras2
model.compile('sgd', 'categorical_crossentropy',
['accuracy', metrics.top_k_categorical_accuracy])
else:
from snntoolbox.parsing.utils import get_custom_activations_dict, \
assemble_custom_dict, get_custom_layers_dict
filepath_custom_objects = kwargs.get('filepath_custom_objects', None)
if filepath_custom_objects is not None:
filepath_custom_objects = str(filepath_custom_objects) # python 2
custom_dicts = assemble_custom_dict(
get_custom_activations_dict(filepath_custom_objects),
get_custom_layers_dict())
if "config" in kwargs.keys():
custom_dicts_path = kwargs['config'].get(
'paths', 'filepath_custom_objects')
custom_dicts = assemble_custom_dict(
custom_dicts,
get_custom_activations_dict(custom_dicts_path))
try:
model = models.load_model(
filepath + '.h5',
custom_dicts)
except OSError as e:
print(e)
model = models.load_model(
filepath,
custom_dicts)
model.compile(model.optimizer, model.loss,
['accuracy', metrics.top_k_categorical_accuracy])
model.summary()
return {'model': model, 'val_fn': model.evaluate}
|
def load(path, filename, **kwargs):
"""Load network from file.
Parameters
----------
path: str
Path to directory where to load model from.
filename: str
Name of file to load model from.
Returns
-------
: dict[str, Union[keras.models.Sequential, function]]
A dictionary of objects that constitute the input model. It must
contain the following two keys:
- 'model': keras.models.Sequential
Keras model instance of the network.
- 'val_fn': function
Function that allows evaluating the original model.
"""
import os
from keras import models, metrics
filepath = str(os.path.join(path, filename))
if os.path.exists(filepath + '.json'):
model = models.model_from_json(open(filepath + '.json').read())
try:
model.load_weights(filepath + '.h5')
# Allows h5 files without a .h5 extension to be loaded.
except OSError:
model.load_weights(filepath)
# With this loading method, optimizer and loss cannot be recovered.
# Could be specified by user, but since they are not really needed
# at inference time, set them to the most common choice.
# TODO: Proper reinstantiation should be doable since Keras2
model.compile('sgd', 'categorical_crossentropy',
['accuracy', metrics.top_k_categorical_accuracy])
else:
from snntoolbox.parsing.utils import get_custom_activations_dict, \
assemble_custom_dict, get_custom_layers_dict
filepath_custom_objects = kwargs.get('filepath_custom_objects', None)
if filepath_custom_objects is not None:
filepath_custom_objects = str(filepath_custom_objects) # python 2
custom_dicts = assemble_custom_dict(
get_custom_activations_dict(filepath_custom_objects),
get_custom_layers_dict())
if "config" in kwargs.keys():
custom_dicts_path = kwargs['config'].get(
'paths', 'filepath_custom_objects')
custom_dicts = assemble_custom_dict(
custom_dicts,
get_custom_activations_dict(custom_dicts_path))
try:
model = models.load_model(
filepath + '.h5',
custom_dicts)
except OSError as e:
print(e)
model = models.load_model(
filepath,
custom_dicts)
model.compile(model.optimizer, model.loss,
['accuracy', metrics.top_k_categorical_accuracy])
model.summary()
return {'model': model, 'val_fn': model.evaluate}
|
4,351 |
def test_fnirs_channel_naming_and_order_custom_chroma():
"""Ensure fNIRS channel checking on manually created data."""
data = np.random.normal(size=(6, 10))
# Start with a correctly named raw intensity dataset
# These are the steps required to build an fNIRS Raw object from scratch
ch_names = ['S1_D1 hbo', 'S1_D1 hbr', 'S2_D1 hbo', 'S2_D1 hbr',
'S3_D1 hbo', 'S3_D1 hbr']
ch_types = np.tile(["hbo", "hbr"], 3)
info = create_info(ch_names=ch_names, ch_types=ch_types, sfreq=1.0)
raw = RawArray(data, info, verbose=True)
chroma = np.unique(_channel_chromophore(raw))
picks = _check_channels_ordered(raw, chroma)
assert len(picks) == len(raw.ch_names)
assert len(picks) == 6
# Test block creation fails
ch_names = ['S1_D1 hbo', 'S2_D1 hbo', 'S3_D1 hbo',
'S1_D1 hbr', 'S2_D1 hbr', 'S3_D1 hbr']
ch_types = np.repeat(["hbo", "hbr"], 3)
info = create_info(ch_names=ch_names, ch_types=ch_types, sfreq=1.0)
raw = RawArray(data, info, verbose=True)
with pytest.raises(ValueError, match='not ordered .* chromophore'):
_check_channels_ordered(raw, ["hbo", "hbr"])
# Reordering should fix
raw.pick(picks=[0, 3, 1, 4, 2, 5])
_check_channels_ordered(raw, ["hbo", "hbr"])
# Wrong names should fail
with pytest.raises(ValueError, match='not ordered .* chromophore'):
_check_channels_ordered(raw, ["hbb", "hbr"])
# Test weird naming
ch_names = ['S1_D1 hbb', 'S1_D1 hbr', 'S2_D1 hbb', 'S2_D1 hbr',
'S3_D1 hbb', 'S3_D1 hbr']
ch_types = np.tile(["hbo", "hbr"], 3)
info = create_info(ch_names=ch_names, ch_types=ch_types, sfreq=1.0)
raw = RawArray(data, info, verbose=True)
with pytest.raises(ValueError, match='naming conventions'):
_check_channels_ordered(raw, ["hbb", "hbr"])
# Check more weird naming
ch_names = ['S1_DX hbo', 'S1_DX hbr', 'S2_D1 hbo', 'S2_D1 hbr',
'S3_D1 hbo', 'S3_D1 hbr']
ch_types = np.tile(["hbo", "hbr"], 3)
info = create_info(ch_names=ch_names, ch_types=ch_types, sfreq=1.0)
raw = RawArray(data, info, verbose=True)
with pytest.raises(ValueError, match='can not be parsed'):
_check_channels_ordered(raw, ["hbo", "hbr"])
|
def test_fnirs_channel_naming_and_order_custom_chroma():
"""Ensure fNIRS channel checking on manually created data."""
data = np.random.RandomState(0).randn(6, 10)
# Start with a correctly named raw intensity dataset
# These are the steps required to build an fNIRS Raw object from scratch
ch_names = ['S1_D1 hbo', 'S1_D1 hbr', 'S2_D1 hbo', 'S2_D1 hbr',
'S3_D1 hbo', 'S3_D1 hbr']
ch_types = np.tile(["hbo", "hbr"], 3)
info = create_info(ch_names=ch_names, ch_types=ch_types, sfreq=1.0)
raw = RawArray(data, info, verbose=True)
chroma = np.unique(_channel_chromophore(raw))
picks = _check_channels_ordered(raw, chroma)
assert len(picks) == len(raw.ch_names)
assert len(picks) == 6
# Test block creation fails
ch_names = ['S1_D1 hbo', 'S2_D1 hbo', 'S3_D1 hbo',
'S1_D1 hbr', 'S2_D1 hbr', 'S3_D1 hbr']
ch_types = np.repeat(["hbo", "hbr"], 3)
info = create_info(ch_names=ch_names, ch_types=ch_types, sfreq=1.0)
raw = RawArray(data, info, verbose=True)
with pytest.raises(ValueError, match='not ordered .* chromophore'):
_check_channels_ordered(raw, ["hbo", "hbr"])
# Reordering should fix
raw.pick(picks=[0, 3, 1, 4, 2, 5])
_check_channels_ordered(raw, ["hbo", "hbr"])
# Wrong names should fail
with pytest.raises(ValueError, match='not ordered .* chromophore'):
_check_channels_ordered(raw, ["hbb", "hbr"])
# Test weird naming
ch_names = ['S1_D1 hbb', 'S1_D1 hbr', 'S2_D1 hbb', 'S2_D1 hbr',
'S3_D1 hbb', 'S3_D1 hbr']
ch_types = np.tile(["hbo", "hbr"], 3)
info = create_info(ch_names=ch_names, ch_types=ch_types, sfreq=1.0)
raw = RawArray(data, info, verbose=True)
with pytest.raises(ValueError, match='naming conventions'):
_check_channels_ordered(raw, ["hbb", "hbr"])
# Check more weird naming
ch_names = ['S1_DX hbo', 'S1_DX hbr', 'S2_D1 hbo', 'S2_D1 hbr',
'S3_D1 hbo', 'S3_D1 hbr']
ch_types = np.tile(["hbo", "hbr"], 3)
info = create_info(ch_names=ch_names, ch_types=ch_types, sfreq=1.0)
raw = RawArray(data, info, verbose=True)
with pytest.raises(ValueError, match='can not be parsed'):
_check_channels_ordered(raw, ["hbo", "hbr"])
|
38,527 |
def rlencode(A: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
"""Compress matrix by looking for identical columns.
Example usage: Convert a full set of (row or column) indices of a
sparse matrix into compressed storage.
Acknowledgement: The code is heavily inspired by MRST's function with the
same name, however, requirements on the shape of functions are probably
somewhat different.
Parameters:
A (np.ndarray): Matrix to be compressed. Should be 2d. Compression
will be along the second axis.
Returns:
np.ndarray: The compressed array, size n x m.
np.ndarray: Number of times each row in the first output array should
be repeated to restore the original array.
See also:
rlencode
"""
comp = A[::, 0:-1] != A[::, 1::]
i = np.any(comp, axis=0)
i = np.hstack((np.argwhere(i).ravel(), (A.shape[1] - 1)))
num = np.diff(np.hstack((np.array([-1]), i)))
return A[::, i], num
|
def rlencode(A: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
"""Compress matrix by looking for identical columns.
Example usage: Convert a full set of (row or column) indices of a
sparse matrix into compressed storage.
Acknowledgement: The code is heavily inspired by MRST's function with the
same name, however, requirements on the shape of functions are probably
somewhat different.
Parameters:
A (np.ndarray): Matrix to be compressed. Should be 2d. Compression
will be along the second axis.
Returns:
np.ndarray: The compressed array, size n x m.
np.ndarray: Number of times each row in the first output array should
be repeated to restore the original array.
See also:
rldecode
"""
comp = A[::, 0:-1] != A[::, 1::]
i = np.any(comp, axis=0)
i = np.hstack((np.argwhere(i).ravel(), (A.shape[1] - 1)))
num = np.diff(np.hstack((np.array([-1]), i)))
return A[::, i], num
|
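A small worked example of the rlencode above, a sketch that assumes the function from this row is in scope and that numpy is installed:
import numpy as np

# Columns 0-2 are identical and columns 3-4 are identical, so rlencode keeps one
# representative column per run and reports how often to repeat it.
A = np.array([[1, 1, 1, 2, 2],
              [0, 0, 0, 3, 3]])
compressed, counts = rlencode(A)
print(compressed)  # [[1 2]
                   #  [0 3]]
print(counts)      # [3 2]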
45,016 |
def trigger_job_run(
account_id: int,
job_id: int,
token: str,
cause: str,
domain: str,
additional_args: dict,
) -> dict:
"""
Trigger a dbt Cloud job run
Args:
- account_id (int): dbt Cloud account ID
- job_id (int): dbt Cloud job ID
- token (str): dbt Cloud token
- cause (str): the reason describing why the job run is being triggered
- domain (str): the domain the function should call, default cloud.getdbt.com
- additional_args (dict): additional information to pass to the Trigger Job Run API
Returns:
- The trigger run result, namely the "data" key in the API response
Raises:
- TriggerDbtCloudRunFailed: when the response code is != 200
"""
data = additional_args if additional_args else {}
data["cause"] = cause
trigger_request = requests.post(
url=__DBT_CLOUD_TRIGGER_JOB_API_ENDPOINT_V2.format(
accountId=account_id, jobId=job_id, apiDomain=domain
),
headers={"Authorization": f"Bearer {token}"},
data=data,
)
if trigger_request.status_code != 200:
raise TriggerDbtCloudRunFailed(trigger_request.reason)
return trigger_request.json()["data"]
|
def trigger_job_run(
account_id: int,
job_id: int,
token: str,
cause: str,
domain: str,
additional_args: dict,
) -> dict:
"""
Trigger a dbt Cloud job run
Args:
- account_id (int): dbt Cloud account ID
- job_id (int): dbt Cloud job ID
- token (str): dbt Cloud token
- cause (str): the reason describing why the job run is being triggered
- domain (str): The domain the function should call (e.g. `cloud.getdbt.com`).
- additional_args (dict): additional information to pass to the Trigger Job Run API
Returns:
- The trigger run result, namely the "data" key in the API response
Raises:
- TriggerDbtCloudRunFailed: when the response code is != 200
"""
data = additional_args if additional_args else {}
data["cause"] = cause
trigger_request = requests.post(
url=__DBT_CLOUD_TRIGGER_JOB_API_ENDPOINT_V2.format(
accountId=account_id, jobId=job_id, apiDomain=domain
),
headers={"Authorization": f"Bearer {token}"},
data=data,
)
if trigger_request.status_code != 200:
raise TriggerDbtCloudRunFailed(trigger_request.reason)
return trigger_request.json()["data"]
|
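For context on the trigger_job_run pair above, a hedged standalone sketch of the same request made with requests directly; the account id, job id, token, and the v2 URL layout are placeholders and assumptions, not values taken from this row:
import requests

ACCOUNT_ID, JOB_ID, TOKEN = 1234, 5678, "dbt-cloud-api-token"  # placeholders
# Assumed dbt Cloud v2 endpoint layout; substitute your own domain if needed.
url = f"https://cloud.getdbt.com/api/v2/accounts/{ACCOUNT_ID}/jobs/{JOB_ID}/run/"
resp = requests.post(
    url,
    headers={"Authorization": f"Bearer {TOKEN}"},
    data={"cause": "Triggered from a script"},
)
if resp.status_code != 200:
    raise RuntimeError(resp.reason)
print(resp.json()["data"]["id"])  # id of the queued run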