id
int64 11
59.9k
| original
stringlengths 33
150k
| modified
stringlengths 37
150k
|
---|---|---|
10,368 |
def delegate_remote(args, exclude, require, integration_targets):
"""
:type args: EnvironmentConfig
:type exclude: list[str]
:type require: list[str]
:type integration_targets: tuple[IntegrationTarget]
"""
parts = args.remote.split('/', 1)
platform = parts[0]
version = parts[1]
core_ci = AnsibleCoreCI(args, platform, version, stage=args.remote_stage, provider=args.remote_provider)
success = False
raw = False
if isinstance(args, ShellConfig):
use_httptester = args.httptester
raw = args.raw
else:
use_httptester = args.httptester and any('needs/httptester/' in target.aliases for target in integration_targets)
if use_httptester and not docker_available():
display.warning('Assuming --disable-httptester since `docker` is not available.')
use_httptester = False
httptester_id = None
ssh_options = []
content_root = None
try:
core_ci.start()
if use_httptester:
httptester_id, ssh_options = start_httptester(args)
core_ci.wait()
python_version = get_python_version(args, get_remote_completion(), args.remote)
if platform == 'windows':
# Windows doesn't need the ansible-test fluff, just run the SSH command
manage = ManageWindowsCI(core_ci)
manage.setup(python_version)
cmd = ['powershell.exe']
elif raw:
manage = ManagePosixCI(core_ci)
manage.setup(python_version)
cmd = create_shell_command(['bash'])
else:
manage = ManagePosixCI(core_ci)
pwd = manage.setup(python_version)
options = {
'--remote': 1,
}
python_interpreter = get_python_interpreter(args, get_remote_completion(), args.remote)
install_root = os.path.join(pwd, 'ansible')
if data_context().content.collection:
content_root = os.path.join(install_root, data_context().content.collection.directory)
else:
content_root = install_root
cmd = generate_command(args, python_interpreter, os.path.join(install_root, 'bin'), content_root, options, exclude, require)
if httptester_id:
cmd += ['--inject-httptester']
if isinstance(args, TestConfig):
if args.coverage and not args.coverage_label:
cmd += ['--coverage-label', 'remote-%s-%s' % (platform, version)]
if isinstance(args, IntegrationConfig):
if not args.allow_destructive:
cmd.append('--allow-destructive')
# remote instances are only expected to have a single python version available
if isinstance(args, UnitsConfig) and not args.python:
cmd += ['--python', 'default']
if isinstance(args, IntegrationConfig):
cloud_platforms = get_cloud_providers(args)
for cloud_platform in cloud_platforms:
ssh_options += cloud_platform.get_remote_ssh_options()
try:
manage.ssh(cmd, ssh_options)
success = True
finally:
download = False
if platform != 'windows':
download = True
if isinstance(args, ShellConfig):
if args.raw:
download = False
if download and content_root:
local_test_root = os.path.dirname(os.path.join(data_context().content.root, data_context().content.results_path))
remote_results_root = os.path.join(content_root, data_context().content.results_path)
remote_results_name = os.path.basename(remote_results_root)
remote_temp_path = os.path.join('/tmp', remote_results_name)
# AIX cp and GNU cp provide different options, no way could be found to have a commen
# patttern and achieve the same goal
cp_opts = '-hr' if platform in ['aix', 'ibmi'] else '-a'
manage.ssh('rm -rf {0} && mkdir {0} && cp {1} {2}/* {0}/ && chmod -R a+r {0}'.format(remote_temp_path, cp_opts, remote_results_root))
manage.download(remote_temp_path, local_test_root)
finally:
if args.remote_terminate == 'always' or (args.remote_terminate == 'success' and success):
core_ci.stop()
if httptester_id:
docker_rm(args, httptester_id)
|
def delegate_remote(args, exclude, require, integration_targets):
"""
:type args: EnvironmentConfig
:type exclude: list[str]
:type require: list[str]
:type integration_targets: tuple[IntegrationTarget]
"""
parts = args.remote.split('/', 1)
platform = parts[0]
version = parts[1]
core_ci = AnsibleCoreCI(args, platform, version, stage=args.remote_stage, provider=args.remote_provider)
success = False
raw = False
if isinstance(args, ShellConfig):
use_httptester = args.httptester
raw = args.raw
else:
use_httptester = args.httptester and any('needs/httptester/' in target.aliases for target in integration_targets)
if use_httptester and not docker_available():
display.warning('Assuming --disable-httptester since `docker` is not available.')
use_httptester = False
httptester_id = None
ssh_options = []
content_root = None
try:
core_ci.start()
if use_httptester:
httptester_id, ssh_options = start_httptester(args)
core_ci.wait()
python_version = get_python_version(args, get_remote_completion(), args.remote)
if platform == 'windows':
# Windows doesn't need the ansible-test fluff, just run the SSH command
manage = ManageWindowsCI(core_ci)
manage.setup(python_version)
cmd = ['powershell.exe']
elif raw:
manage = ManagePosixCI(core_ci)
manage.setup(python_version)
cmd = create_shell_command(['bash'])
else:
manage = ManagePosixCI(core_ci)
pwd = manage.setup(python_version)
options = {
'--remote': 1,
}
python_interpreter = get_python_interpreter(args, get_remote_completion(), args.remote)
install_root = os.path.join(pwd, 'ansible')
if data_context().content.collection:
content_root = os.path.join(install_root, data_context().content.collection.directory)
else:
content_root = install_root
cmd = generate_command(args, python_interpreter, os.path.join(install_root, 'bin'), content_root, options, exclude, require)
if httptester_id:
cmd += ['--inject-httptester']
if isinstance(args, TestConfig):
if args.coverage and not args.coverage_label:
cmd += ['--coverage-label', 'remote-%s-%s' % (platform, version)]
if isinstance(args, IntegrationConfig):
if not args.allow_destructive:
cmd.append('--allow-destructive')
# remote instances are only expected to have a single python version available
if isinstance(args, UnitsConfig) and not args.python:
cmd += ['--python', 'default']
if isinstance(args, IntegrationConfig):
cloud_platforms = get_cloud_providers(args)
for cloud_platform in cloud_platforms:
ssh_options += cloud_platform.get_remote_ssh_options()
try:
manage.ssh(cmd, ssh_options)
success = True
finally:
download = False
if platform != 'windows':
download = True
if isinstance(args, ShellConfig):
if args.raw:
download = False
if download and content_root:
local_test_root = os.path.dirname(os.path.join(data_context().content.root, data_context().content.results_path))
remote_results_root = os.path.join(content_root, data_context().content.results_path)
remote_results_name = os.path.basename(remote_results_root)
remote_temp_path = os.path.join('/tmp', remote_results_name)
# AIX cp and GNU cp provide different options, no way could be found to have a commen
# pattern and achieve the same goal
cp_opts = '-hr' if platform in ['aix', 'ibmi'] else '-a'
manage.ssh('rm -rf {0} && mkdir {0} && cp {1} {2}/* {0}/ && chmod -R a+r {0}'.format(remote_temp_path, cp_opts, remote_results_root))
manage.download(remote_temp_path, local_test_root)
finally:
if args.remote_terminate == 'always' or (args.remote_terminate == 'success' and success):
core_ci.stop()
if httptester_id:
docker_rm(args, httptester_id)
|
36,251 |
def check_presence_download_untar(filename: Path, tarfilename: Path, backup_url):
"""Check if file is present otherwise download and untar."""
if not filename.is_file():
if not tarfilename.is_file():
from .readwrite import _download
_download(backup_url, tarfilename)
import tarfile
tarfile.open(tarfilename).extractall(tarfilename.parents[0])
|
def check_presence_download_untar(filename: Path, tarfilename: Path, backup_url: str):
"""Check if file is present otherwise download and untar."""
if not filename.is_file():
if not tarfilename.is_file():
from .readwrite import _download
_download(backup_url, tarfilename)
import tarfile
tarfile.open(tarfilename).extractall(tarfilename.parents[0])
|
31,984 |
def create_dbot_score_from_url_verdict(pretty_verdict):
if pretty_verdict["Verdict"] not in VERDICTS_TO_DBOTSCORE:
raise Exception('This URL verdict is not mapped to a DBotScore. Contact Demisto support for more information.')
dbot_score = [
{'Indicator': pretty_verdict['URL'],
'Type': 'url',
'Vendor': 'WildFire',
'Score': VERDICTS_TO_DBOTSCORE[pretty_verdict['Verdict']],
'Reliability': RELIABILITY
}
]
return dbot_score
|
def create_dbot_score_from_url_verdict(pretty_verdict):
if pretty_verdict["Verdict"] not in VERDICTS_TO_DBOTSCORE:
raise Exception('This URL verdict is not mapped to a DBotScore. Contact Demisto support for more information.')
dbot_score = [
{'Indicator': pretty_verdict['URL'],
'Type': 'url',
'Vendor': 'WildFire-v2',
'Score': VERDICTS_TO_DBOTSCORE[pretty_verdict['Verdict']],
'Reliability': RELIABILITY
}
]
return dbot_score
|
23,192 |
def _register_completer(name: str, func: str, pos="start", stack=None):
"""adds a new completer to xonsh
Parameters
----------
name
unique name to use in the listing (run "completer list" to see the
current completers in order)
func
the name of a completer function to use. This should be a function
of the following arguments, and should return a set of valid completions
for the given prefix. If this completer should not be used in a given
context, it should return an empty set or None.
Arguments to FUNC:
* prefix: the string to be matched
* line: a string representing the whole current line, for context
* begidx: the index at which prefix starts in line
* endidx: the index at which prefix ends in line
* ctx: the current Python environment
If the completer expands the prefix in any way, it should return a tuple
of two elements: the first should be the set of completions, and the
second should be the length of the modified prefix (for an example, see
xonsh.completers.path.complete_path).
pos
position into the list of completers at which the new
completer should be added. It can be one of the following values:
* "start" indicates that the completer should be added to the start of
the list of completers (it should be run before all others)
* "end" indicates that the completer should be added to the end of the
list of completers (it should be run after all others)
* ">KEY", where KEY is a pre-existing name, indicates that this should
be added after the completer named KEY
* "<KEY", where KEY is a pre-existing name, indicates that this should
be added before the completer named KEY
(Default value: "start")
"""
err = None
func_name = func
xsh = builtins.__xonsh__ # type: ignore
if name in xsh.completers:
err = "The name %s is already a registered completer function." % name
else:
if func_name in xsh.ctx:
func = xsh.ctx[func_name]
if not callable(func):
err = "%s is not callable" % func_name
else:
for frame_info in stack:
frame = frame_info[0]
if func_name in frame.f_locals:
func = frame.f_locals[func_name]
break
elif func_name in frame.f_globals:
func = frame.f_globals[func_name]
break
else:
err = "No such function: %s" % func_name
if err is None:
_add_one_completer(name, func, pos)
else:
return None, err + "\n", 1
|
def _register_completer(name: str, func: str, pos="start", stack=None):
"""adds a new completer to xonsh
Parameters
----------
name
unique name to use in the listing (run "completer list" to see the
current completers in order)
func
the name of a completer function to use. This should be a function
of the following arguments, and should return a set of valid completions
for the given prefix. If this completer should not be used in a given
context, it should return an empty set or None.
Arguments to FUNC:
* prefix: the string to be matched
* line: a string representing the whole current line, for context
* begidx: the index at which prefix starts in line
* endidx: the index at which prefix ends in line
* ctx: the current Python environment
If the completer expands the prefix in any way, it should return a tuple
of two elements: the first should be the set of completions, and the
second should be the length of the modified prefix (for an example, see
xonsh.completers.path.complete_path).
pos
position into the list of completers at which the new
completer should be added. It can be one of the following values:
* "start" indicates that the completer should be added to the start of
the list of completers (it should be run before all others)
* "end" indicates that the completer should be added to the end of the
list of completers (it should be run after all others)
* ">KEY", where KEY is a pre-existing name, indicates that this should
be added after the completer named KEY
* "<KEY", where KEY is a pre-existing name, indicates that this should
be added before the completer named KEY
(Default value: "start")
"""
err = None
func_name = func
xsh = builtins.__xonsh__ # type: ignore
if name in xsh.completers:
err = "The name %s is already a registered completer function." % name
else:
if func_name in xsh.ctx:
func = xsh.ctx[func_name]
if not callable(func):
err = f"{func_name} is not callable"
else:
for frame_info in stack:
frame = frame_info[0]
if func_name in frame.f_locals:
func = frame.f_locals[func_name]
break
elif func_name in frame.f_globals:
func = frame.f_globals[func_name]
break
else:
err = "No such function: %s" % func_name
if err is None:
_add_one_completer(name, func, pos)
else:
return None, err + "\n", 1
|
32,857 |
def get_traced_cache(ddtracer, service=DEFAULT_SERVICE, meta=None, cache_cls=None):
"""
Return a traced Cache object that behaves exactly as ``cache_cls``.
``cache_cls`` defaults to ``flask.ext.cache.Cache`` if Flask-Cache is installed
or ``flask_caching.Cache`` if flask-caching is installed.
"""
if cache_cls is None:
# for compatibility reason, first check if flask_cache is present
try:
from flask.ext.cache import Cache
cache_cls = Cache
except ImportError:
# use flask_caching if flask_cache if not
from flask_caching import Cache
cache_cls = Cache
class TracedCache(cache_cls):
"""
Traced cache backend that monitors any operations done by flask_cache. Observed actions are:
* get, set, add, delete, clear
* all ``many_`` operations
"""
_datadog_tracer = ddtracer
_datadog_service = service
_datadog_meta = meta
def __trace(self, cmd, write=False):
"""
Start a tracing with default attributes and tags
"""
# create a new span
s = self._datadog_tracer.trace(cmd, span_type=SpanTypes.CACHE, service=self._datadog_service)
s.set_tag(SPAN_MEASURED_KEY)
# set span tags
s.set_tag(CACHE_BACKEND, self.config.get("CACHE_TYPE"))
s.set_tags(self._datadog_meta)
# set analytics sample rate
s.set_tag(ANALYTICS_SAMPLE_RATE_KEY, config.flask_cache.get_analytics_sample_rate())
# add connection meta if there is one
client = _extract_client(self.cache)
if client is not None:
try:
s.set_tags(_extract_conn_tags(client))
except Exception:
log.debug("error parsing connection tags", exc_info=True)
return s
def get(self, *args, **kwargs):
"""
Track ``get`` operation
"""
with self.__trace("flask_cache.cmd") as span:
span.resource = _resource_from_cache_prefix("GET", self.config)
if len(args) > 0:
span.set_tag(COMMAND_KEY, args[0])
return super(TracedCache, self).get(*args, **kwargs)
def set(self, *args, **kwargs):
"""
Track ``set`` operation
"""
with self.__trace("flask_cache.cmd", write=True) as span:
span.resource = _resource_from_cache_prefix("SET", self.config)
if len(args) > 0:
span.set_tag(COMMAND_KEY, args[0])
return super(TracedCache, self).set(*args, **kwargs)
def add(self, *args, **kwargs):
"""
Track ``add`` operation
"""
with self.__trace("flask_cache.cmd", write=True) as span:
span.resource = _resource_from_cache_prefix("ADD", self.config)
if len(args) > 0:
span.set_tag(COMMAND_KEY, args[0])
return super(TracedCache, self).add(*args, **kwargs)
def delete(self, *args, **kwargs):
"""
Track ``delete`` operation
"""
with self.__trace("flask_cache.cmd", write=True) as span:
span.resource = _resource_from_cache_prefix("DELETE", self.config)
if len(args) > 0:
span.set_tag(COMMAND_KEY, args[0])
return super(TracedCache, self).delete(*args, **kwargs)
def delete_many(self, *args, **kwargs):
"""
Track ``delete_many`` operation
"""
with self.__trace("flask_cache.cmd", write=True) as span:
span.resource = _resource_from_cache_prefix("DELETE_MANY", self.config)
span.set_tag(COMMAND_KEY, list(args))
return super(TracedCache, self).delete_many(*args, **kwargs)
def clear(self, *args, **kwargs):
"""
Track ``clear`` operation
"""
with self.__trace("flask_cache.cmd", write=True) as span:
span.resource = _resource_from_cache_prefix("CLEAR", self.config)
return super(TracedCache, self).clear(*args, **kwargs)
def get_many(self, *args, **kwargs):
"""
Track ``get_many`` operation
"""
with self.__trace("flask_cache.cmd") as span:
span.resource = _resource_from_cache_prefix("GET_MANY", self.config)
span.set_tag(COMMAND_KEY, list(args))
return super(TracedCache, self).get_many(*args, **kwargs)
def set_many(self, *args, **kwargs):
"""
Track ``set_many`` operation
"""
with self.__trace("flask_cache.cmd", write=True) as span:
span.resource = _resource_from_cache_prefix("SET_MANY", self.config)
if len(args) > 0:
span.set_tag(COMMAND_KEY, list(args[0].keys()))
return super(TracedCache, self).set_many(*args, **kwargs)
return TracedCache
|
def get_traced_cache(ddtracer, service=DEFAULT_SERVICE, meta=None, cache_cls=None):
"""
Return a traced Cache object that behaves exactly as ``cache_cls``.
``cache_cls`` defaults to ``flask.ext.cache.Cache`` if Flask-Cache is installed
or ``flask_caching.Cache`` if flask-caching is installed.
"""
if cache_cls is None:
# for compatibility reason, first check if flask_cache is present
try:
from flask.ext.cache import Cache
cache_cls = Cache
except ImportError:
# use flask_caching if flask_cache is not present
from flask_caching import Cache
cache_cls = Cache
class TracedCache(cache_cls):
"""
Traced cache backend that monitors any operations done by flask_cache. Observed actions are:
* get, set, add, delete, clear
* all ``many_`` operations
"""
_datadog_tracer = ddtracer
_datadog_service = service
_datadog_meta = meta
def __trace(self, cmd, write=False):
"""
Start a tracing with default attributes and tags
"""
# create a new span
s = self._datadog_tracer.trace(cmd, span_type=SpanTypes.CACHE, service=self._datadog_service)
s.set_tag(SPAN_MEASURED_KEY)
# set span tags
s.set_tag(CACHE_BACKEND, self.config.get("CACHE_TYPE"))
s.set_tags(self._datadog_meta)
# set analytics sample rate
s.set_tag(ANALYTICS_SAMPLE_RATE_KEY, config.flask_cache.get_analytics_sample_rate())
# add connection meta if there is one
client = _extract_client(self.cache)
if client is not None:
try:
s.set_tags(_extract_conn_tags(client))
except Exception:
log.debug("error parsing connection tags", exc_info=True)
return s
def get(self, *args, **kwargs):
"""
Track ``get`` operation
"""
with self.__trace("flask_cache.cmd") as span:
span.resource = _resource_from_cache_prefix("GET", self.config)
if len(args) > 0:
span.set_tag(COMMAND_KEY, args[0])
return super(TracedCache, self).get(*args, **kwargs)
def set(self, *args, **kwargs):
"""
Track ``set`` operation
"""
with self.__trace("flask_cache.cmd", write=True) as span:
span.resource = _resource_from_cache_prefix("SET", self.config)
if len(args) > 0:
span.set_tag(COMMAND_KEY, args[0])
return super(TracedCache, self).set(*args, **kwargs)
def add(self, *args, **kwargs):
"""
Track ``add`` operation
"""
with self.__trace("flask_cache.cmd", write=True) as span:
span.resource = _resource_from_cache_prefix("ADD", self.config)
if len(args) > 0:
span.set_tag(COMMAND_KEY, args[0])
return super(TracedCache, self).add(*args, **kwargs)
def delete(self, *args, **kwargs):
"""
Track ``delete`` operation
"""
with self.__trace("flask_cache.cmd", write=True) as span:
span.resource = _resource_from_cache_prefix("DELETE", self.config)
if len(args) > 0:
span.set_tag(COMMAND_KEY, args[0])
return super(TracedCache, self).delete(*args, **kwargs)
def delete_many(self, *args, **kwargs):
"""
Track ``delete_many`` operation
"""
with self.__trace("flask_cache.cmd", write=True) as span:
span.resource = _resource_from_cache_prefix("DELETE_MANY", self.config)
span.set_tag(COMMAND_KEY, list(args))
return super(TracedCache, self).delete_many(*args, **kwargs)
def clear(self, *args, **kwargs):
"""
Track ``clear`` operation
"""
with self.__trace("flask_cache.cmd", write=True) as span:
span.resource = _resource_from_cache_prefix("CLEAR", self.config)
return super(TracedCache, self).clear(*args, **kwargs)
def get_many(self, *args, **kwargs):
"""
Track ``get_many`` operation
"""
with self.__trace("flask_cache.cmd") as span:
span.resource = _resource_from_cache_prefix("GET_MANY", self.config)
span.set_tag(COMMAND_KEY, list(args))
return super(TracedCache, self).get_many(*args, **kwargs)
def set_many(self, *args, **kwargs):
"""
Track ``set_many`` operation
"""
with self.__trace("flask_cache.cmd", write=True) as span:
span.resource = _resource_from_cache_prefix("SET_MANY", self.config)
if len(args) > 0:
span.set_tag(COMMAND_KEY, list(args[0].keys()))
return super(TracedCache, self).set_many(*args, **kwargs)
return TracedCache
|
6,636 |
def get_leaves_for_period(employee, leave_type, from_date, to_date, do_not_skip_expired_leaves=False):
leave_entries = get_leave_entries(employee, leave_type, from_date, to_date)
leave_days = 0
for leave_entry in leave_entries:
inclusive_period = leave_entry.from_date >= getdate(from_date) and leave_entry.to_date <= getdate(to_date)
if inclusive_period and leave_entry.transaction_type == 'Leave Encashment':
leave_days += leave_entry.leaves
elif inclusive_period and leave_entry.transaction_type == 'Leave Allocation' and leave_entry.is_expired \
and (do_not_skip_expired_leaves or not skip_expiry_leaves(leave_entry, to_date)):
leave_days += leave_entry.leaves
elif leave_entry.transaction_type == 'Leave Application':
if leave_entry.from_date < getdate(from_date):
leave_entry.from_date = from_date
if leave_entry.to_date > getdate(to_date):
leave_entry.to_date = to_date
half_day = 0
half_day_date = None
# fetch half day date for leaves with half days
if leave_entry.leaves % 1:
half_day = 1
half_day_date = frappe.db.get_value('Leave Application',
{'name': leave_entry.transaction_name}, ['half_day_date'])
leave_days += get_number_of_leave_days(employee, leave_type,
leave_entry.from_date, leave_entry.to_date, half_day, half_day_date, holiday_list=leave_entry.holiday_list) * -1
return leave_days
|
def get_leaves_for_period(employee, leave_type, from_date, to_date, do_not_skip_expired_leaves=False):
leave_entries = get_leave_entries(employee, leave_type, from_date, to_date)
leave_days = 0
for leave_entry in leave_entries:
inclusive_period = leave_entry.from_date >= getdate(from_date) and leave_entry.to_date <= getdate(to_date)
if inclusive_period and leave_entry.transaction_type == 'Leave Encashment':
leave_days += leave_entry.leaves
elif inclusive_period and leave_entry.transaction_type == 'Leave Allocation' and leave_entry.is_expired \
and (do_not_skip_expired_leaves or not skip_expiry_leaves(leave_entry, to_date)):
leave_days += leave_entry.leaves
elif leave_entry.transaction_type == 'Leave Application':
if leave_entry.from_date < getdate(from_date):
leave_entry.from_date = from_date
if leave_entry.to_date > getdate(to_date):
leave_entry.to_date = to_date
half_day = 0
half_day_date = None
# fetch half day date for leaves with half days
if leave_entry.leaves % 1:
half_day = 1
half_day_date = frappe.db.get_value('Leave Application',
{'name': leave_entry.transaction_name}, ['half_day_date'])
leave_days += get_number_of_leave_days(employee, leave_type,
leave_entry.from_date, leave_entry.to_date, half_day, half_day_date, holiday_list=leave_entry.holiday_list) * -1
return leave_days
|
39,675 |
def main():
module = ForemanEntityAnsibleModule(
entity_spec=dict(
name=dict(required=True),
host=dict(required=True),
port=dict(type='int', default=389),
account=dict(),
account_password=dict(no_log=True),
base_dn=dict(),
attr_login=dict(),
attr_firstname=dict(),
attr_lastname=dict(),
attr_mail=dict(),
attr_photo=dict(),
onthefly_register=dict(type='bool'),
usergroup_sync=dict(type='bool'),
tls=dict(type='bool'),
groups_base=dict(),
server_type=dict(choices=["free_ipa", "active_directory", "posix"]),
ldap_filter=dict(),
locations=dict(type='entity_list', flat_name='location_ids'),
organizations=dict(type='entity_list', flat_name='organization_ids'),
use_netgroups=dict(type='bool', default=False),
),
)
entity_dict = module.clean_params()
module.connect()
entity = module.find_resource_by_name('auth_source_ldaps', name=entity_dict['name'], failsafe=True)
if not module.desired_absent:
if 'locations' in entity_dict:
entity_dict['locations'] = module.find_resources_by_title('locations', entity_dict['locations'], thin=True)
if 'organizations' in entity_dict:
entity_dict['organizations'] = module.find_resources_by_name('organizations', entity_dict['organizations'], thin=True)
changed = module.ensure_entity_state('auth_source_ldaps', entity_dict, entity)
module.exit_json(changed=changed)
|
def main():
module = ForemanEntityAnsibleModule(
entity_spec=dict(
name=dict(required=True),
host=dict(required=True),
port=dict(type='int', default=389),
account=dict(),
account_password=dict(no_log=True),
base_dn=dict(),
attr_login=dict(),
attr_firstname=dict(),
attr_lastname=dict(),
attr_mail=dict(),
attr_photo=dict(),
onthefly_register=dict(type='bool'),
usergroup_sync=dict(type='bool'),
tls=dict(type='bool'),
groups_base=dict(),
server_type=dict(choices=["free_ipa", "active_directory", "posix"]),
ldap_filter=dict(),
locations=dict(type='entity_list', flat_name='location_ids'),
organizations=dict(type='entity_list', flat_name='organization_ids'),
use_netgroups=dict(type='bool'),
),
)
entity_dict = module.clean_params()
module.connect()
entity = module.find_resource_by_name('auth_source_ldaps', name=entity_dict['name'], failsafe=True)
if not module.desired_absent:
if 'locations' in entity_dict:
entity_dict['locations'] = module.find_resources_by_title('locations', entity_dict['locations'], thin=True)
if 'organizations' in entity_dict:
entity_dict['organizations'] = module.find_resources_by_name('organizations', entity_dict['organizations'], thin=True)
changed = module.ensure_entity_state('auth_source_ldaps', entity_dict, entity)
module.exit_json(changed=changed)
|
44,756 |
def _create_dockerfile(output_path, mlflow_path=None):
"""
Creates a Dockerfile containing additional Docker build steps to execute
when building the Azure container image. These build steps perform the following tasks:
- Install MLflow
:param output_path: The path where the Dockerfile will be written.
:param mlflow_path: Path to a local copy of the MLflow GitHub repository. If specified, the
Dockerfile command for MLflow installation will install MLflow from this
directory. Otherwise, it will install MLflow from pip.
"""
docker_cmds = ["RUN apt-get update && apt-get install default-jre"]
docker_cmds.append("RUN pip install azureml-sdk")
if mlflow_path is not None:
mlflow_install_cmd = "RUN pip install -e {mlflow_path}".format(
mlflow_path=_get_container_path(mlflow_path))
elif not mlflow_version.endswith("dev"):
mlflow_install_cmd = "RUN pip install mlflow=={mlflow_version}".format(
mlflow_version=mlflow_version)
else:
raise MlflowException(
"You are running a 'dev' version of MLflow: `{mlflow_version}` that cannot be"
" installed from pip. In order to build a container image, either specify the"
" path to a local copy of the MLflow GitHub repository using the `mlflow_home`"
" parameter or install a release version of MLflow from pip".format(
mlflow_version=mlflow_version))
docker_cmds.append(mlflow_install_cmd)
with open(output_path, "w") as f:
f.write("\n".join(docker_cmds))
|
def _create_dockerfile(output_path, mlflow_path=None):
"""
Creates a Dockerfile containing additional Docker build steps to execute
when building the Azure container image. These build steps perform the following tasks:
- Install MLflow
:param output_path: The path where the Dockerfile will be written.
:param mlflow_path: Path to a local copy of the MLflow GitHub repository. If specified, the
Dockerfile command for MLflow installation will install MLflow from this
directory. Otherwise, it will install MLflow from pip.
"""
docker_cmds = ["RUN apt-get update && apt-get install -y default-jre"]
docker_cmds.append("RUN pip install azureml-sdk")
if mlflow_path is not None:
mlflow_install_cmd = "RUN pip install -e {mlflow_path}".format(
mlflow_path=_get_container_path(mlflow_path))
elif not mlflow_version.endswith("dev"):
mlflow_install_cmd = "RUN pip install mlflow=={mlflow_version}".format(
mlflow_version=mlflow_version)
else:
raise MlflowException(
"You are running a 'dev' version of MLflow: `{mlflow_version}` that cannot be"
" installed from pip. In order to build a container image, either specify the"
" path to a local copy of the MLflow GitHub repository using the `mlflow_home`"
" parameter or install a release version of MLflow from pip".format(
mlflow_version=mlflow_version))
docker_cmds.append(mlflow_install_cmd)
with open(output_path, "w") as f:
f.write("\n".join(docker_cmds))
|
45,723 |
def reprojection(R_src, R_dst):
"""Reprojects precipitation fields to the domain of another precipiation
field.
Parameters
----------
R_src: xarray
Three-dimensional xarray with dimensions (t, x, y) containing a
time series of precipitation fields. These precipitaiton fields
will be reprojected.
R_dst: xarray
Xarray containing a precipitation field or a time series of precipitation
fields. The xarray R_src will be reprojected to the domain of R_dst.
Returns
-------
R_rprj: xarray
Three-dimensional xarray with dimensions (t, x, y) containing the
precipitation fields of R_src, but reprojected to the domain of
R_dst.
"""
# Extract the grid info from R_src
src_crs = R_src.attrs["projection"]
x1_src = R_src.x.attrs["x1"]
y2_src = R_src.y.attrs["y2"]
xpixelsize_src = R_src.attrs["xpixelsize"]
ypixelsize_src = R_src.attrs["ypixelsize"]
src_transform = A.translation(float(x1_src), float(y2_src)) * A.scale(
float(xpixelsize_src), float(-ypixelsize_src)
)
# Extract the grid info from R_dst
dst_crs = R_dst.attrs["projection"]
x1_dst = R_dst.x.attrs["x1"]
y2_dst = R_dst.y.attrs["y2"]
xpixelsize_dst = R_dst.attrs["xpixelsize"]
ypixelsize_dst = R_dst.attrs["ypixelsize"]
dst_transform = A.translation(float(x1_dst), float(y2_dst)) * A.scale(
float(xpixelsize_dst), float(-ypixelsize_dst)
)
# Initialise the reprojected (x)array
R_rprj = np.zeros((R_src.shape[0], R_dst.shape[-2], R_dst.shape[-1]))
# For every timestep, reproject the precipitation field of R_src to
# the domain of R_dst
if R_src.attrs["yorigin"] != R_dst.attrs["yorigin"]:
R_src = R_src[:, ::-1, :]
for i in range(R_src.shape[0]):
reproject(
R_src.values[i, :, :],
R_rprj[i, :, :],
src_transform=src_transform,
src_crs=src_crs,
dst_transform=dst_transform,
dst_crs=dst_crs,
resampling=Resampling.nearest,
dst_nodata=np.nan,
)
# Assign the necessary attributes from R_src and R_dst to R_rprj
R_rprj = xr.DataArray(
data=R_rprj,
dims=("t", "y", "x"),
coords=dict(
t=("t", R_src.coords["t"].data),
x=("x", R_dst.coords["x"].data),
y=("y", R_dst.coords["y"].data),
),
)
R_rprj.attrs.update(R_src.attrs)
R_rprj.x.attrs.update(R_dst.x.attrs)
R_rprj.y.attrs.update(R_dst.y.attrs)
for key in ["projection", "yorigin", "xpixelsize", "ypixelsize"]:
R_rprj.attrs[key] = R_dst.attrs[key]
return R_rprj
|
def reprojection(R_src, R_dst):
"""Reprojects precipitation fields to the domain of another precipitation
field.
Parameters
----------
R_src: xarray
Three-dimensional xarray with dimensions (t, x, y) containing a
time series of precipitation fields. These precipitaiton fields
will be reprojected.
R_dst: xarray
Xarray containing a precipitation field or a time series of precipitation
fields. The xarray R_src will be reprojected to the domain of R_dst.
Returns
-------
R_rprj: xarray
Three-dimensional xarray with dimensions (t, x, y) containing the
precipitation fields of R_src, but reprojected to the domain of
R_dst.
"""
# Extract the grid info from R_src
src_crs = R_src.attrs["projection"]
x1_src = R_src.x.attrs["x1"]
y2_src = R_src.y.attrs["y2"]
xpixelsize_src = R_src.attrs["xpixelsize"]
ypixelsize_src = R_src.attrs["ypixelsize"]
src_transform = A.translation(float(x1_src), float(y2_src)) * A.scale(
float(xpixelsize_src), float(-ypixelsize_src)
)
# Extract the grid info from R_dst
dst_crs = R_dst.attrs["projection"]
x1_dst = R_dst.x.attrs["x1"]
y2_dst = R_dst.y.attrs["y2"]
xpixelsize_dst = R_dst.attrs["xpixelsize"]
ypixelsize_dst = R_dst.attrs["ypixelsize"]
dst_transform = A.translation(float(x1_dst), float(y2_dst)) * A.scale(
float(xpixelsize_dst), float(-ypixelsize_dst)
)
# Initialise the reprojected (x)array
R_rprj = np.zeros((R_src.shape[0], R_dst.shape[-2], R_dst.shape[-1]))
# For every timestep, reproject the precipitation field of R_src to
# the domain of R_dst
if R_src.attrs["yorigin"] != R_dst.attrs["yorigin"]:
R_src = R_src[:, ::-1, :]
for i in range(R_src.shape[0]):
reproject(
R_src.values[i, :, :],
R_rprj[i, :, :],
src_transform=src_transform,
src_crs=src_crs,
dst_transform=dst_transform,
dst_crs=dst_crs,
resampling=Resampling.nearest,
dst_nodata=np.nan,
)
# Assign the necessary attributes from R_src and R_dst to R_rprj
R_rprj = xr.DataArray(
data=R_rprj,
dims=("t", "y", "x"),
coords=dict(
t=("t", R_src.coords["t"].data),
x=("x", R_dst.coords["x"].data),
y=("y", R_dst.coords["y"].data),
),
)
R_rprj.attrs.update(R_src.attrs)
R_rprj.x.attrs.update(R_dst.x.attrs)
R_rprj.y.attrs.update(R_dst.y.attrs)
for key in ["projection", "yorigin", "xpixelsize", "ypixelsize"]:
R_rprj.attrs[key] = R_dst.attrs[key]
return R_rprj
|
40,348 |
def to_captum(model: torch.nn.Module, mask_type: str = "edge",
node_idx: Optional[int] = None):
"""Convert a model to a model that can be used for Captum explainers."""
return CaptumModel(model, mask_type, node_idx)
|
def to_captum(model: torch.nn.Module, mask_type: str = "edge",
node_idx: Optional[int] = None) -> torch.nn.Module:
"""Convert a model to a model that can be used for Captum explainers."""
return CaptumModel(model, mask_type, node_idx)
|
41,717 |
def create_study(
storage=None, # type: Union[None, str, storages.BaseStorage]
sampler=None, # type: samplers.BaseSampler
pruner=None, # type: pruners.BasePruner
study_name=None, # type: Optional[str]
direction='minimize', # type: str
load_if_exists=False, # type: bool
force_garbage_collection=True, # type: bool
):
# type: (...) -> Study
"""Create a new :class:`~optuna.study.Study`.
Args:
storage:
Database URL. If this argument is set to None, in-memory storage is used, and the
:class:`~optuna.study.Study` will not be persistent.
sampler:
A sampler object that implements background algorithm for value suggestion.
If :obj:`None` is specified, :class:`~optuna.samplers.TPESampler` is used
as the default. See also :class:`~optuna.samplers`.
pruner:
A pruner object that decides early stopping of unpromising trials. See also
:class:`~optuna.pruners`.
study_name:
Study's name. If this argument is set to None, a unique name is generated
automatically.
direction:
Direction of optimization. Set ``minimize`` for minimization and ``maximize`` for
maximization.
load_if_exists:
Flag to control the behavior to handle a conflict of study names.
In the case where a study named ``study_name`` already exists in the ``storage``,
a :class:`~optuna.structs.DuplicatedStudyError` is raised if ``load_if_exists`` is
set to :obj:`False`.
Otherwise, the creation of the study is skipped, and the existing one is returned.
force_garbage_collection:
Flag to force gc.collect() for every trial.
Returns:
A :class:`~optuna.study.Study` object.
"""
storage = storages.get_storage(storage)
try:
study_id = storage.create_new_study(study_name)
except structs.DuplicatedStudyError:
if load_if_exists:
assert study_name is not None
logger = logging.get_logger(__name__)
logger.info("Using an existing study with name '{}' instead of "
"creating a new one.".format(study_name))
study_id = storage.get_study_id_from_name(study_name)
else:
raise
study_name = storage.get_study_name_from_id(study_id)
study = Study(
study_name=study_name,
storage=storage,
sampler=sampler,
pruner=pruner,
force_garbage_collection=force_garbage_collection)
if direction == 'minimize':
_direction = structs.StudyDirection.MINIMIZE
elif direction == 'maximize':
_direction = structs.StudyDirection.MAXIMIZE
else:
raise ValueError('Please set either \'minimize\' or \'maximize\' to direction.')
study._storage.set_study_direction(study_id, _direction)
return study
|
def create_study(
storage=None, # type: Union[None, str, storages.BaseStorage]
sampler=None, # type: samplers.BaseSampler
pruner=None, # type: pruners.BasePruner
study_name=None, # type: Optional[str]
direction='minimize', # type: str
load_if_exists=False, # type: bool
force_garbage_collection=True, # type: bool
):
# type: (...) -> Study
"""Create a new :class:`~optuna.study.Study`.
Args:
storage:
Database URL. If this argument is set to None, in-memory storage is used, and the
:class:`~optuna.study.Study` will not be persistent.
sampler:
A sampler object that implements background algorithm for value suggestion.
If :obj:`None` is specified, :class:`~optuna.samplers.TPESampler` is used
as the default. See also :class:`~optuna.samplers`.
pruner:
A pruner object that decides early stopping of unpromising trials. See also
:class:`~optuna.pruners`.
study_name:
Study's name. If this argument is set to None, a unique name is generated
automatically.
direction:
Direction of optimization. Set ``minimize`` for minimization and ``maximize`` for
maximization.
load_if_exists:
Flag to control the behavior to handle a conflict of study names.
In the case where a study named ``study_name`` already exists in the ``storage``,
a :class:`~optuna.structs.DuplicatedStudyError` is raised if ``load_if_exists`` is
set to :obj:`False`.
Otherwise, the creation of the study is skipped, and the existing one is returned.
force_garbage_collection:
Flag to force ``gc.collect()`` for every trial.
Returns:
A :class:`~optuna.study.Study` object.
"""
storage = storages.get_storage(storage)
try:
study_id = storage.create_new_study(study_name)
except structs.DuplicatedStudyError:
if load_if_exists:
assert study_name is not None
logger = logging.get_logger(__name__)
logger.info("Using an existing study with name '{}' instead of "
"creating a new one.".format(study_name))
study_id = storage.get_study_id_from_name(study_name)
else:
raise
study_name = storage.get_study_name_from_id(study_id)
study = Study(
study_name=study_name,
storage=storage,
sampler=sampler,
pruner=pruner,
force_garbage_collection=force_garbage_collection)
if direction == 'minimize':
_direction = structs.StudyDirection.MINIMIZE
elif direction == 'maximize':
_direction = structs.StudyDirection.MAXIMIZE
else:
raise ValueError('Please set either \'minimize\' or \'maximize\' to direction.')
study._storage.set_study_direction(study_id, _direction)
return study
|
12,026 |
def load_meshes(uris, var_name=None):
"""
Create :class:`Mesh` objects from one or more NetCDF files.
Parameters
----------
uris : str or iterable of str
One or more filenames/URI's. Any URI's must support OpenDAP.
var_name : str, optional
Only return a :class:`Mesh` if its var_name matches this value.
Returns
-------
dict
A dictionary of file paths/URL's and lists of the :class:`Mesh`es
returned from each.
"""
# No constraints or callbacks supported - these assume they are operating
# on a Cube.
from iris.fileformats import FORMAT_AGENT
# TODO: rationalise UGRID/mesh handling once experimental.ugrid is folded
# into standard behaviour.
if not PARSE_UGRID_ON_LOAD:
# Explicit behaviour, consistent with netcdf.load_cubes(), rather than
# an invisible assumption.
message = (
f"PARSE_UGRID_ON_LOAD is {bool(PARSE_UGRID_ON_LOAD)}. Must be "
f"True to enable mesh loading."
)
raise ValueError(message)
if isinstance(uris, str):
uris = [uris]
# Group collections of uris by their iris handler
# Create list of tuples relating schemes to part names.
uri_tuples = sorted(decode_uri(uri) for uri in uris)
valid_sources = []
for scheme, groups in groupby(uri_tuples, key=lambda x: x[0]):
# Call each scheme handler with the appropriate URIs
if scheme == "file":
filenames = [x[1] for x in groups]
sources = expand_filespecs(filenames)
elif scheme in ["http", "https"]:
sources = [":".join(x) for x in groups]
else:
message = f"Iris cannot handle the URI scheme: {scheme}"
raise ValueError(message)
for source in sources:
if scheme == "file":
with open(source, "rb") as fh:
handling_format_spec = FORMAT_AGENT.get_spec(
Path(source).name, fh
)
else:
handling_format_spec = FORMAT_AGENT.get_spec(source, None)
if handling_format_spec.handler == netcdf.load_cubes:
valid_sources.append(source)
else:
message = f"Ignoring non-NetCDF file: {source}"
logger.info(msg=message, extra=dict(cls=None))
result = {}
for source in valid_sources:
meshes_dict = _meshes_from_cf(CFUGridReader(source))
meshes = list(meshes_dict.values())
if var_name is not None:
meshes = list(filter(lambda m: m.var_name == var_name, meshes))
if meshes:
result[source] = meshes
return result
|
def load_meshes(uris, var_name=None):
"""
Create :class:`Mesh` objects from one or more NetCDF files.
Parameters
----------
uris : str or iterable of str
One or more filenames/URI's. Any URI's must support OpenDAP.
var_name : str, optional
Only return :class:`Mesh`'s whose var_name matches this.
Returns
-------
dict
A dictionary of file paths/URL's and lists of the :class:`Mesh`es
returned from each.
"""
# No constraints or callbacks supported - these assume they are operating
# on a Cube.
from iris.fileformats import FORMAT_AGENT
# TODO: rationalise UGRID/mesh handling once experimental.ugrid is folded
# into standard behaviour.
if not PARSE_UGRID_ON_LOAD:
# Explicit behaviour, consistent with netcdf.load_cubes(), rather than
# an invisible assumption.
message = (
f"PARSE_UGRID_ON_LOAD is {bool(PARSE_UGRID_ON_LOAD)}. Must be "
f"True to enable mesh loading."
)
raise ValueError(message)
if isinstance(uris, str):
uris = [uris]
# Group collections of uris by their iris handler
# Create list of tuples relating schemes to part names.
uri_tuples = sorted(decode_uri(uri) for uri in uris)
valid_sources = []
for scheme, groups in groupby(uri_tuples, key=lambda x: x[0]):
# Call each scheme handler with the appropriate URIs
if scheme == "file":
filenames = [x[1] for x in groups]
sources = expand_filespecs(filenames)
elif scheme in ["http", "https"]:
sources = [":".join(x) for x in groups]
else:
message = f"Iris cannot handle the URI scheme: {scheme}"
raise ValueError(message)
for source in sources:
if scheme == "file":
with open(source, "rb") as fh:
handling_format_spec = FORMAT_AGENT.get_spec(
Path(source).name, fh
)
else:
handling_format_spec = FORMAT_AGENT.get_spec(source, None)
if handling_format_spec.handler == netcdf.load_cubes:
valid_sources.append(source)
else:
message = f"Ignoring non-NetCDF file: {source}"
logger.info(msg=message, extra=dict(cls=None))
result = {}
for source in valid_sources:
meshes_dict = _meshes_from_cf(CFUGridReader(source))
meshes = list(meshes_dict.values())
if var_name is not None:
meshes = list(filter(lambda m: m.var_name == var_name, meshes))
if meshes:
result[source] = meshes
return result
|
51,786 |
def install(parser, args, **kwargs):
if args.help_cdash:
parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,
epilog=textwrap.dedent('''\
environment variables:
SPACK_CDASH_AUTH_TOKEN
authentication token to present to CDash
'''))
arguments.add_cdash_args(parser, True)
parser.print_help()
return
# The user wants to monitor builds using github.com/spack/spack-monitor
if args.use_monitor:
monitor = spack.monitor.get_client(
host=args.monitor_host,
prefix=args.monitor_prefix,
disable_auth=args.monitor_disable_auth,
tags=args.monitor_tags,
save_local=args.monitor_save_local,
)
reporter = spack.report.collect_info(
spack.package.PackageInstaller, '_install_task', args.log_format, args)
if args.log_file:
reporter.filename = args.log_file
if args.run_tests:
tty.warn("Deprecated option: --run-tests: use --test=all instead")
def get_tests(specs):
if args.test == 'all' or args.run_tests:
return True
elif args.test == 'root':
return [spec.name for spec in specs]
else:
return False
if not args.spec and not args.specfiles:
# if there are no args but an active environment
# then install the packages from it.
env = ev.get_env(args, 'install')
if env:
tests = get_tests(env.user_specs)
kwargs['tests'] = tests
if not args.only_concrete:
with env.write_transaction():
concretized_specs = env.concretize(tests=tests)
ev.display_specs(concretized_specs)
# save view regeneration for later, so that we only do it
# once, as it can be slow.
env.write(regenerate=False)
specs = env.all_specs()
if not args.log_file and not reporter.filename:
reporter.filename = default_log_file(specs[0])
reporter.specs = specs
# Tell the monitor about the specs
if args.use_monitor and specs:
monitor.new_configuration(specs)
tty.msg("Installing environment {0}".format(env.name))
with reporter('build'):
env.install_all(args, **kwargs)
tty.debug("Regenerating environment views for {0}"
.format(env.name))
with env.write_transaction():
# write env to trigger view generation and modulefile
# generation
env.write()
return
else:
msg = "install requires a package argument or active environment"
if 'spack.yaml' in os.listdir(os.getcwd()):
# There's a spack.yaml file in the working dir, the user may
# have intended to use that
msg += "\n\n"
msg += "Did you mean to install using the `spack.yaml`"
msg += " in this directory? Try: \n"
msg += " spack env activate .\n"
msg += " spack install\n"
msg += " OR\n"
msg += " spack --env . install"
tty.die(msg)
if args.no_checksum:
spack.config.set('config:checksum', False, scope='command_line')
if args.deprecated:
spack.config.set('config:deprecated', True, scope='command_line')
# Parse cli arguments and construct a dictionary
# that will be passed to the package installer
update_kwargs_from_args(args, kwargs)
# 1. Abstract specs from cli
abstract_specs = spack.cmd.parse_specs(args.spec)
tests = get_tests(abstract_specs)
kwargs['tests'] = tests
try:
specs = spack.cmd.parse_specs(
args.spec, concretize=True, tests=tests)
except SpackError as e:
tty.debug(e)
reporter.concretization_report(e.message)
raise
# 2. Concrete specs from yaml files
for file in args.specfiles:
with open(file, 'r') as f:
if file.endswith('json'):
s = spack.spec.Spec.from_json(f)
elif file.endswith('yaml'):
s = spack.spec.Spec.from_yaml(f)
concretized = s.concretized()
tty.debug('reconcretized dag hash:', concretized.dag_hash())
tty.debug('original dag hash:', s.dag_hash())
if concretized.dag_hash() != s.dag_hash():
msg = 'skipped invalid file "{0}". '
msg += 'The file does not contain a concrete spec.'
tty.warn(msg.format(file))
continue
abstract_specs.append(s)
specs.append(concretized)
if len(specs) == 0:
tty.die('The `spack install` command requires a spec to install.')
if not args.log_file and not reporter.filename:
reporter.filename = default_log_file(specs[0])
reporter.specs = specs
with reporter('build'):
if args.overwrite:
installed = list(filter(lambda x: x,
map(spack.store.db.query_one, specs)))
if not args.yes_to_all:
display_args = {
'long': True,
'show_flags': True,
'variants': True
}
if installed:
tty.msg('The following package specs will be '
'reinstalled:\n')
spack.cmd.display_specs(installed, **display_args)
not_installed = list(filter(lambda x: x not in installed,
specs))
if not_installed:
tty.msg('The following package specs are not installed and'
' the --overwrite flag was given. The package spec'
' will be newly installed:\n')
spack.cmd.display_specs(not_installed, **display_args)
# We have some specs, so one of the above must have been true
answer = tty.get_yes_or_no(
'Do you want to proceed?', default=False
)
if not answer:
tty.die('Reinstallation aborted.')
# overwrite all concrete explicit specs from this build
kwargs['overwrite'] = [spec.dag_hash() for spec in specs]
# Update install_args with the monitor args, needed for build task
kwargs.update({
"monitor_disable_auth": args.monitor_disable_auth,
"monitor_keep_going": args.monitor_keep_going,
"monitor_host": args.monitor_host,
"use_monitor": args.use_monitor,
"monitor_prefix": args.monitor_prefix,
})
# If we are using the monitor, we send configs. and create build
# The full_hash is the main package id, the build_hash for others
if args.use_monitor and specs:
monitor.new_configuration(specs)
install_specs(args, kwargs, zip(abstract_specs, specs))
|
def install(parser, args, **kwargs):
if args.help_cdash:
parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,
epilog=textwrap.dedent('''\
environment variables:
SPACK_CDASH_AUTH_TOKEN
authentication token to present to CDash
'''))
arguments.add_cdash_args(parser, True)
parser.print_help()
return
# The user wants to monitor builds using github.com/spack/spack-monitor
if args.use_monitor:
monitor = spack.monitor.get_client(
host=args.monitor_host,
prefix=args.monitor_prefix,
disable_auth=args.monitor_disable_auth,
tags=args.monitor_tags,
save_local=args.monitor_save_local,
)
reporter = spack.report.collect_info(
spack.package.PackageInstaller, '_install_task', args.log_format, args)
if args.log_file:
reporter.filename = args.log_file
if args.run_tests:
tty.warn("Deprecated option: --run-tests: use --test=all instead")
def get_tests(specs):
if args.test == 'all' or args.run_tests:
return True
elif args.test == 'root':
return [spec.name for spec in specs]
else:
return False
if not args.spec and not args.specfiles:
# if there are no args but an active environment
# then install the packages from it.
env = ev.get_env(args, 'install')
if env:
tests = get_tests(env.user_specs)
kwargs['tests'] = tests
if not args.only_concrete:
with env.write_transaction():
concretized_specs = env.concretize(tests=tests)
ev.display_specs(concretized_specs)
# save view regeneration for later, so that we only do it
# once, as it can be slow.
env.write(regenerate=False)
specs = env.all_specs()
if not args.log_file and not reporter.filename:
reporter.filename = default_log_file(specs[0])
reporter.specs = specs
# Tell the monitor about the specs
if args.use_monitor and specs:
monitor.new_configuration(specs)
tty.msg("Installing environment {0}".format(env.name))
with reporter('build'):
env.install_all(args, **kwargs)
tty.debug("Regenerating environment views for {0}"
.format(env.name))
with env.write_transaction():
# write env to trigger view generation and modulefile
# generation
env.write()
return
else:
msg = "install requires a package argument or active environment"
if 'spack.yaml' in os.listdir(os.getcwd()):
# There's a spack.yaml file in the working dir, the user may
# have intended to use that
msg += "\n\n"
msg += "Did you mean to install using the `spack.yaml`"
msg += " in this directory? Try: \n"
msg += " spack env activate .\n"
msg += " spack install\n"
msg += " OR\n"
msg += " spack --env . install"
tty.die(msg)
if args.no_checksum:
spack.config.set('config:checksum', False, scope='command_line')
if args.deprecated:
spack.config.set('config:deprecated', True, scope='command_line')
# Parse cli arguments and construct a dictionary
# that will be passed to the package installer
update_kwargs_from_args(args, kwargs)
# 1. Abstract specs from cli
abstract_specs = spack.cmd.parse_specs(args.spec)
tests = get_tests(abstract_specs)
kwargs['tests'] = tests
try:
specs = spack.cmd.parse_specs(
args.spec, concretize=True, tests=tests)
except SpackError as e:
tty.debug(e)
reporter.concretization_report(e.message)
raise
# 2. Concrete specs from yaml files
for file in args.specfiles:
with open(file, 'r') as f:
if file.endswith('yaml') or file.endswith('yml'):
s = spack.spec.Spec.from_yaml(f)
else:
s = spack.spec.Spec.from_json(f)
concretized = s.concretized()
tty.debug('reconcretized dag hash:', concretized.dag_hash())
tty.debug('original dag hash:', s.dag_hash())
if concretized.dag_hash() != s.dag_hash():
msg = 'skipped invalid file "{0}". '
msg += 'The file does not contain a concrete spec.'
tty.warn(msg.format(file))
continue
abstract_specs.append(s)
specs.append(concretized)
if len(specs) == 0:
tty.die('The `spack install` command requires a spec to install.')
if not args.log_file and not reporter.filename:
reporter.filename = default_log_file(specs[0])
reporter.specs = specs
with reporter('build'):
if args.overwrite:
installed = list(filter(lambda x: x,
map(spack.store.db.query_one, specs)))
if not args.yes_to_all:
display_args = {
'long': True,
'show_flags': True,
'variants': True
}
if installed:
tty.msg('The following package specs will be '
'reinstalled:\n')
spack.cmd.display_specs(installed, **display_args)
not_installed = list(filter(lambda x: x not in installed,
specs))
if not_installed:
tty.msg('The following package specs are not installed and'
' the --overwrite flag was given. The package spec'
' will be newly installed:\n')
spack.cmd.display_specs(not_installed, **display_args)
# We have some specs, so one of the above must have been true
answer = tty.get_yes_or_no(
'Do you want to proceed?', default=False
)
if not answer:
tty.die('Reinstallation aborted.')
# overwrite all concrete explicit specs from this build
kwargs['overwrite'] = [spec.dag_hash() for spec in specs]
# Update install_args with the monitor args, needed for build task
kwargs.update({
"monitor_disable_auth": args.monitor_disable_auth,
"monitor_keep_going": args.monitor_keep_going,
"monitor_host": args.monitor_host,
"use_monitor": args.use_monitor,
"monitor_prefix": args.monitor_prefix,
})
# If we are using the monitor, we send configs. and create build
# The full_hash is the main package id, the build_hash for others
if args.use_monitor and specs:
monitor.new_configuration(specs)
install_specs(args, kwargs, zip(abstract_specs, specs))
|
23,111 |
def merge_percentiles(
finalq, qs, vals, interpolation="lower", Ns=None, raise_on_nan=True
):
"""Combine several percentile calculations of different data.
Parameters
----------
finalq : numpy.array
Percentiles to compute (must use same scale as ``qs``).
qs : sequence of :class:`numpy.array`s
Percentiles calculated on different sets of data.
vals : sequence of :class:`numpy.array`s
Resulting values associated with percentiles ``qs``.
Ns : sequence of integers
The number of data elements associated with each data set.
interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}
Specify the type of interpolation to use to calculate final
percentiles. For more information, see :func:`numpy.percentile`.
Examples
--------
>>> finalq = [10, 20, 30, 40, 50, 60, 70, 80]
>>> qs = [[20, 40, 60, 80], [20, 40, 60, 80]]
>>> vals = [np.array([1, 2, 3, 4]), np.array([10, 11, 12, 13])]
>>> Ns = [100, 100] # Both original arrays had 100 elements
>>> merge_percentiles(finalq, qs, vals, Ns=Ns)
array([ 1, 2, 3, 4, 10, 11, 12, 13])
"""
from .utils import array_safe
if isinstance(finalq, Iterator):
finalq = list(finalq)
finalq = array_safe(finalq, like=finalq)
qs = list(map(list, qs))
vals = list(vals)
if Ns is None:
vals, Ns = zip(*vals)
Ns = list(Ns)
L = list(zip(*[(q, val, N) for q, val, N in zip(qs, vals, Ns) if N]))
if not L:
if raise_on_nan:
raise ValueError("No non-trivial arrays found")
return np.full((len(qs[0]) - 2,), np.nan)
qs, vals, Ns = L
# TODO: Perform this check above in percentile once dtype checking is easy
# Here we silently change meaning
if vals[0].dtype.name == "category":
result = merge_percentiles(
finalq, qs, [v.codes for v in vals], interpolation, Ns, raise_on_nan
)
import pandas as pd
return pd.Categorical.from_codes(result, vals[0].categories, vals[0].ordered)
if not np.issubdtype(vals[0].dtype, np.number):
interpolation = "nearest"
if len(vals) != len(qs) or len(Ns) != len(qs):
raise ValueError("qs, vals, and Ns parameters must be the same length")
# transform qs and Ns into number of observations between percentiles
counts = []
for q, N in zip(qs, Ns):
count = np.empty_like(finalq, shape=len(q))
count[1:] = np.diff(array_safe(q, like=q[0]))
count[0] = q[0]
count *= N
counts.append(count)
# Sort by calculated percentile values, then number of observations.
combined_vals = np.concatenate(vals)
combined_counts = array_safe(np.concatenate(counts), like=combined_vals)
sort_order = np.argsort(combined_vals)
combined_vals = np.take(combined_vals, sort_order)
combined_counts = np.take(combined_counts, sort_order)
# percentile-like, but scaled by total number of observations
combined_q = np.cumsum(combined_counts)
# rescale finalq percentiles to match combined_q
finalq = array_safe(finalq, like=combined_vals)
desired_q = finalq * sum(Ns)
# the behavior of different interpolation methods should be
# investigated further.
if interpolation == "linear":
rv = np.interp(desired_q, combined_q, combined_vals)
else:
left = np.searchsorted(combined_q, desired_q, side="left")
right = np.searchsorted(combined_q, desired_q, side="right") - 1
np.minimum(left, len(combined_vals) - 1, left) # don't exceed max index
lower = np.minimum(left, right)
upper = np.maximum(left, right)
if interpolation == "lower":
rv = combined_vals[lower]
elif interpolation == "higher":
rv = combined_vals[upper]
elif interpolation == "midpoint":
rv = 0.5 * (combined_vals[lower] + combined_vals[upper])
elif interpolation == "nearest":
lower_residual = np.abs(combined_q[lower] - desired_q)
upper_residual = np.abs(combined_q[upper] - desired_q)
mask = lower_residual > upper_residual
index = lower # alias; we no longer need lower
index[mask] = upper[mask]
rv = combined_vals[index]
else:
raise ValueError(
"interpolation can only be 'linear', 'lower', "
"'higher', 'midpoint', or 'nearest'"
)
return rv
|
def merge_percentiles(
finalq, qs, vals, interpolation="lower", Ns=None, raise_on_nan=True
):
"""Combine several percentile calculations of different data.
Parameters
----------
finalq : numpy.array
Percentiles to compute (must use same scale as ``qs``).
qs : sequence of :class:`numpy.array`s
Percentiles calculated on different sets of data.
vals : sequence of :class:`numpy.array`s
Resulting values associated with percentiles ``qs``.
Ns : sequence of integers
The number of data elements associated with each data set.
interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}
Specify the type of interpolation to use to calculate final
percentiles. For more information, see :func:`numpy.percentile`.
Examples
--------
>>> finalq = [10, 20, 30, 40, 50, 60, 70, 80]
>>> qs = [[20, 40, 60, 80], [20, 40, 60, 80]]
>>> vals = [np.array([1, 2, 3, 4]), np.array([10, 11, 12, 13])]
>>> Ns = [100, 100] # Both original arrays had 100 elements
>>> merge_percentiles(finalq, qs, vals, Ns=Ns)
array([ 1, 2, 3, 4, 10, 11, 12, 13])
"""
from .utils import array_safe
if isinstance(finalq, Iterator):
finalq = list(finalq)
finalq = array_safe(finalq, like=finalq)
qs = list(map(list, qs))
vals = list(vals)
if Ns is None:
vals, Ns = zip(*vals)
Ns = list(Ns)
L = list(zip(*[(q, val, N) for q, val, N in zip(qs, vals, Ns) if N]))
if not L:
if raise_on_nan:
raise ValueError("No non-trivial arrays found")
return np.full(len(qs[0]) - 2, np.nan)
qs, vals, Ns = L
# TODO: Perform this check above in percentile once dtype checking is easy
# Here we silently change meaning
if vals[0].dtype.name == "category":
result = merge_percentiles(
finalq, qs, [v.codes for v in vals], interpolation, Ns, raise_on_nan
)
import pandas as pd
return pd.Categorical.from_codes(result, vals[0].categories, vals[0].ordered)
if not np.issubdtype(vals[0].dtype, np.number):
interpolation = "nearest"
if len(vals) != len(qs) or len(Ns) != len(qs):
raise ValueError("qs, vals, and Ns parameters must be the same length")
# transform qs and Ns into number of observations between percentiles
counts = []
for q, N in zip(qs, Ns):
count = np.empty_like(finalq, shape=len(q))
count[1:] = np.diff(array_safe(q, like=q[0]))
count[0] = q[0]
count *= N
counts.append(count)
# Sort by calculated percentile values, then number of observations.
combined_vals = np.concatenate(vals)
combined_counts = array_safe(np.concatenate(counts), like=combined_vals)
sort_order = np.argsort(combined_vals)
combined_vals = np.take(combined_vals, sort_order)
combined_counts = np.take(combined_counts, sort_order)
# percentile-like, but scaled by total number of observations
combined_q = np.cumsum(combined_counts)
# rescale finalq percentiles to match combined_q
finalq = array_safe(finalq, like=combined_vals)
desired_q = finalq * sum(Ns)
# the behavior of different interpolation methods should be
# investigated further.
if interpolation == "linear":
rv = np.interp(desired_q, combined_q, combined_vals)
else:
left = np.searchsorted(combined_q, desired_q, side="left")
right = np.searchsorted(combined_q, desired_q, side="right") - 1
np.minimum(left, len(combined_vals) - 1, left) # don't exceed max index
lower = np.minimum(left, right)
upper = np.maximum(left, right)
if interpolation == "lower":
rv = combined_vals[lower]
elif interpolation == "higher":
rv = combined_vals[upper]
elif interpolation == "midpoint":
rv = 0.5 * (combined_vals[lower] + combined_vals[upper])
elif interpolation == "nearest":
lower_residual = np.abs(combined_q[lower] - desired_q)
upper_residual = np.abs(combined_q[upper] - desired_q)
mask = lower_residual > upper_residual
index = lower # alias; we no longer need lower
index[mask] = upper[mask]
rv = combined_vals[index]
else:
raise ValueError(
"interpolation can only be 'linear', 'lower', "
"'higher', 'midpoint', or 'nearest'"
)
return rv
|
40,081 |
def upgrade_db(code_versions: dict, db_versions: dict, upgrade_module):
"""
Run the upgrade process for the given module. Raises exception on errors, caller must handle end exit cleanly.
Expects that the db has been initialized already via call to init_database() or similar
:param code_versions: dict with versions for the code found installed
:param db_versions: dict with versions for the versions found stored in the db (typically returned from init_database() call
:param upgrade_module:
:return: running db_version after upgrade
"""
# Load the module for upgrade (provides the upgrade routines etc
module = upgrade_module
versions_tuple = needs_upgrade(code_versions, db_versions)
if versions_tuple:
code_db_version = versions_tuple[0]
running_db_version = versions_tuple[1]
logger.info("Detected anchore-engine version {}, running DB version {}.".format(code_db_version, running_db_version))
logger.info("Performing upgrade.")
# perform the upgrade logic here
rc = module.run_upgrade()
if rc:
logger.info("Upgrade completed")
else:
logger.info("No upgrade necessary. Completed.")
else:
logger.info("Code and DB versions are in sync. No upgrade required")
return True
|
def upgrade_db(code_versions: dict, db_versions: dict, upgrade_module):
"""
Run the upgrade process for the given module. Raises exception on errors, caller must handle end exit cleanly.
Expects that the db has been initialized already via call to init_database() or similar
:param code_versions: dict with versions for the code found installed
:param db_versions: dict with versions for the versions found stored in the db (typically returned from init_database() call
:param upgrade_module:
:return: running db_version after upgrade
"""
# Load the module for upgrade (provides the upgrade routines etc
module = upgrade_module
versions_tuple = needs_upgrade(code_versions, db_versions)
if versions_tuple:
code_db_version = versions_tuple[0]
running_db_version = versions_tuple[1]
logger.info("Detected anchore-engine version %s, running DB version %s.", code_db_version, running_db_version)
logger.info("Performing upgrade.")
# perform the upgrade logic here
rc = module.run_upgrade()
if rc:
logger.info("Upgrade completed")
else:
logger.info("No upgrade necessary. Completed.")
else:
logger.info("Code and DB versions are in sync. No upgrade required")
return True
|
21,846 |
def load_appservices(
hostname: str, config_files: List[str]
) -> List[ApplicationService]:
"""Returns a list of Application Services from the config files."""
if not isinstance(config_files, list):
# type-ignore: this function gets arbitrary json value; we do use this path.
logger.warning("Expected %s to be a list of AS config files.", config_files) # type: ignore[unreachable]
return []
# Dicts of value -> filename
seen_as_tokens: Dict[str, str] = {}
seen_ids: Dict[str, str] = {}
appservices = []
for config_file in config_files:
try:
with open(config_file) as f:
appservice = _load_appservice(hostname, yaml.safe_load(f), config_file)
if appservice.id in seen_ids:
raise ConfigError(
"Cannot reuse ID across application services: "
"%s (files: %s, %s)"
% (appservice.id, config_file, seen_ids[appservice.id])
)
seen_ids[appservice.id] = config_file
if appservice.token in seen_as_tokens:
raise ConfigError(
"Cannot reuse as_token across application services: "
"%s (files: %s, %s)"
% (
appservice.token,
config_file,
seen_as_tokens[appservice.token],
)
)
seen_as_tokens[appservice.token] = config_file
logger.info("Loaded application service: %s", appservice)
appservices.append(appservice)
except Exception as e:
logger.error("Failed to load appservice from '%s'", config_file, exc_info=e)
raise
return appservices
|
def load_appservices(
hostname: str, config_files: List[str]
) -> List[ApplicationService]:
"""Returns a list of Application Services from the config files."""
if not isinstance(config_files, list):
# type-ignore: this function gets arbitrary json value; we do use this path.
logger.warning("Expected %s to be a list of AS config files.", config_files) # type: ignore[unreachable]
return []
# Dicts of value -> filename
seen_as_tokens: Dict[str, str] = {}
seen_ids: Dict[str, str] = {}
appservices = []
for config_file in config_files:
try:
with open(config_file) as f:
appservice = _load_appservice(hostname, yaml.safe_load(f), config_file)
if appservice.id in seen_ids:
raise ConfigError(
"Cannot reuse ID across application services: "
"%s (files: %s, %s)"
% (appservice.id, config_file, seen_ids[appservice.id])
)
seen_ids[appservice.id] = config_file
if appservice.token in seen_as_tokens:
raise ConfigError(
"Cannot reuse as_token across application services: "
"%s (files: %s, %s)"
% (
appservice.token,
config_file,
seen_as_tokens[appservice.token],
)
)
seen_as_tokens[appservice.token] = config_file
logger.info("Loaded application service: %s", appservice)
appservices.append(appservice)
except Exception as e:
logger.exception("Failed to load appservice from '%s'", config_file)
raise
return appservices
|
39,074 |
def create_ssl_context(
certfile: Union[Path, str],
keyfile: Optional[str],
password: Optional[str],
ssl_version: int,
cert_reqs: int,
ca_certs: Optional[str],
ciphers: Optional[str],
) -> ssl.SSLContext:
ctx = ssl.SSLContext(ssl_version)
get_password = (lambda: password) if password else None
ctx.load_cert_chain(certfile, keyfile, get_password)
ctx.verify_mode = cert_reqs
if ca_certs:
ctx.load_verify_locations(ca_certs)
if ciphers:
ctx.set_ciphers(ciphers)
return ctx
|
def create_ssl_context(
certfile: StrPath,
keyfile: Optional[str],
password: Optional[str],
ssl_version: int,
cert_reqs: int,
ca_certs: Optional[str],
ciphers: Optional[str],
) -> ssl.SSLContext:
ctx = ssl.SSLContext(ssl_version)
get_password = (lambda: password) if password else None
ctx.load_cert_chain(certfile, keyfile, get_password)
ctx.verify_mode = cert_reqs
if ca_certs:
ctx.load_verify_locations(ca_certs)
if ciphers:
ctx.set_ciphers(ciphers)
return ctx
|
32,379 |
def fetch_incidents(args):
client = aws_session(
region=args.get('region'),
roleArn=args.get('roleArn'),
roleSessionName=args.get('roleSessionName'),
roleSessionDuration=args.get('roleSessionDuration'),
)
last_fetch = demisto.getLastRun()
first_fetch = demisto.params().get('first_fetch')
attribute_key = demisto.params().get('AttributeKey')
if not attribute_key:
attribute_key = 'EventName'
attribute_value = demisto.params().get('AttributeValue')
fetch_start_time = calculate_fetch_start_time(last_fetch, first_fetch)
incidents = []
incident_created_time = fetch_start_time
kwargs = {
'LookupAttributes': [{
'AttributeKey': attribute_key,
'AttributeValue': attribute_value
}]
}
kwargs.update({'StartTime': fetch_start_time})
client.lookup_events(**kwargs)
paginator = client.get_paginator('lookup_events')
for response in paginator.paginate(**kwargs):
for i, event in enumerate(response['Events']):
incident = {
'EventId': event.get('EventId'),
'Name': event.get('EventName'),
'EventTime': handle_returning_date_to_string(event.get('EventTime', '01-01-01T00:00:00')),
'EventSource': event.get('EventSource'),
'ResourceName': event.get('Resources')[0].get('ResourceName') if event.get('Resources') else None,
'ResourceType': event.get('Resources')[0].get('ResourceType') if event.get('Resources') else None,
'CloudTrailEvent': event.get('CloudTrailEvent'),
'Username': event.get('Username'),
'rawJSON': json.dumps(event, indent=4, sort_keys=True, default=str)
}
incidents.append(incident)
incident_created_time = (event.get('EventTime', '01-01-01T00:00:00') + timedelta(seconds=1)).timestamp()
if incident_created_time > fetch_start_time:
last_fetch = str(incident_created_time)
demisto.setLastRun(last_fetch)
demisto.incidents(incidents)
|
def fetch_incidents(args):
client = aws_session(
region=args.get('region'),
roleArn=args.get('roleArn'),
roleSessionName=args.get('roleSessionName'),
roleSessionDuration=args.get('roleSessionDuration'),
)
last_fetch = demisto.getLastRun()
first_fetch = demisto.params().get('first_fetch')
attribute_key = params.get('AttributeKey')
if not attribute_key:
attribute_key = 'EventName'
attribute_value = demisto.params().get('AttributeValue')
fetch_start_time = calculate_fetch_start_time(last_fetch, first_fetch)
incidents = []
incident_created_time = fetch_start_time
kwargs = {
'LookupAttributes': [{
'AttributeKey': attribute_key,
'AttributeValue': attribute_value
}]
}
kwargs.update({'StartTime': fetch_start_time})
client.lookup_events(**kwargs)
paginator = client.get_paginator('lookup_events')
for response in paginator.paginate(**kwargs):
for i, event in enumerate(response['Events']):
incident = {
'EventId': event.get('EventId'),
'Name': event.get('EventName'),
'EventTime': handle_returning_date_to_string(event.get('EventTime', '01-01-01T00:00:00')),
'EventSource': event.get('EventSource'),
'ResourceName': event.get('Resources')[0].get('ResourceName') if event.get('Resources') else None,
'ResourceType': event.get('Resources')[0].get('ResourceType') if event.get('Resources') else None,
'CloudTrailEvent': event.get('CloudTrailEvent'),
'Username': event.get('Username'),
'rawJSON': json.dumps(event, indent=4, sort_keys=True, default=str)
}
incidents.append(incident)
incident_created_time = (event.get('EventTime', '01-01-01T00:00:00') + timedelta(seconds=1)).timestamp()
if incident_created_time > fetch_start_time:
last_fetch = str(incident_created_time)
demisto.setLastRun(last_fetch)
demisto.incidents(incidents)
|
12,199 |
def _supplement_index_with_system(index):
cuda_version = context.cuda_version
if cuda_version is not None:
rec = _make_virtual_package('__cuda', cuda_version)
index[rec] = rec
libc_family, libc_version = linux_get_libc_version()
if libc_family == "glibc":
rec = _make_virtual_package('__glibc', libc_version)
index[rec] = rec
|
def _supplement_index_with_system(index):
cuda_version = context.cuda_version
if cuda_version is not None:
rec = _make_virtual_package('__cuda', cuda_version)
index[rec] = rec
libc_family, libc_version = linux_get_libc_version()
if libc_family == "glibc":
rec = _make_virtual_package('__' + libc_family, libc_version)
index[rec] = rec
|
45,905 |
def raw_to_rgb(image: torch.Tensor, cfa: CFA) -> torch.Tensor:
r"""Convert a raw bayer image to RGB version of image. We are assuming a CFA
with 2 green, 1 red, 1 blue. A bilinear interpolation is used for R/G and a fix convolution
for the green pixels. To simplify calculations we expect the Height Widht to be evenly divisible by 2
The image data is assumed to be in the range of (0, 1). Image H/W is assumed to be evenly divisible by 2
for simplicity reasons
Args:
image: raw image to be converted to RGB with shape :math:`(*,1,H,W)`.
cfa: The configuration of the color filter.
Returns:
RGB version of the image with shape :math:`(*,3,H,W)`.
Example:
>>> rawinput = torch.randn(2, 1, 4, 6)
>>> rgb = raw_to_rgb(rawinput) # 2x3x4x5
"""
if not isinstance(image, torch.Tensor):
raise TypeError(f"Input type is not a torch.Tensor. " f"Got {type(image)}")
if image.dim() < 3 or image.size(-3) != 1:
raise ValueError(f"Input size must have a shape of (*, 1, H, W). " f"Got {image.shape}.")
if len(image.shape) < 2 or image.shape[-2] % 2 == 1 or image.shape[-1] % 2 == 1:
raise ValueError(f"Input H&W must be evenly disible by 2. Got {image.shape}")
dosqueeze = False
# for compatibility with pytorch funcitons, make sure we are always 4 dimensions and
# strip the extra at the end, if necessary
if len(image.shape) == 3:
image = image.unsqueeze(0)
dosqueeze = True
# BG is defined as pel 1,1 being blue, that is the top left is actually green. This matches
# opencv naming so makes sense to keep
if cfa == CFA.BG:
r = image[..., :, ::2, ::2]
b = image[..., :, 1::2, 1::2]
rpad = (0, 1, 0, 1)
bpad = (1, 0, 1, 0)
elif cfa == CFA.GB:
r = image[..., :, ::2, 1::2]
b = image[..., :, 1::2, ::2]
rpad = (1, 0, 0, 1)
bpad = (0, 1, 1, 0)
elif cfa == CFA.RG:
r = image[..., :, 1::2, 1::2]
b = image[..., :, ::2, ::2]
rpad = (1, 0, 1, 0)
bpad = (0, 1, 0, 1)
elif cfa == CFA.GR:
r = image[..., :, 1::2, ::2]
b = image[..., :, ::2, 1::2]
rpad = (0, 1, 1, 0)
bpad = (1, 0, 0, 1)
else:
raise ValueError(f"Unsupported CFA " f"Got {cfa}.")
# upscaling r and b with bi-linear gives reasonable quality
# Note that depending on where these are sampled we need to pad appropriately
# the bilinear filter will pretty much be based on for example this layout (RG)
# (which needs to be padded bottom right)
# +-+-+
# |B| |
# | | |
# +-+-+
# While in this layout we need to pad with additional B samples top left to
# make sure we interpolate from the correct position
# +-+-+
# | | |
# | |B|
# +-+-+
# For an image like this (3x2 blue pixels)
# +------+
# |B B B |
# | |
# |B B B |
# | |
# +------+
# It needs to be expanded to this (4x3 pixels scaled to 7x5 for correct interpolation)
# +-------+
# |B B B b|
# | |
# |B B B b|
# | |
# |b b b b|
# +-------+
# and we crop the area afterwards. This is since the interpolation will be between first and last pixel
# evenly spaced between them while the B/R samples will be missing in the corners were they are assumed to exist
# Further we need to do align_corners to start the interpolation from the middle of the samples in the corners, that
# way we get to keep the knwon blue samples across the whole image
rpadded = torch.nn.functional.pad(r, rpad, 'replicate')
bpadded = torch.nn.functional.pad(b, bpad, 'replicate')
# use explicit padding instead of conv2d padding to be able to use reflect which mirror the correct colors
# for a 2x2 bayer filter
gpadded = torch.nn.functional.pad(image, (1, 1, 1, 1), 'reflect')
ru = torch.nn.functional.interpolate(rpadded, size=(image.shape[-2] + 1, image.shape[-1] + 1),
mode='bilinear', align_corners=True)
bu = torch.nn.functional.interpolate(bpadded, size=(image.shape[-2] + 1, image.shape[-1] + 1),
mode='bilinear', align_corners=True)
# remove the extra padding
ru = torch.nn.functional.pad(ru, [-x for x in rpad])
bu = torch.nn.functional.pad(bu, [-x for x in bpad])
# all unknown pixels are the average of the nearby green samples
kernel = torch.tensor([[[[0.0, 0.25, 0.0], [0.25, 0.0, 0.25], [0.0, 0.25, 0.0]]]],
dtype=image.dtype, device=image.device)
# This is done on all samples but result for the known green samples is then overwritten by the input
gu = torch.nn.functional.conv2d(gpadded, kernel, padding='valid')
# overwrite the already known samples which otherwise have values from r/b
# this depends on the CFA configuration
if cfa == CFA.BG:
gu[:, :, ::2, 1::2] = image[:, :, ::2, 1::2]
gu[:, :, 1::2, ::2] = image[:, :, 1::2, ::2]
elif cfa == CFA.GB:
gu[:, :, ::2, ::2] = image[:, :, ::2, ::2]
gu[:, :, 1::2, 1::2] = image[:, :, 1::2, 1::2]
elif cfa == CFA.RG:
gu[:, :, 1::2, ::2] = image[:, :, 1::2, ::2]
gu[:, :, ::2, 1::2] = image[:, :, ::2, 1::2]
elif cfa == CFA.GR:
gu[:, :, 1::2, 1::2] = image[:, :, 1::2, 1::2]
gu[:, :, ::2, ::2] = image[:, :, ::2, ::2]
else:
raise ValueError(f"Unsupported CFA " f"Got {cfa}.")
rgb: torch.Tensor = torch.cat([ru, gu, bu], dim=-3)
# return possibly missing batch dim
if dosqueeze:
rgb = rgb.squeeze(0)
return rgb
|
def raw_to_rgb(image: torch.Tensor, cfa: CFA) -> torch.Tensor:
r"""Convert a raw bayer image to RGB version of image. We are assuming a CFA
with 2 green, 1 red, 1 blue. A bilinear interpolation is used for R/G and a fix convolution
for the green pixels. To simplify calculations we expect the Height Widht to be evenly divisible by 2
The image data is assumed to be in the range of (0, 1). Image H/W is assumed to be evenly divisible by 2
for simplicity reasons
Args:
image: raw image to be converted to RGB with shape :math:`(*,1,H,W)`.
cfa: The configuration of the color filter.
Returns:
RGB version of the image with shape :math:`(*,3,H,W)`.
Example:
>>> rawinput = torch.randn(2, 1, 4, 6)
>>> rgb = raw_to_rgb(rawinput) # 2x3x4x5
"""
if not isinstance(image, torch.Tensor):
raise TypeError(f"Input type is not a torch.Tensor. " f"Got {type(image)}")
if image.dim() < 3 or image.size(-3) != 1:
raise ValueError(f"Input size must have a shape of (*, 1, H, W). " f"Got {image.shape}.")
if len(image.shape) < 2 or image.shape[-2] % 2 == 1 or image.shape[-1] % 2 == 1:
raise ValueError(f"Input H&W must be evenly disible by 2. Got {image.shape}")
dosqueeze = False
# for compatibility with pytorch funcitons, make sure we are always 4 dimensions and
# strip the extra at the end, if necessary
if len(image.shape) == 3:
image = image.unsqueeze(0)
dosqueeze = True
# BG is defined as pel 1,1 being blue, that is the top left is actually green. This matches
# opencv naming so makes sense to keep
if cfa == CFA.BG:
r = image[..., :, ::2, ::2]
b = image[..., :, 1::2, 1::2]
rpad = (0, 1, 0, 1)
bpad = (1, 0, 1, 0)
elif cfa == CFA.GB:
r = image[..., :, ::2, 1::2]
b = image[..., :, 1::2, ::2]
rpad = (1, 0, 0, 1)
bpad = (0, 1, 1, 0)
elif cfa == CFA.RG:
r = image[..., :, 1::2, 1::2]
b = image[..., :, ::2, ::2]
rpad = (1, 0, 1, 0)
bpad = (0, 1, 0, 1)
elif cfa == CFA.GR:
r = image[..., :, 1::2, ::2]
b = image[..., :, ::2, 1::2]
rpad = (0, 1, 1, 0)
bpad = (1, 0, 0, 1)
else:
raise ValueError(f"Unsupported CFA " f"Got {cfa}.")
# upscaling r and b with bi-linear gives reasonable quality
# Note that depending on where these are sampled we need to pad appropriately
# the bilinear filter will pretty much be based on for example this layout (RG)
# (which needs to be padded bottom right)
# +-+-+
# |B| |
# | | |
# +-+-+
# While in this layout we need to pad with additional B samples top left to
# make sure we interpolate from the correct position
# +-+-+
# | | |
# | |B|
# +-+-+
# For an image like this (3x2 blue pixels)
# +------+
# |B B B |
# | |
# |B B B |
# | |
# +------+
# It needs to be expanded to this (4x3 pixels scaled to 7x5 for correct interpolation)
# +-------+
# |B B B b|
# | |
# |B B B b|
# | |
# |b b b b|
# +-------+
# and we crop the area afterwards. This is since the interpolation will be between first and last pixel
# evenly spaced between them while the B/R samples will be missing in the corners were they are assumed to exist
# Further we need to do align_corners to start the interpolation from the middle of the samples in the corners, that
# way we get to keep the knwon blue samples across the whole image
rpadded = torch.nn.functional.pad(r, rpad, 'replicate')
bpadded = torch.nn.functional.pad(b, bpad, 'replicate')
# use explicit padding instead of conv2d padding to be able to use reflect which mirror the correct colors
# for a 2x2 bayer filter
gpadded = torch.nn.functional.pad(image, (1, 1, 1, 1), 'reflect')
ru = torch.nn.functional.interpolate(rpadded, size=(image.shape[-2] + 1, image.shape[-1] + 1),
mode='bilinear', align_corners=True)
bu = torch.nn.functional.interpolate(bpadded, size=(image.shape[-2] + 1, image.shape[-1] + 1),
mode='bilinear', align_corners=True)
# remove the extra padding
ru = torch.nn.functional.pad(ru, [-x for x in rpad])
bu = torch.nn.functional.pad(bu, [-x for x in bpad])
# all unknown pixels are the average of the nearby green samples
kernel = torch.tensor([[[[0.0, 0.25, 0.0], [0.25, 0.0, 0.25], [0.0, 0.25, 0.0]]]],
dtype=image.dtype, device=image.device)
# This is done on all samples but result for the known green samples is then overwritten by the input
gu = torch.nn.functional.conv2d(gpadded, kernel, padding='valid')
# overwrite the already known samples which otherwise have values from r/b
# this depends on the CFA configuration
if cfa == CFA.BG:
gu[:, :, ::2, 1::2] = image[:, :, ::2, 1::2]
gu[:, :, 1::2, ::2] = image[:, :, 1::2, ::2]
elif cfa == CFA.GB:
gu[:, :, ::2, ::2] = image[:, :, ::2, ::2]
gu[:, :, 1::2, 1::2] = image[:, :, 1::2, 1::2]
elif cfa == CFA.RG:
gu[:, :, 1::2, ::2] = image[:, :, 1::2, ::2]
gu[:, :, ::2, 1::2] = image[:, :, ::2, 1::2]
elif cfa == CFA.GR:
gu[:, :, 1::2, 1::2] = image[:, :, 1::2, 1::2]
gu[:, :, ::2, ::2] = image[:, :, ::2, ::2]
else:
raise ValueError(f"Unsupported CFA " f"Got {cfa}.")
rgb: torch.Tensor = torch.cat([ru, gu, bu], dim=-3)
# return possibly missing batch dim
if dosqueeze:
rgb = rgb.squeeze(0)
return rgb.view_as(image)
|
7,429 |
def stain_color_matrix(colors):
"""Creates a stain color matrix for a combination of stains.
This routine knows some common stains, their colors are taken from
other tools implementing stain unmixing, but will likely not exactly
match the colors of the stains in your image. This is because the
color of a stain depends on many factors, including the chemistry,
the microscope light source, and the RGB camera capturing the image.
It is always best to measure your stain colors.
Known stains are:
"Hematoxylin"
"Eosin"
"DAB"
"AEC"
"Alcian Blue"
"Aniline Blue"
"Azocarmine"
"FastBlue"
"FastRed"
"Feulgen"
"Light Green"
"Methyl Blue"
"Methyl Green"
"Orange-G"
"PAS"
"Ponceau Fuchsin"
See separate_stains() and combine_stains().
Parameters
----------
colors : iterable with 1 to 3 elements. Each element must be either a
string for a known stain name (see below) or an RGB triplet in the
form of an iterable.
Returns
-------
out : (..., 3) ndarray
The stain color matrix, an Nx3 matrix, where N is the length of the
input `colors`.
Raises
------
ValueError
If `colors` contains an unknown stain name or an illegal RGB triplet,
or if `colors` is empty or has more than 3 elements.
References
----------
.. [1] https://web.archive.org/web/20160624145052/http://www.mecourse.com/landinig/software/cdeconv/cdeconv.html
"""
# Following matrices are adapted form the Java code written by G.Landini.
# https://web.archive.org/web/20160624145052/http://www.mecourse.com/landinig/software/cdeconv/cdeconv.html
# Similar values can be found in CellProfiler:
# https://github.com/CellProfiler/CellProfiler/blob/master/cellprofiler/modules/unmixcolors.py
stain_colors = {
"Hematoxylin": (0.650, 0.704, 0.286),
"Eosin": (0.092789, 0.954111, 0.283111),
"DAB": (0.268, 0.570, 0.776),
"AEC": (0.2743, 0.6796, 0.6803),
"Alcian Blue": (0.552556, 0.7544, 0.353744),
"Aniline Blue": (0.853033, 0.508733, 0.112656),
"Azocarmine": (0.09289875, 0.8662008, 0.49098468),
"FastBlue": (0.74890292, 0.60624161, 0.26731082),
"FastRed": (0.21393921, 0.85112669, 0.47794022),
"Feulgen": (0.46420921, 0.83008335, 0.30827187),
"Light Green": (0.94705542, 0.25373821, 0.19650764),
"Methyl Blue": (0.7995107, 0.5913521, 0.10528667),
"Methyl Green": (0.98003, 0.144316, 0.133146),
"Orange-G": (0.10732849, 0.36765403, 0.9237484),
"PAS": (0.175411, 0.972178, 0.154589),
"Ponceau Fuchsin": (0.09997159, 0.73738605, 0.6680326),
}
N = len(colors)
if N < 1 or N > 3:
msg = (f'the input `colors` must have between 1 and 3 elements, got {N}')
raise ValueError(msg)
out = np.zeros((N, 3))
for ii, val in enumerate(colors):
if isinstance(val, str):
if not val in stain_colors:
msg = (f'the input `colors` contains {val}, which I do not recognize as a stain')
raise ValueError(msg)
val = stain_colors[val]
else:
if len(val) != 3 or not all(isinstance(v, float) for v in val):
msg = (f'the input `colors` contains {val}, which is not an RGB triplet')
raise ValueError(msg)
norm = np.linalg.norm(val)
val = [v / norm for v in val]
out[ii, :] = val
return out
|
def stain_color_matrix(colors):
"""Create a stain color matrix for a combination of stains.
This routine knows some common stains, their colors are taken from
other tools implementing stain unmixing, but will likely not exactly
match the colors of the stains in your image. This is because the
color of a stain depends on many factors, including the chemistry,
the microscope light source, and the RGB camera capturing the image.
It is always best to measure your stain colors.
Known stains are:
"Hematoxylin"
"Eosin"
"DAB"
"AEC"
"Alcian Blue"
"Aniline Blue"
"Azocarmine"
"FastBlue"
"FastRed"
"Feulgen"
"Light Green"
"Methyl Blue"
"Methyl Green"
"Orange-G"
"PAS"
"Ponceau Fuchsin"
See separate_stains() and combine_stains().
Parameters
----------
colors : iterable with 1 to 3 elements. Each element must be either a
string for a known stain name (see below) or an RGB triplet in the
form of an iterable.
Returns
-------
out : (..., 3) ndarray
The stain color matrix, an Nx3 matrix, where N is the length of the
input `colors`.
Raises
------
ValueError
If `colors` contains an unknown stain name or an illegal RGB triplet,
or if `colors` is empty or has more than 3 elements.
References
----------
.. [1] https://web.archive.org/web/20160624145052/http://www.mecourse.com/landinig/software/cdeconv/cdeconv.html
"""
# Following matrices are adapted form the Java code written by G.Landini.
# https://web.archive.org/web/20160624145052/http://www.mecourse.com/landinig/software/cdeconv/cdeconv.html
# Similar values can be found in CellProfiler:
# https://github.com/CellProfiler/CellProfiler/blob/master/cellprofiler/modules/unmixcolors.py
stain_colors = {
"Hematoxylin": (0.650, 0.704, 0.286),
"Eosin": (0.092789, 0.954111, 0.283111),
"DAB": (0.268, 0.570, 0.776),
"AEC": (0.2743, 0.6796, 0.6803),
"Alcian Blue": (0.552556, 0.7544, 0.353744),
"Aniline Blue": (0.853033, 0.508733, 0.112656),
"Azocarmine": (0.09289875, 0.8662008, 0.49098468),
"FastBlue": (0.74890292, 0.60624161, 0.26731082),
"FastRed": (0.21393921, 0.85112669, 0.47794022),
"Feulgen": (0.46420921, 0.83008335, 0.30827187),
"Light Green": (0.94705542, 0.25373821, 0.19650764),
"Methyl Blue": (0.7995107, 0.5913521, 0.10528667),
"Methyl Green": (0.98003, 0.144316, 0.133146),
"Orange-G": (0.10732849, 0.36765403, 0.9237484),
"PAS": (0.175411, 0.972178, 0.154589),
"Ponceau Fuchsin": (0.09997159, 0.73738605, 0.6680326),
}
N = len(colors)
if N < 1 or N > 3:
msg = (f'the input `colors` must have between 1 and 3 elements, got {N}')
raise ValueError(msg)
out = np.zeros((N, 3))
for ii, val in enumerate(colors):
if isinstance(val, str):
if not val in stain_colors:
msg = (f'the input `colors` contains {val}, which I do not recognize as a stain')
raise ValueError(msg)
val = stain_colors[val]
else:
if len(val) != 3 or not all(isinstance(v, float) for v in val):
msg = (f'the input `colors` contains {val}, which is not an RGB triplet')
raise ValueError(msg)
norm = np.linalg.norm(val)
val = [v / norm for v in val]
out[ii, :] = val
return out
|
7,733 |
def test_tally(capi_init):
t = openmc.capi.tallies[1]
assert t.type == 'volume'
assert len(t.filters) == 2
assert isinstance(t.filters[0], openmc.capi.MaterialFilter)
assert isinstance(t.filters[1], openmc.capi.EnergyFilter)
# Create new filter and replace existing
with pytest.raises(exc.AllocationError):
openmc.capi.MaterialFilter(uid=1)
mats = openmc.capi.materials
f = openmc.capi.MaterialFilter([mats[2], mats[1]])
assert f.bins[0] == mats[2]
assert f.bins[1] == mats[1]
t.filters = [f]
assert t.filters == [f]
assert t.nuclides == ['U235', 'U238']
with pytest.raises(exc.DataError):
t.nuclides = ['Zr2']
t.nuclides = ['U234', 'Zr90']
assert t.nuclides == ['U234', 'Zr90']
assert t.scores == ['total', '(n,elastic)', '(n,gamma)']
new_scores = ['scatter', 'fission', 'nu-fission', '(n,2n)']
t.scores = new_scores
assert t.scores == new_scores
t2 = openmc.capi.tallies[2]
assert len(t2.filters) == 2
assert isinstance(t2.filters[0], openmc.capi.ZernikeFilter)
assert isinstance(t2.filters[1], openmc.capi.CellFilter)
assert len(t2.filters[1].bins) == 3
assert t2.filters[0].order == 5
t3 = openmc.capi.tallies[3]
assert len(t3.filters) == 1
t3_f = t3.filters[0]
assert isinstance(t3_f, openmc.capi.EnergyFunctionFilter)
assert len(t3_f.energy) == 2
assert len(t3_f.y) == 2
t3_f.set_interp_data([0.0, 1.0, 2.0], [0.0, 1.0, 4.0])
assert len(t3_f.energy) == 3
assert len(t3_f.y) == 3
|
def test_tally(capi_init):
t = openmc.capi.tallies[1]
assert t.type == 'volume'
assert len(t.filters) == 2
assert isinstance(t.filters[0], openmc.capi.MaterialFilter)
assert isinstance(t.filters[1], openmc.capi.EnergyFilter)
# Create new filter and replace existing
with pytest.raises(exc.AllocationError):
openmc.capi.MaterialFilter(uid=1)
mats = openmc.capi.materials
f = openmc.capi.MaterialFilter([mats[2], mats[1]])
assert f.bins[0] == mats[2]
assert f.bins[1] == mats[1]
t.filters = [f]
assert t.filters == [f]
assert t.nuclides == ['U235', 'U238']
with pytest.raises(exc.DataError):
t.nuclides = ['Zr2']
t.nuclides = ['U234', 'Zr90']
assert t.nuclides == ['U234', 'Zr90']
assert t.scores == ['total', '(n,elastic)', '(n,gamma)']
new_scores = ['scatter', 'fission', 'nu-fission', '(n,2n)']
t.scores = new_scores
assert t.scores == new_scores
t2 = openmc.capi.tallies[2]
assert len(t2.filters) == 2
assert isinstance(t2.filters[0], openmc.capi.ZernikeFilter)
assert isinstance(t2.filters[1], openmc.capi.CellFilter)
assert len(t2.filters[1].bins) == 3
assert t2.filters[0].order == 5
t3 = openmc.capi.tallies[3]
assert len(t3.filters) == 1
t3_f = t3.filters[0]
assert isinstance(t3_f, openmc.capi.EnergyFunctionFilter)
assert len(t3_f.energy) == 2
assert len(t3_f.y) == 2
t3_f.set_data([0.0, 1.0, 2.0], [0.0, 1.0, 4.0])
assert len(t3_f.energy) == 3
assert len(t3_f.y) == 3
|
48,364 |
def main():
module = AnsibleModule(
argument_spec=dict(
state=dict(type='str', default='present', choices=['absent', 'build-dep', 'fixed', 'latest', 'present']),
update_cache=dict(type='bool', aliases=['update-cache']),
cache_valid_time=dict(type='int', default=0),
purge=dict(type='bool', default=False),
package=dict(type='list', aliases=['pkg', 'name']),
deb=dict(type='path'),
default_release=dict(type='str', aliases=['default-release']),
install_recommends=dict(type='bool', aliases=['install-recommends']),
force=dict(type='bool', default=False),
upgrade=dict(type='str', choices=['dist', 'full', 'no', 'safe', 'yes']),
dpkg_options=dict(type='str', default=DPKG_OPTIONS),
autoremove=dict(type='bool', default=False),
autoclean=dict(type='bool', default=False),
policy_rc_d=dict(type='int', default=None),
only_upgrade=dict(type='bool', default=False),
force_apt_get=dict(type='bool', default=False),
allow_unauthenticated=dict(type='bool', default=False, aliases=['allow-unauthenticated']),
),
mutually_exclusive=[['deb', 'package', 'upgrade']],
required_one_of=[['autoremove', 'deb', 'package', 'update_cache', 'upgrade']],
supports_check_mode=True,
)
module.run_command_environ_update = APT_ENV_VARS
if not HAS_PYTHON_APT:
if module.check_mode:
module.fail_json(msg="%s must be installed to use check mode. "
"If run normally this module can auto-install it." % PYTHON_APT)
try:
module.warn("Updating cache and auto-installing missing dependency: %s" % PYTHON_APT)
module.run_command(['apt-get', 'update'], check_rc=True)
module.run_command(['apt-get', 'install', '--no-install-recommends', PYTHON_APT, '-y', '-q'], check_rc=True)
global apt, apt_pkg
import apt
import apt.debfile
import apt_pkg
except ImportError:
module.fail_json(msg="Could not import python modules: apt, apt_pkg. "
"Please install %s package." % PYTHON_APT)
global APTITUDE_CMD
APTITUDE_CMD = module.get_bin_path("aptitude", False)
global APT_GET_CMD
APT_GET_CMD = module.get_bin_path("apt-get")
p = module.params
if p['upgrade'] == 'no':
p['upgrade'] = None
use_apt_get = p['force_apt_get']
if not use_apt_get and not APTITUDE_CMD:
module.warn("Could not find aptitude. Using apt-get instead")
use_apt_get = True
updated_cache = False
updated_cache_time = 0
install_recommends = p['install_recommends']
allow_unauthenticated = p['allow_unauthenticated']
dpkg_options = expand_dpkg_options(p['dpkg_options'])
autoremove = p['autoremove']
autoclean = p['autoclean']
# Get the cache object
cache = get_cache(module)
try:
if p['default_release']:
try:
apt_pkg.config['APT::Default-Release'] = p['default_release']
except AttributeError:
apt_pkg.Config['APT::Default-Release'] = p['default_release']
# reopen cache w/ modified config
cache.open(progress=None)
mtimestamp, updated_cache_time = get_updated_cache_time()
# Cache valid time is default 0, which will update the cache if
# needed and `update_cache` was set to true
updated_cache = False
if p['update_cache'] or p['cache_valid_time']:
now = datetime.datetime.now()
tdelta = datetime.timedelta(seconds=p['cache_valid_time'])
if not mtimestamp + tdelta >= now:
# Retry to update the cache up to 5 times with exponential backoff
err = ''
max_fail_count = 5
max_fail_sleep = 12
randint = random.randint(0, 1000) / 1000
for retry in range(max_fail_count):
try:
cache.update()
break
except apt.cache.FetchFailedException as e:
err = to_native(e)
# Use exponential backoff plus a little bit of randomness
fail_sleep = 2 ** retry + randint
if fail_sleep > max_fail_sleep:
fail_sleep = max_fail_sleep + randint
time.sleep(fail_sleep)
else:
module.fail_json(msg='Failed to update apt cache: %s' % err)
cache.open(progress=None)
mtimestamp, post_cache_update_time = get_updated_cache_time()
if updated_cache_time != post_cache_update_time:
updated_cache = True
updated_cache_time = post_cache_update_time
# If there is nothing else to do exit. This will set state as
# changed based on if the cache was updated.
if not p['package'] and not p['upgrade'] and not p['deb']:
module.exit_json(
changed=updated_cache,
cache_updated=updated_cache,
cache_update_time=updated_cache_time
)
force_yes = p['force']
if p['upgrade']:
upgrade(module, p['upgrade'], force_yes, p['default_release'], use_apt_get, dpkg_options, autoremove, allow_unauthenticated)
if p['deb']:
if p['state'] != 'present':
module.fail_json(msg="deb only supports state=present")
if '://' in p['deb']:
p['deb'] = fetch_file(module, p['deb'])
install_deb(module, p['deb'], cache,
install_recommends=install_recommends,
allow_unauthenticated=allow_unauthenticated,
force=force_yes, dpkg_options=p['dpkg_options'])
unfiltered_packages = p['package'] or ()
packages = [package.strip() for package in unfiltered_packages if package != '*']
all_installed = '*' in unfiltered_packages
latest = p['state'] == 'latest'
if latest and all_installed:
if packages:
module.fail_json(msg='unable to install additional packages when upgrading all installed packages')
upgrade(module, 'yes', force_yes, p['default_release'], use_apt_get, dpkg_options, autoremove, allow_unauthenticated)
if packages:
for package in packages:
if package.count('=') > 1:
module.fail_json(msg="invalid package spec: %s" % package)
if latest and '=' in package:
module.fail_json(msg='version number inconsistent with state=latest: %s' % package)
if not packages:
if autoclean:
cleanup(module, p['purge'], force=force_yes, operation='autoclean', dpkg_options=dpkg_options)
if autoremove:
cleanup(module, p['purge'], force=force_yes, operation='autoremove', dpkg_options=dpkg_options)
if p['state'] in ('latest', 'present', 'build-dep', 'fixed'):
state_upgrade = False
state_builddep = False
state_fixed = False
if p['state'] == 'latest':
state_upgrade = True
if p['state'] == 'build-dep':
state_builddep = True
if p['state'] == 'fixed':
state_fixed = True
success, retvals = install(
module,
packages,
cache,
upgrade=state_upgrade,
default_release=p['default_release'],
install_recommends=install_recommends,
force=force_yes,
dpkg_options=dpkg_options,
build_dep=state_builddep,
fixed=state_fixed,
autoremove=autoremove,
only_upgrade=p['only_upgrade'],
allow_unauthenticated=allow_unauthenticated
)
# Store if the cache has been updated
retvals['cache_updated'] = updated_cache
# Store when the update time was last
retvals['cache_update_time'] = updated_cache_time
if success:
module.exit_json(**retvals)
else:
module.fail_json(**retvals)
elif p['state'] == 'absent':
remove(module, packages, cache, p['purge'], force=force_yes, dpkg_options=dpkg_options, autoremove=autoremove)
except apt.cache.LockFailedException:
module.fail_json(msg="Failed to lock apt for exclusive operation")
except apt.cache.FetchFailedException:
module.fail_json(msg="Could not fetch updated apt files")
|
def main():
module = AnsibleModule(
argument_spec=dict(
state=dict(type='str', default='present', choices=['absent', 'build-dep', 'fixed', 'latest', 'present']),
update_cache=dict(type='bool', aliases=['update-cache']),
cache_valid_time=dict(type='int', default=0),
purge=dict(type='bool', default=False),
package=dict(type='list', aliases=['pkg', 'name']),
deb=dict(type='path'),
default_release=dict(type='str', aliases=['default-release']),
install_recommends=dict(type='bool', aliases=['install-recommends']),
force=dict(type='bool', default=False),
upgrade=dict(type='str', choices=['dist', 'full', 'no', 'safe', 'yes']),
dpkg_options=dict(type='str', default=DPKG_OPTIONS),
autoremove=dict(type='bool', default=False),
autoclean=dict(type='bool', default=False),
policy_rc_d=dict(type='int', default=None),
only_upgrade=dict(type='bool', default=False),
force_apt_get=dict(type='bool', default=False),
allow_unauthenticated=dict(type='bool', default=False, aliases=['allow-unauthenticated']),
),
mutually_exclusive=[['deb', 'package', 'upgrade']],
required_one_of=[['autoremove', 'deb', 'package', 'update_cache', 'upgrade']],
supports_check_mode=True,
)
module.run_command_environ_update = APT_ENV_VARS
if not HAS_PYTHON_APT:
if module.check_mode:
module.fail_json(msg="%s must be installed to use check mode. "
"If run normally this module can auto-install it." % PYTHON_APT)
try:
module.warn("Updating cache and auto-installing missing dependency: %s" % PYTHON_APT)
module.run_command(['apt-get', 'update'], check_rc=True)
module.run_command(['apt-get', 'install', '--no-install-recommends', PYTHON_APT, '-y', '-q'], check_rc=True)
global apt, apt_pkg
import apt
import apt.debfile
import apt_pkg
except ImportError:
module.fail_json(msg="Could not import python modules: apt, apt_pkg. "
"Please install %s package." % PYTHON_APT)
global APTITUDE_CMD
APTITUDE_CMD = module.get_bin_path("aptitude", False)
global APT_GET_CMD
APT_GET_CMD = module.get_bin_path("apt-get")
p = module.params
if p['upgrade'] == 'no':
p['upgrade'] = None
use_apt_get = p['force_apt_get']
if not use_apt_get and not APTITUDE_CMD:
module.warn("Could not find aptitude. Using apt-get instead")
use_apt_get = True
updated_cache = False
updated_cache_time = 0
install_recommends = p['install_recommends']
allow_unauthenticated = p['allow_unauthenticated']
dpkg_options = expand_dpkg_options(p['dpkg_options'])
autoremove = p['autoremove']
autoclean = p['autoclean']
# Get the cache object
cache = get_cache(module)
try:
if p['default_release']:
try:
apt_pkg.config['APT::Default-Release'] = p['default_release']
except AttributeError:
apt_pkg.Config['APT::Default-Release'] = p['default_release']
# reopen cache w/ modified config
cache.open(progress=None)
mtimestamp, updated_cache_time = get_updated_cache_time()
# Cache valid time is default 0, which will update the cache if
# needed and `update_cache` was set to true
updated_cache = False
if p['update_cache'] or p['cache_valid_time']:
now = datetime.datetime.now()
tdelta = datetime.timedelta(seconds=p['cache_valid_time'])
if not mtimestamp + tdelta >= now:
# Retry to update the cache up to 5 times with exponential backoff
err = ''
MAX_FAIL_COUNT = 5
MAX_FAIL_SLEEP = 12
max_fail_sleep = 12
randint = random.randint(0, 1000) / 1000
for retry in range(max_fail_count):
try:
cache.update()
break
except apt.cache.FetchFailedException as e:
err = to_native(e)
# Use exponential backoff plus a little bit of randomness
fail_sleep = 2 ** retry + randint
if fail_sleep > max_fail_sleep:
fail_sleep = max_fail_sleep + randint
time.sleep(fail_sleep)
else:
module.fail_json(msg='Failed to update apt cache: %s' % err)
cache.open(progress=None)
mtimestamp, post_cache_update_time = get_updated_cache_time()
if updated_cache_time != post_cache_update_time:
updated_cache = True
updated_cache_time = post_cache_update_time
# If there is nothing else to do exit. This will set state as
# changed based on if the cache was updated.
if not p['package'] and not p['upgrade'] and not p['deb']:
module.exit_json(
changed=updated_cache,
cache_updated=updated_cache,
cache_update_time=updated_cache_time
)
force_yes = p['force']
if p['upgrade']:
upgrade(module, p['upgrade'], force_yes, p['default_release'], use_apt_get, dpkg_options, autoremove, allow_unauthenticated)
if p['deb']:
if p['state'] != 'present':
module.fail_json(msg="deb only supports state=present")
if '://' in p['deb']:
p['deb'] = fetch_file(module, p['deb'])
install_deb(module, p['deb'], cache,
install_recommends=install_recommends,
allow_unauthenticated=allow_unauthenticated,
force=force_yes, dpkg_options=p['dpkg_options'])
unfiltered_packages = p['package'] or ()
packages = [package.strip() for package in unfiltered_packages if package != '*']
all_installed = '*' in unfiltered_packages
latest = p['state'] == 'latest'
if latest and all_installed:
if packages:
module.fail_json(msg='unable to install additional packages when upgrading all installed packages')
upgrade(module, 'yes', force_yes, p['default_release'], use_apt_get, dpkg_options, autoremove, allow_unauthenticated)
if packages:
for package in packages:
if package.count('=') > 1:
module.fail_json(msg="invalid package spec: %s" % package)
if latest and '=' in package:
module.fail_json(msg='version number inconsistent with state=latest: %s' % package)
if not packages:
if autoclean:
cleanup(module, p['purge'], force=force_yes, operation='autoclean', dpkg_options=dpkg_options)
if autoremove:
cleanup(module, p['purge'], force=force_yes, operation='autoremove', dpkg_options=dpkg_options)
if p['state'] in ('latest', 'present', 'build-dep', 'fixed'):
state_upgrade = False
state_builddep = False
state_fixed = False
if p['state'] == 'latest':
state_upgrade = True
if p['state'] == 'build-dep':
state_builddep = True
if p['state'] == 'fixed':
state_fixed = True
success, retvals = install(
module,
packages,
cache,
upgrade=state_upgrade,
default_release=p['default_release'],
install_recommends=install_recommends,
force=force_yes,
dpkg_options=dpkg_options,
build_dep=state_builddep,
fixed=state_fixed,
autoremove=autoremove,
only_upgrade=p['only_upgrade'],
allow_unauthenticated=allow_unauthenticated
)
# Store if the cache has been updated
retvals['cache_updated'] = updated_cache
# Store when the update time was last
retvals['cache_update_time'] = updated_cache_time
if success:
module.exit_json(**retvals)
else:
module.fail_json(**retvals)
elif p['state'] == 'absent':
remove(module, packages, cache, p['purge'], force=force_yes, dpkg_options=dpkg_options, autoremove=autoremove)
except apt.cache.LockFailedException:
module.fail_json(msg="Failed to lock apt for exclusive operation")
except apt.cache.FetchFailedException:
module.fail_json(msg="Could not fetch updated apt files")
|
36,904 |
def test_import_url_with_no_exec(tmp_dir, dvc, erepo_dir):
tmp_dir.gen({"data_dir": {"file": "file content"}})
src = os.path.join("data_dir", "file")
dvc.imp_url(src, "./", no_exec=True)
dst = tmp_dir / "file"
assert dst.exists() is False
|
def test_import_url_with_no_exec(tmp_dir, dvc, erepo_dir):
tmp_dir.gen({"data_dir": {"file": "file content"}})
src = os.path.join("data_dir", "file")
dvc.imp_url(src, "./", no_exec=True)
dst = tmp_dir / "file"
assert not dst.exists()
|
54,176 |
def remove_chrs_from_bam(bam, chrs, chrsz, nth=1, out_dir=''):
assert(len(chrs)>0)
prefix = os.path.join(out_dir,
os.path.basename(strip_ext_bam(bam)))
suffix = 'no_{}'.format('_'.join(chrs))
final_bam = '{}.{}.bam'.format(prefix, suffix)
# tmp_bam = '{}.{}.tmp.bam'.format(prefix, suffix)
# tmp_header_sam = '{}.{}.header.tmp.sam'.format(prefix, suffix)
tmp_chrsz = '{}.{}.tmp.chrsz'.format(prefix, suffix)
# make a temp chrsz file
cmd0 = 'zcat -f {chrsz} |'
cmd0 += 'grep -v -P \'^({chrs})\\s\' | '
cmd0 += 'awk \'BEGIN{{OFS="\\t"}} {{print $1,0,$2}}\' > {tmp_chrsz}'
cmd0 = cmd0.format(
chrsz=chrsz,
chrs='|'.join(chrs),
tmp_chrsz=tmp_chrsz)
run_shell_cmd(cmd0)
# remove chrs from BAM
cmd1 = 'samtools view -b -L {tmp_chrsz} {bam} -@ {nth} > {final_bam}'
cmd1 = cmd1.format(
tmp_chrsz=tmp_chrsz,
bam=bam,
nth=nth,
final_bam=final_bam)
# tmp_bam=tmp_bam)
run_shell_cmd(cmd1)
rm_f(tmp_chrsz)
# # make a temp header
# cmd2 = 'samtools view -H {bam} > {tmp_header_sam}'
# cmd2 = cmd2.format(
# bam=bam,
# tmp_header_sam=tmp_header_sam)
# run_shell_cmd(cmd2)
# # update header
# cmd3 = 'samtools reheader {tmp_header_sam} {tmp_bam} > {final_bam}'
# cmd3 = cmd3.format(
# tmp_header_sam=tmp_header_sam,
# tmp_bam=tmp_bam,
# final_bam=final_bam)
# run_shell_cmd(cmd3)
# rm_f([tmp_bam, tmp_header_sam, tmp_chrsz])
return final_bam
|
def remove_chrs_from_bam(bam, chrs, chrsz, nth=1, out_dir=''):
if not len(chrs):
raise ValueError('There must be at least one chromosome, zero found.')
prefix = os.path.join(out_dir,
os.path.basename(strip_ext_bam(bam)))
suffix = 'no_{}'.format('_'.join(chrs))
final_bam = '{}.{}.bam'.format(prefix, suffix)
# tmp_bam = '{}.{}.tmp.bam'.format(prefix, suffix)
# tmp_header_sam = '{}.{}.header.tmp.sam'.format(prefix, suffix)
tmp_chrsz = '{}.{}.tmp.chrsz'.format(prefix, suffix)
# make a temp chrsz file
cmd0 = 'zcat -f {chrsz} |'
cmd0 += 'grep -v -P \'^({chrs})\\s\' | '
cmd0 += 'awk \'BEGIN{{OFS="\\t"}} {{print $1,0,$2}}\' > {tmp_chrsz}'
cmd0 = cmd0.format(
chrsz=chrsz,
chrs='|'.join(chrs),
tmp_chrsz=tmp_chrsz)
run_shell_cmd(cmd0)
# remove chrs from BAM
cmd1 = 'samtools view -b -L {tmp_chrsz} {bam} -@ {nth} > {final_bam}'
cmd1 = cmd1.format(
tmp_chrsz=tmp_chrsz,
bam=bam,
nth=nth,
final_bam=final_bam)
# tmp_bam=tmp_bam)
run_shell_cmd(cmd1)
rm_f(tmp_chrsz)
# # make a temp header
# cmd2 = 'samtools view -H {bam} > {tmp_header_sam}'
# cmd2 = cmd2.format(
# bam=bam,
# tmp_header_sam=tmp_header_sam)
# run_shell_cmd(cmd2)
# # update header
# cmd3 = 'samtools reheader {tmp_header_sam} {tmp_bam} > {final_bam}'
# cmd3 = cmd3.format(
# tmp_header_sam=tmp_header_sam,
# tmp_bam=tmp_bam,
# final_bam=final_bam)
# run_shell_cmd(cmd3)
# rm_f([tmp_bam, tmp_header_sam, tmp_chrsz])
return final_bam
|
58,961 |
def main(sm=None, pm=None):
set_realtime_priority(1)
if sm is None:
sm = messaging.SubMaster(['carControl', 'carState', 'liveLocationKalman'], poll=['liveLocationKalman'])
if pm is None:
pm = messaging.PubMaster(['liveTorqueParameters'])
params_reader = Params()
CP = car.CarParams.from_bytes(params_reader.get("CarParams", block=True))
params = params_reader.get("LiveTorqueParameters")
estimator = TorqueEstimator(CP, params)
while True:
sm.update()
if sm.all_checks():
for which in sm.updated.keys():
if sm.updated[which]:
t = sm.logMonoTime[which] * 1e-9
estimator.handle_log(t, which, sm[which])
if sm.updated['liveLocationKalman']:
# print(sm.frame, [len(v) for v in estimator.filtered_points.buckets.values()])
msg = messaging.new_message('liveTorqueParameters')
msg.valid = sm.all_checks()
liveTorqueParameters = msg.liveTorqueParameters
if estimator.filtered_points.is_valid():
try:
slope, offset, friction_coeff = estimator.estimate_params()
# print(slope, offset, friction_coeff)
except Exception as e:
# print(e)
slope = offset = friction_coeff = None
cloudlog.exception(f"Error computing live torque params: {e}")
if estimator.is_sane(slope, offset, friction_coeff):
liveTorqueParameters.liveValid = True
liveTorqueParameters.slopeRaw = slope
liveTorqueParameters.offsetRaw = offset
liveTorqueParameters.frictionCoefficientRaw = friction_coeff
estimator.slopeFiltered.update(slope)
estimator.offsetFiltered.update(offset)
estimator.frictionCoefficientFiltered.update(friction_coeff)
else:
cloudlog.exception("live torque params are numerically unstable")
liveTorqueParameters.liveValid = False
# estimator.reset()
else:
liveTorqueParameters.liveValid = False
liveTorqueParameters.slopeFiltered = estimator.slopeFiltered.x
liveTorqueParameters.offsetFiltered = estimator.offsetFiltered.x
liveTorqueParameters.frictionCoefficientFiltered = estimator.frictionCoefficientFiltered.x
liveTorqueParameters.totalBucketPoints = len(estimator.filtered_points)
if sm.frame % 1200 == 0: # once a minute
params_to_write = {
"slope": estimator.slopeFiltered.x,
"offset": estimator.offsetFiltered.x,
"frictionCoefficient": estimator.frictionCoefficientFiltered.x
}
put_nonblocking("LiveTorqueParameters", json.dumps(params_to_write))
pm.send('liveTorqueParameters', msg)
|
def main(sm=None, pm=None):
set_realtime_priority(1)
if sm is None:
sm = messaging.SubMaster(['carControl', 'carState', 'liveLocationKalman'], poll=['liveLocationKalman'])
if pm is None:
pm = messaging.PubMaster(['liveTorqueParameters'])
params_reader = Params()
CP = car.CarParams.from_bytes(params_reader.get("CarParams", block=True))
params = params_reader.get("LiveTorqueParameters")
estimator = TorqueEstimator(CP, params)
while True:
sm.update()
if sm.all_checks():
for which in sm.updated.keys():
if sm.updated[which]:
t = sm.logMonoTime[which] * 1e-9
estimator.handle_log(t, which, sm[which])
if sm.updated['liveLocationKalman']:
# print(sm.frame, [len(v) for v in estimator.filtered_points.buckets.values()])
msg = messaging.new_message('liveTorqueParameters')
msg.valid = sm.all_checks()
liveTorqueParameters = msg.liveTorqueParameters
if estimator.filtered_points.is_valid():
try:
slope, offset, friction_coeff = estimator.estimate_params()
# print(slope, offset, friction_coeff)
except Exception as e:
# print(e)
slope = offset = friction_coeff = None
cloudlog.exception(f"Error computing live torque params: {e}")
if estimator.is_sane(slope, offset, friction_coeff):
liveTorqueParameters.liveValid = True
liveTorqueParameters.slopeRaw = float(slope)
liveTorqueParameters.offsetRaw = float(offset)
liveTorqueParameters.frictionCoefficientRaw = float(friction_coeff)
estimator.slopeFiltered.update(slope)
estimator.offsetFiltered.update(offset)
estimator.frictionCoefficientFiltered.update(friction_coeff)
else:
cloudlog.exception("live torque params are numerically unstable")
liveTorqueParameters.liveValid = False
# estimator.reset()
else:
liveTorqueParameters.liveValid = False
liveTorqueParameters.slopeFiltered = estimator.slopeFiltered.x
liveTorqueParameters.offsetFiltered = estimator.offsetFiltered.x
liveTorqueParameters.frictionCoefficientFiltered = estimator.frictionCoefficientFiltered.x
liveTorqueParameters.totalBucketPoints = len(estimator.filtered_points)
if sm.frame % 1200 == 0: # once a minute
params_to_write = {
"slope": estimator.slopeFiltered.x,
"offset": estimator.offsetFiltered.x,
"frictionCoefficient": estimator.frictionCoefficientFiltered.x
}
put_nonblocking("LiveTorqueParameters", json.dumps(params_to_write))
pm.send('liveTorqueParameters', msg)
|
45,984 |
def lovasz_hinge_loss(input: torch.Tensor, target: torch.Tensor) -> torch.Tensor:
r"""Criterion that computes a surrogate binary intersection-over-union (IoU) loss.
According to [2], we compute the IoU as follows:
.. math::
\text{IoU}(x, class) = \frac{|X \cap Y|}{|X \cup Y|}
[1] approximates this fomular with a surrogate, which is fully differentable.
Where:
- :math:`X` expects to be the scores of each class.
- :math:`Y` expects to be the binary tensor with the class labels.
the loss, is finally computed as:
.. math::
\text{loss}(x, class) = 1 - \text{IoU}(x, class)
Reference:
[1] http://proceedings.mlr.press/v37/yub15.pdf
[2] https://arxiv.org/pdf/1705.08790.pdf
. note::
This loss function only supports binary labels. For multi-class labels please
use the Lovasz-Softmax loss.
Args:
input: logits tensor with shape :math:`(N, 1, H, W)`.
labels: labels tensor with shape :math:`(N, H, W)` with binary values.
Return:
a scalar with the computed loss.
Example:
>>> N = 1 # num_classes
>>> input = torch.randn(1, N, 3, 5, requires_grad=True)
>>> target = torch.empty(1, 3, 5, dtype=torch.long).random_(N)
>>> output = lovasz_hinge_loss(input, target)
>>> output.backward()
"""
if not isinstance(input, torch.Tensor):
raise TypeError(f"Input type is not a torch.Tensor. Got {type(input)}")
if not isinstance(target, torch.Tensor):
raise TypeError(f"Target type is not a torch.Tensor. Got {type(target)}")
if not len(input.shape) == 4:
raise ValueError(f"Invalid input shape, we expect Bx1xHxW. Got: {input.shape}")
if not len(target.shape) == 3:
raise ValueError(f"Invalid target shape, we expect BxHxW. Got: {target.shape}")
if not input.shape[1] == 1:
raise ValueError(f"Invalid input shape, we expect Bx1xHxW. Got: {input.shape}")
if not input.shape[-2:] == target.shape[-2:]:
raise ValueError(f"input and target shapes must be the same. Got: {input.shape} and {target.shape}")
if not input.device == target.device:
raise ValueError(f"input and target must be in the same device. Got: {input.device} and {target.device}")
# flatten input and target [B, -1] and to float
input_flatten: torch.Tensor = input.flatten(start_dim=1)
target_flatten: torch.Tensor = target.flatten(start_dim=1).float()
# get shapes
B, N = input_flatten.shape
# compute probabilities
input_prob: torch.Tensor = torch.sigmoid(input_flatten)
# compute actual loss
signs = 2. * target_flatten - 1.
errors = 1. - input_prob * signs
errors_sorted, permutation = torch.sort(errors, dim=1, descending=True)
batch_index: torch.Tensor = torch.arange(B, device=input.device).repeat_interleave(N, dim=0)
target_sorted: torch.Tensor = target_flatten[batch_index, permutation.view(-1)]
target_sorted: torch.Tensor = target_sorted.view(B, N)
target_sorted_sum: torch.Tensor = target_sorted.sum(dim=1, keepdim=True)
intersection: torch.Tensor = target_sorted_sum - target_sorted.cumsum(dim=1)
union: torch.Tensor = target_sorted_sum + (1. - target_sorted).cumsum(dim=1)
gradient: torch.Tensor = 1. - intersection / union
if N > 1:
gradient[..., 1:] = gradient[..., 1:] - gradient[..., :-1]
loss: torch.Tensor = (F.relu(errors_sorted) * gradient).sum(dim=1).mean()
return loss
|
def lovasz_hinge_loss(input: torch.Tensor, target: torch.Tensor) -> torch.Tensor:
r"""Criterion that computes a surrogate binary intersection-over-union (IoU) loss.
According to [2], we compute the IoU as follows:
.. math::
\text{IoU}(x, class) = \frac{|X \cap Y|}{|X \cup Y|}
[1] approximates this fomular with a surrogate, which is fully differentable.
Where:
- :math:`X` expects to be the scores of each class.
- :math:`Y` expects to be the binary tensor with the class labels.
the loss, is finally computed as:
.. math::
\text{loss}(x, class) = 1 - \text{IoU}(x, class)
Reference:
[1] http://proceedings.mlr.press/v37/yub15.pdf
[2] https://arxiv.org/pdf/1705.08790.pdf
. note::
This loss function only supports binary labels. For multi-class labels please
use the Lovasz-Softmax loss.
Args:
input: logits tensor with shape :math:`(N, 1, H, W)`.
labels: labels tensor with shape :math:`(N, H, W)` with binary values.
Return:
a scalar with the computed loss.
Example:
>>> N = 1 # num_classes
>>> input = torch.randn(1, N, 3, 5, requires_grad=True)
>>> target = torch.empty(1, 3, 5, dtype=torch.long).random_(N)
>>> output = lovasz_hinge_loss(input, target)
>>> output.backward()
"""
if not isinstance(input, torch.Tensor):
raise TypeError(f"Input type is not a torch.Tensor. Got {type(input)}")
if not isinstance(target, torch.Tensor):
raise TypeError(f"Target type is not a torch.Tensor. Got {type(target)}")
if not len(input.shape) == 4:
raise ValueError(f"Invalid input shape, we expect Bx1xHxW. Got: {input.shape}")
if not len(target.shape) == 3:
raise ValueError(f"Invalid target shape, we expect BxHxW. Got: {target.shape}")
if not input.shape[1] == 1:
raise ValueError(f"Invalid input shape, we expect Bx1xHxW. Got: {input.shape}")
if not input.shape[-2:] == target.shape[-2:]:
raise ValueError(f"input and target shapes must be the same. Got: {input.shape} and {target.shape}")
if not input.device == target.device:
raise ValueError(f"input and target must be in the same device. Got: {input.device} and {target.device}")
# flatten input and target [B, -1] and to float
input_flatten: torch.Tensor = input.flatten(start_dim=1)
target_flatten: torch.Tensor = target.flatten(start_dim=1).float()
# get shapes
B, N = input_flatten.shape
# compute probabilities
input_prob: torch.Tensor = torch.sigmoid(input_flatten)
# compute actual loss
signs = 2. * target_flatten - 1.
errors = 1. - input_prob * signs
errors_sorted, permutation = errors.sort(dim=1, descending=True)
batch_index: torch.Tensor = torch.arange(B, device=input.device).repeat_interleave(N, dim=0)
target_sorted: torch.Tensor = target_flatten[batch_index, permutation.view(-1)]
target_sorted: torch.Tensor = target_sorted.view(B, N)
target_sorted_sum: torch.Tensor = target_sorted.sum(dim=1, keepdim=True)
intersection: torch.Tensor = target_sorted_sum - target_sorted.cumsum(dim=1)
union: torch.Tensor = target_sorted_sum + (1. - target_sorted).cumsum(dim=1)
gradient: torch.Tensor = 1. - intersection / union
if N > 1:
gradient[..., 1:] = gradient[..., 1:] - gradient[..., :-1]
loss: torch.Tensor = (F.relu(errors_sorted) * gradient).sum(dim=1).mean()
return loss
|
41,249 |
def is_valid_placement(big_graph: nx.Graph, small_graph: nx.Graph, small_to_big_mapping: Dict):
"""Return whether the given placement is a valid placement of small_graph onto big_graph.
This is done by making sure all the nodes and edges on the mapped version of `small_graph`
are present in `big_graph`.
Args:
big_graph: A larger graph we're placing `small_graph` onto.
small_graph: A smaller, (potential) sub-graph to validate the given mapping.
small_to_big_mapping: A mappings from `small_graph` nodes to `big_graph`
nodes. After the mapping occurs, we check whether all of the mapped nodes and
edges exist on `big_graph`.
"""
small_mapped = nx.relabel_nodes(small_graph, small_to_big_mapping)
return _is_valid_placement_helper(
big_graph=big_graph, small_mapped=small_mapped, small_to_big_mapping=small_to_big_mapping
)
|
def is_valid_placement(big_graph: nx.Graph, small_graph: nx.Graph, small_to_big_mapping: Dict):
"""Return whether the given placement is a valid placement of small_graph onto big_graph.
This is done by making sure all the nodes and edges on the mapped version of `small_graph`
are present in `big_graph`.
Args:
big_graph: A larger graph we're placing `small_graph` onto.
small_graph: A smaller, (potential) sub-graph to validate the given mapping.
small_to_big_mapping: A mapping from `small_graph` nodes to `big_graph`
nodes. After the mapping occurs, we check whether all of the mapped nodes and
edges exist on `big_graph`.
"""
small_mapped = nx.relabel_nodes(small_graph, small_to_big_mapping)
return _is_valid_placement_helper(
big_graph=big_graph, small_mapped=small_mapped, small_to_big_mapping=small_to_big_mapping
)
|
42,315 |
def format_infraction_with_duration(
date_to: Optional[str],
date_from: Optional[datetime.datetime] = None,
max_units: int = 2,
absolute: bool = True
) -> Optional[str]:
"""
Return `date_to` formatted as a discord timestamp with the timestamp duration since `date_from`.
`max_units` specifies the maximum number of units of time to include in the duration. For
example, a value of 1 may include days but not hours.
If `absolute` is True, the absolute value of the duration delta is used. This prevents negative
values in the case that `date_to` is in the past relative to `date_from`.
"""
if not date_to:
return None
date_to_formatted = format_infraction(date_to)
date_from = date_from or datetime.datetime.now(datetime.timezone.utc)
date_to = dateutil.parser.isoparse(date_to).replace(microsecond=0)
delta = relativedelta(date_to, date_from)
if absolute:
delta = abs(delta)
duration = humanize_delta(delta, max_units=max_units)
duration_formatted = f" ({duration})" if duration else ""
return f"{date_to_formatted}{duration_formatted}"
|
def format_infraction_with_duration(
date_to: Optional[str],
date_from: Optional[datetime.datetime] = None,
max_units: int = 2,
absolute: bool = True
) -> Optional[str]:
"""
Return `date_to` formatted as a discord timestamp with the timestamp duration since `date_from`.
`max_units` specifies the maximum number of units of time to include in the duration. For
example, a value of 1 may include days but not hours.
If `absolute` is True, the absolute value of the duration delta is used. This prevents negative
values in the case that `date_to` is in the past relative to `date_from`.
"""
if not date_to:
return None
date_to_formatted = format_infraction(date_to)
date_from = date_from or arrow.utcnow()
date_to = dateutil.parser.isoparse(date_to).replace(microsecond=0)
delta = relativedelta(date_to, date_from)
if absolute:
delta = abs(delta)
duration = humanize_delta(delta, max_units=max_units)
duration_formatted = f" ({duration})" if duration else ""
return f"{date_to_formatted}{duration_formatted}"
|
2,385 |
def is_scalar_nan(x):
"""Tests if x is NaN.
This function is meant to overcome the issue that np.isnan does not allow
non-numerical types as input, and that np.nan is not float('nan').
Parameters
----------
x : any type
Returns
-------
boolean
Examples
--------
>>> is_scalar_nan(np.nan)
True
>>> is_scalar_nan(float("nan"))
True
>>> is_scalar_nan(None)
False
>>> is_scalar_nan("")
False
>>> is_scalar_nan([np.nan])
False
"""
# Try to use numpy `np.isnan` to check for nan. It can failed in case that
# `x` cannot be converted to a numpy number. Fall-back to the python
# `math.isnan`.
try:
# convert from numpy.bool_ to python bool to ensure that testing
# is_scalar_nan(x) is True does not fail.
return bool(isinstance(x, numbers.Real) and np.isnan(x))
except TypeError:
return math.isnan(x)
|
def is_scalar_nan(x):
"""Tests if x is NaN.
This function is meant to overcome the issue that np.isnan does not allow
non-numerical types as input, and that np.nan is not float('nan').
Parameters
----------
x : any type
Returns
-------
boolean
Examples
--------
>>> is_scalar_nan(np.nan)
True
>>> is_scalar_nan(float("nan"))
True
>>> is_scalar_nan(None)
False
>>> is_scalar_nan("")
False
>>> is_scalar_nan([np.nan])
False
"""
# Try to use numpy `np.isnan` to check for nan. It can failed in case that
# `x` cannot be converted to a numpy number. Fall-back to the python
# `math.isnan`.
try:
# convert from numpy.bool_ to python bool to ensure that testing
# is_scalar_nan(x) is True does not fail.
return bool(isinstance(x, numbers.Real) and math.isnan(x))
except TypeError:
return math.isnan(x)
|
2,832 |
def polynomial_kernel(X, Y=None, degree=3, gamma=None, coef0=1):
"""Compute the polynomial kernel between X and Y.
K(X, Y) = (gamma <X, Y> + coef0)^degree
Read more in the :ref:`User Guide <polynomial_kernel>`.
Parameters
----------
X : ndarray of shape (n_samples_X, n_features)
A feature array.
Y : ndarray of shape (n_samples_Y, n_features), default=None
An optional second feature array. If `None`, uses `Y=X`.
degree : int, default=3
Kernel degree.
gamma : float, default=None
Slope. If None, defaults to 1.0 / n_features.
coef0 : float, default=1
Intercept.
Returns
-------
Gram matrix : ndarray of shape (n_samples_X, n_samples_Y)
The polynomial kernel.
"""
X, Y = check_pairwise_arrays(X, Y)
if gamma is None:
gamma = 1.0 / X.shape[1]
K = safe_sparse_dot(X, Y.T, dense_output=True)
K *= gamma
K += coef0
K **= degree
return K
|
def polynomial_kernel(X, Y=None, degree=3, gamma=None, coef0=1):
"""Compute the polynomial kernel between X and Y.
K(X, Y) = (gamma <X, Y> + coef0)^degree
Read more in the :ref:`User Guide <polynomial_kernel>`.
Parameters
----------
X : ndarray of shape (n_samples_X, n_features)
A feature array.
Y : ndarray of shape (n_samples_Y, n_features), default=None
An optional second feature array. If `None`, uses `Y=X`.
degree : int, default=3
Kernel degree.
gamma : float, default=None
Coefficient of the vector inner product. If None, defaults to 1.0 / n_features.
coef0 : float, default=1
Intercept.
Returns
-------
Gram matrix : ndarray of shape (n_samples_X, n_samples_Y)
The polynomial kernel.
"""
X, Y = check_pairwise_arrays(X, Y)
if gamma is None:
gamma = 1.0 / X.shape[1]
K = safe_sparse_dot(X, Y.T, dense_output=True)
K *= gamma
K += coef0
K **= degree
return K
|
5,850 |
def vq(obs, code_book, check_finite=True):
"""
Assign codes from a code book to observations.
Assigns a code from a code book to each observation. Each
observation vector in the 'M' by 'N' `obs` array is compared with the
centroids in the code book and assigned the code of the closest
centroid.
The features in `obs` should have unit variance, which can be
achieved by passing them through the whiten function. The code
book can be created with the k-means algorithm or a different
encoding algorithm.
Parameters
----------
obs : ndarray
Each row of the 'M' x 'N' array is an observation. The columns are
the "features" seen during each observation. The features must be
whitened first using the whiten function or something equivalent.
code_book : ndarray
The code book is usually generated using the k-means algorithm.
Each row of the array holds a different code, and the columns are
the features of the code.
>>> # f0 f1 f2 f3
>>> code_book = [
... [ 1., 2., 3., 4.], #c0
... [ 1., 2., 3., 4.], #c1
... [ 1., 2., 3., 4.]] #c2
check_finite : bool, optional
Whether to check that the input matrices contain only finite numbers.
Disabling may give a performance gain, but may result in problems
(crashes, non-termination) if the inputs do contain infinities or NaNs.
Default: True
Returns
-------
code : ndarray
A length M array holding the code book index for each observation.
dist : ndarray
The distortion (distance) between the observation and its nearest
code.
Examples
--------
>>> import numpy as np
>>> from scipy.cluster.vq import vq
>>> code_book = np.array([[1.,1.,1.],
... [2.,2.,2.]])
>>> features = np.array([[ 1.9,2.3,1.7],
... [ 1.5,2.5,2.2],
... [ 0.8,0.6,1.7]])
>>> vq(features,code_book)
(array([1, 1, 0],'i'), array([ 0.43588989, 0.73484692, 0.83066239]))
"""
obs = _asarray_validated(obs, check_finite=check_finite)
code_book = _asarray_validated(code_book, check_finite=check_finite)
ct = np.common_type(obs, code_book)
c_obs = obs.astype(ct, copy=False)
c_code_book = code_book.astype(ct, copy=False)
if np.issubdtype(ct, np.float64) or np.issubdtype(ct, np.float32):
return _vq.vq(c_obs, c_code_book)
return py_vq(obs, code_book, check_finite=False)
|
def vq(obs, code_book, check_finite=True):
"""
Assign codes from a code book to observations.
Assigns a code from a code book to each observation. Each
observation vector in the 'M' by 'N' `obs` array is compared with the
centroids in the code book and assigned the code of the closest
centroid.
The features in `obs` should have unit variance, which can be
achieved by passing them through the whiten function. The code
book can be created with the k-means algorithm or a different
encoding algorithm.
Parameters
----------
obs : ndarray
Each row of the 'M' x 'N' array is an observation. The columns are
the "features" seen during each observation. The features must be
whitened first using the whiten function or something equivalent.
code_book : ndarray
The code book is usually generated using the k-means algorithm.
Each row of the array holds a different code, and the columns are
the features of the code.
>>> # f0 f1 f2 f3
>>> code_book = [
... [ 1., 2., 3., 4.], #c0
... [ 1., 2., 3., 4.], #c1
... [ 1., 2., 3., 4.]] #c2
check_finite : bool, optional
Whether to check that the input matrices contain only finite numbers.
Disabling may give a performance gain, but may result in problems
(crashes, non-termination) if the inputs do contain infinities or NaNs.
Default: True
Returns
-------
code : ndarray
A length M array holding the code book index for each observation.
dist : ndarray
The distortion (distance) between the observation and its nearest
code.
Examples
--------
>>> import numpy as np
>>> from scipy.cluster.vq import vq
>>> code_book = np.array([[1.,1.,1.],
... [2.,2.,2.]])
>>> features = np.array([[ 1.9,2.3,1.7],
... [ 1.5,2.5,2.2],
... [ 0.8,0.6,1.7]])
>>> vq(features,code_book)
(array([1, 1, 0],'i'), array([ 0.43588989, 0.73484692, 0.83066239]))
"""
obs = _asarray_validated(obs, check_finite=check_finite)
code_book = _asarray_validated(code_book, check_finite=check_finite)
ct = np.common_type(obs, code_book)
c_obs = obs.astype(ct, copy=False)
c_code_book = code_book.astype(ct, copy=False)
if np.issubdtype(ct, np.float64) or np.issubdtype(ct, np.float32):
return _vq.vq(c_obs, c_code_book)
return py_vq(obs, code_book, check_finite=False)
|
2,835 |
def sigmoid_kernel(X, Y=None, gamma=None, coef0=1):
"""Compute the sigmoid kernel between X and Y.
K(X, Y) = tanh(gamma <X, Y> + coef0)
Read more in the :ref:`User Guide <sigmoid_kernel>`.
Parameters
----------
X : ndarray of shape (n_samples_X, n_features)
A feature array.
Y : ndarray of shape (n_samples_Y, n_features), default=None
An optional second feature array. If `None`, uses `Y=X`.
gamma : float, default=None
Slope. If None, defaults to 1.0 / n_features.
coef0 : float, default=1
Itercept.
Returns
-------
Gram matrix : ndarray of shape (n_samples_X, n_samples_Y)
The Sigmoid kernel.
"""
X, Y = check_pairwise_arrays(X, Y)
if gamma is None:
gamma = 1.0 / X.shape[1]
K = safe_sparse_dot(X, Y.T, dense_output=True)
K *= gamma
K += coef0
np.tanh(K, K) # compute tanh in-place
return K
|
def sigmoid_kernel(X, Y=None, gamma=None, coef0=1):
"""Compute the sigmoid kernel between X and Y.
K(X, Y) = tanh(gamma <X, Y> + coef0)
Read more in the :ref:`User Guide <sigmoid_kernel>`.
Parameters
----------
X : ndarray of shape (n_samples_X, n_features)
A feature array.
Y : ndarray of shape (n_samples_Y, n_features), default=None
An optional second feature array. If `None`, uses `Y=X`.
gamma : float, default=None
If None, defaults to 1.0 / n_features.
coef0 : float, default=1
Itercept.
Returns
-------
Gram matrix : ndarray of shape (n_samples_X, n_samples_Y)
The Sigmoid kernel.
"""
X, Y = check_pairwise_arrays(X, Y)
if gamma is None:
gamma = 1.0 / X.shape[1]
K = safe_sparse_dot(X, Y.T, dense_output=True)
K *= gamma
K += coef0
np.tanh(K, K) # compute tanh in-place
return K
|
46,342 |
def getLogStr(s):
if s == "daal":
message = "uses Intel® DAAL solver"
elif s == "sklearn":
message = "uses original Scikit-learn solver"
elif s == "sklearn_after_daal":
message = "uses original Scikit-learn solver, because the task was not solved with Intel® DAAL"
else:
raise ValueError(f"Invalid input - expected one of 'daal','sklearn', 'sklearn_after_daal', got {s}")
return message
|
def get_patch_message(s):
if s == "daal":
message = "uses Intel® DAAL solver"
elif s == "sklearn":
message = "uses original Scikit-learn solver"
elif s == "sklearn_after_daal":
message = "uses original Scikit-learn solver, because the task was not solved with Intel® DAAL"
else:
raise ValueError(f"Invalid input - expected one of 'daal','sklearn', 'sklearn_after_daal', got {s}")
return message
|
4,296 |
def parse_nedf_header(filename):
"""
Read the header information from the first 10kB of an .nedf file
Parameters
----------
filename : str
Path to the .nedf file
Returns
-------
info : dict
A dictionary with information from the header
dt : numpy.dtype
structure of the binary EEG+accelerometer+trigger data in the file
"""
info = {}
# nedf files have some accelerometer channels sampled at 100Hz and
# several other channels sampled at 500Hz.
# The layout is
# (100HzCh1S1, 100HzCh2S1, 100HzCh3S1),
# ((500HzCh1S1, 500HzCh2S1, …, 500HzChnS1),…,
# (500HzCh1S2, 500HzCh2S2, …, 500HzChnS2), …
# (500HzCh1S5, 500HzCh2S5, …, 500HzChnS5)),
# (100HzCh1S2, 100HzCh2S2, 100HzCh3S2) and so on
# dtype for the binary data block
dt = []
# dtype for a single EEG sample
datadt = []
with open(filename, 'rb') as f:
header = f.read(10240)
headerend = header.find(b'\0')
if headerend == -1:
raise RuntimeError('End of header null not found')
headerxml = ElementTree.fromstring(header[:headerend])
nedfversion = headerxml.findtext('NEDFversion', '')
if nedfversion not in ['1.3', '1.4']:
print('Unexpected NEDFversion, hope this works anyway')
if headerxml.findtext('AdditionalChannelStatus', 'OFF') != 'OFF':
raise RuntimeError('Unknown additional channel, aborting.')
n_acc = int(headerxml.findtext('NumberOfChannelsOfAccelerometer', 0))
if n_acc:
# expect one sample of u16 accelerometer data per block
dt.append(('acc', '>u2', (n_acc,)))
eegset = headerxml.find('EEGSettings')
if eegset is None:
raise RuntimeError('No EEG channels found')
nchantotal = int(eegset.find('TotalNumberOfChannels').text)
info['nchan'] = nchantotal
info['sfreq'] = int(eegset.find('EEGSamplingRate').text)
info['ch_names'] = [e.text for e in eegset.find('EEGMontage')]
# expect nchantotal uint24s
datadt.append(('eeg', 'B', (nchantotal, 3)))
info['units'] = eegset.find('EEGUnits')
if headerxml.find('STIMSettings'):
# 2* -> two stim samples per eeg sample
datadt.append(('stim', 'B', (2, nchantotal, 3)))
if 'AdditionalChannelStatus' in headerxml:
raise RuntimeError('Unexpected AdditionalChannelStatus')
if headerxml.findtext('stepDetails/DeviceClass', '') == 'STARSTIM':
print('Found Starstim, not sure how to handle this')
# Trigger data: 4 bytes in newer versions, 1 byte in older versions
trigger_type = '>i4' if headerxml.findtext('NEDFversion') else 'B'
datadt.append(('trig', trigger_type))
# 5 data samples per block
dt.append(('data', np.dtype(datadt), (5,)))
date = headerxml.findtext('StepDetails/StartDate_firstEEGTimestamp', '')
info['meas_date'] = datetime.datetime.utcfromtimestamp(int(date) / 1000)
return info, np.dtype(dt)
|
def parse_nedf_header(filename):
"""Read header information from the first 10kB of an .nedf file.
Parameters
----------
filename : str
Path to the .nedf file
Returns
-------
info : dict
A dictionary with information from the header
dt : numpy.dtype
structure of the binary EEG+accelerometer+trigger data in the file
"""
info = {}
# nedf files have some accelerometer channels sampled at 100Hz and
# several other channels sampled at 500Hz.
# The layout is
# (100HzCh1S1, 100HzCh2S1, 100HzCh3S1),
# ((500HzCh1S1, 500HzCh2S1, …, 500HzChnS1),…,
# (500HzCh1S2, 500HzCh2S2, …, 500HzChnS2), …
# (500HzCh1S5, 500HzCh2S5, …, 500HzChnS5)),
# (100HzCh1S2, 100HzCh2S2, 100HzCh3S2) and so on
# dtype for the binary data block
dt = []
# dtype for a single EEG sample
datadt = []
with open(filename, 'rb') as f:
header = f.read(10240)
headerend = header.find(b'\0')
if headerend == -1:
raise RuntimeError('End of header null not found')
headerxml = ElementTree.fromstring(header[:headerend])
nedfversion = headerxml.findtext('NEDFversion', '')
if nedfversion not in ['1.3', '1.4']:
print('Unexpected NEDFversion, hope this works anyway')
if headerxml.findtext('AdditionalChannelStatus', 'OFF') != 'OFF':
raise RuntimeError('Unknown additional channel, aborting.')
n_acc = int(headerxml.findtext('NumberOfChannelsOfAccelerometer', 0))
if n_acc:
# expect one sample of u16 accelerometer data per block
dt.append(('acc', '>u2', (n_acc,)))
eegset = headerxml.find('EEGSettings')
if eegset is None:
raise RuntimeError('No EEG channels found')
nchantotal = int(eegset.find('TotalNumberOfChannels').text)
info['nchan'] = nchantotal
info['sfreq'] = int(eegset.find('EEGSamplingRate').text)
info['ch_names'] = [e.text for e in eegset.find('EEGMontage')]
# expect nchantotal uint24s
datadt.append(('eeg', 'B', (nchantotal, 3)))
info['units'] = eegset.find('EEGUnits')
if headerxml.find('STIMSettings'):
# 2* -> two stim samples per eeg sample
datadt.append(('stim', 'B', (2, nchantotal, 3)))
if 'AdditionalChannelStatus' in headerxml:
raise RuntimeError('Unexpected AdditionalChannelStatus')
if headerxml.findtext('stepDetails/DeviceClass', '') == 'STARSTIM':
print('Found Starstim, not sure how to handle this')
# Trigger data: 4 bytes in newer versions, 1 byte in older versions
trigger_type = '>i4' if headerxml.findtext('NEDFversion') else 'B'
datadt.append(('trig', trigger_type))
# 5 data samples per block
dt.append(('data', np.dtype(datadt), (5,)))
date = headerxml.findtext('StepDetails/StartDate_firstEEGTimestamp', '')
info['meas_date'] = datetime.datetime.utcfromtimestamp(int(date) / 1000)
return info, np.dtype(dt)
|
31,242 |
def clone_team():
team_id = demisto.args().get('team_id')
parts_to_clone = []
if argToBoolean(demisto.args().get('clone_apps', 'true')):
parts_to_clone.append('apps')
if argToBoolean(demisto.args().get('clone_tabs', 'true')):
parts_to_clone.append('tabs')
if argToBoolean(demisto.args().get('clone_settings', 'true')):
parts_to_clone.append('settings')
if argToBoolean(demisto.args().get('clone_channels', 'true')):
parts_to_clone.append('channels')
if not parts_to_clone:
raise ValueError('At least one of the parts of the team must be cloned: apps, tabs, settings, channels')
clone_team_request(
team_id=team_id,
display_name=demisto.args().get('display_name'),
description=demisto.args().get('description'),
visibility=demisto.args().get('visibility'),
parts_to_clone=','.join(parts_to_clone),
)
demisto.results(f'Team {team_id} was cloned successfully.')
|
def clone_team():
team_id = demisto.args().get('team_id')
parts_to_clone = []
if argToBoolean(demisto.args().get('clone_apps', 'true')):
parts_to_clone.append('apps')
if argToBoolean(demisto.args().get('clone_tabs', 'true')):
parts_to_clone.append('tabs')
if argToBoolean(demisto.args().get('clone_settings', 'true')):
parts_to_clone.append('settings')
if argToBoolean(demisto.args().get('clone_channels', 'true')):
parts_to_clone.append('channels')
if not parts_to_clone:
raise ValueError('At least one of the parts of the team must be cloned: apps, tabs, settings, channels')
clone_team_request(
team_id=team_id,
display_name=demisto.args().get('display_name'),
description=demisto.args().get('description'),
visibility=demisto.args().get('visibility'),
parts_to_clone=','.join(parts_to_clone),
)
return_results(f'Team {team_id} was cloned successfully.')
|
32,875 |
def get_version():
try:
from ._version import version
return version
except Exception:
try:
# something went wrong while creating _version.py, let's fallback to pkg_resources
import pkg_resources
return pkg_resources.get_distribution(__name__).version
except Exception:
# package is not installed
return "dev"
|
def get_version():
try:
from ._version import version
return version
except ImportError:
try:
# something went wrong while creating _version.py, let's fallback to pkg_resources
import pkg_resources
return pkg_resources.get_distribution(__name__).version
except Exception:
# package is not installed
return "dev"
|
13,565 |
def QR_iteration(H, shifts):
"""Perform the QR iteration.
Performs a QR step for each shift provided in `shifts`. `H` is assumed to be an
unreduced upper Hessenberg matrix. If a complex shift occurs a double step is
peformed in order to avoid complex arithmetic.
Parameters
----------
H
The |NumPy array| H which is an unreduced upper Hessenberg matrix.
shifts
A |NumPy array| which contains the shifts that are to be applied in the QR steps.
Returns
-------
Hs
A |NumPy array| in upper Hessenberg form such that it holds :math:`H Q_s = Q_s H_s`.
Qs
The product of the orthogonal matrices computed in each QR step.
"""
Qs = np.eye(len(H))
i = 0
while i < len(shifts) - 1:
s = shifts[i]
if shifts[i].imag != 0:
Q, R = np.linalg.qr(H @ H - 2 * np.real(s) * H + np.abs(s)**2 * np.eye(len(H)))
i = i + 2
else:
Q, R = np.linalg.qr(H - s * np.eye(len(H)))
i = i + 1
Qs = Qs @ Q
H = Q.T @ H @ Q
return H, Qs
|
def QR_iteration(H, shifts):
"""Perform the QR iteration.
Performs a QR step for each shift provided in `shifts`. `H` is assumed to be an
unreduced upper Hessenberg matrix. If a complex shift occurs a double step is
peformed in order to avoid complex arithmetic.
Parameters
----------
H
The |NumPy array| H which is an unreduced upper Hessenberg matrix.
shifts
A |NumPy array| which contains the shifts that are to be applied in the QR steps.
Returns
-------
Hs
A |NumPy array| in upper Hessenberg form such that it holds :math:`H Q_s = Q_s H_s`.
Qs
The product of the orthogonal matrices computed in each QR step.
"""
Qs = np.eye(len(H))
i = 0
while i < len(shifts) - 1:
s = shifts[i]
if shifts[i].imag != 0:
Q, R = np.linalg.qr(H @ H - 2 * np.real(s) * H + np.abs(s)**2 * np.eye(len(H)))
i = i + 2
else:
Q, R = np.linalg.qr(H - s * np.eye(len(H)))
i += 1
Qs = Qs @ Q
H = Q.T @ H @ Q
return H, Qs
|
22,288 |
def stream_to_open_named_file(stream, fd, filename, source_encoding=None, source_error='strict', target_encoding=None, target_error='strict'):
"""Writes a stream to the provided file descriptor, returns the file name. Closes file descriptor"""
# signature and behavor is somewhat odd, due to backwards compatibility, but this can/should be done better
CHUNK_SIZE = 1048576
try:
codecs.lookup(target_encoding)
except Exception:
target_encoding = util.DEFAULT_ENCODING # utf-8
try_source_encoding = True
while True:
chunk = stream.read(CHUNK_SIZE)
if not chunk:
break
if source_encoding is not None and try_source_encoding:
# If a source encoding is given we use it to convert to the target encoding
try:
if not isinstance(chunk, text_type):
chunk = chunk.decode(source_encoding, source_error)
os.write(fd, chunk.encode(target_encoding, target_error))
except UnicodeDecodeError:
try_source_encoding = False
os.write(fd, chunk)
else:
# Compressed files must be encoded after they are uncompressed in the upload utility,
# while binary files should not be encoded at all.
if isinstance(chunk, text_type):
chunk = chunk.encode(target_encoding, target_error)
os.write(fd, chunk)
os.close(fd)
return filename
|
def stream_to_open_named_file(stream, fd, filename, source_encoding=None, source_error='strict', target_encoding=None, target_error='strict'):
"""Writes a stream to the provided file descriptor, returns the file name. Closes file descriptor"""
# signature and behavor is somewhat odd, due to backwards compatibility, but this can/should be done better
CHUNK_SIZE = 1048576
try:
codecs.lookup(target_encoding)
except Exception:
target_encoding = util.DEFAULT_ENCODING # utf-8
use_source_encoding = source_encoding is not None
while True:
chunk = stream.read(CHUNK_SIZE)
if not chunk:
break
if source_encoding is not None and try_source_encoding:
# If a source encoding is given we use it to convert to the target encoding
try:
if not isinstance(chunk, text_type):
chunk = chunk.decode(source_encoding, source_error)
os.write(fd, chunk.encode(target_encoding, target_error))
except UnicodeDecodeError:
try_source_encoding = False
os.write(fd, chunk)
else:
# Compressed files must be encoded after they are uncompressed in the upload utility,
# while binary files should not be encoded at all.
if isinstance(chunk, text_type):
chunk = chunk.encode(target_encoding, target_error)
os.write(fd, chunk)
os.close(fd)
return filename
|
15,404 |
def get_entities(onewirehub: OneWireHub, config):
"""Get a list of entities."""
entities = []
device_names = {}
if CONF_NAMES in config:
if isinstance(config[CONF_NAMES], dict):
device_names = config[CONF_NAMES]
conf_type = config[CONF_TYPE]
# We have an owserver on a remote(or local) host/port
if conf_type == CONF_TYPE_OWSERVER:
for device in onewirehub.devices:
family = device["family"]
device_type = device["type"]
device_id = os.path.split(os.path.split(device["path"])[0])[1]
dev_type = "std"
device_path = device["path"]
if "EF" in family:
dev_type = "HobbyBoard"
family = device_type
if "7E" in family:
dev_type = "EDS00xx"
family = onewirehub.owproxy.read(f"{device_path}device_type").decode()
if family not in hb_info_from_type(dev_type):
_LOGGER.warning(
"Ignoring unknown family (%s) of sensor found for device: %s",
family,
device_id,
)
continue
device_info = {
"identifiers": {(DOMAIN, device_id)},
"manufacturer": "Maxim Integrated",
"model": device_type,
"name": device_id,
}
for entity_specs in hb_info_from_type(dev_type)[family]:
if entity_specs["type"] == SENSOR_TYPE_MOISTURE:
s_id = entity_specs["path"].split(".")[1]
is_leaf = int(
onewirehub.owproxy.read(
f"{device['path']}moisture/is_leaf.{s_id}"
).decode()
)
if is_leaf:
entity_specs["type"] = SENSOR_TYPE_WETNESS
entity_specs["name"] = f"Wetness {s_id}"
entity_path = os.path.join(
os.path.split(device["path"])[0], entity_specs["path"]
)
entities.append(
OneWireProxySensor(
device_id=device_id,
device_name=device_names.get(device_id, device_id),
device_info=device_info,
entity_path=entity_path,
entity_specs=entity_specs,
owproxy=onewirehub.owproxy,
)
)
# We have a raw GPIO ow sensor on a Pi
elif conf_type == CONF_TYPE_SYSBUS:
base_dir = config[CONF_MOUNT_DIR]
_LOGGER.debug("Initializing using SysBus %s", base_dir)
for p1sensor in onewirehub.devices:
family = p1sensor.mac_address[:2]
sensor_id = f"{family}-{p1sensor.mac_address[2:]}"
if family not in DEVICE_SUPPORT_SYSBUS:
_LOGGER.warning(
"Ignoring unknown family (%s) of sensor found for device: %s",
family,
sensor_id,
)
continue
device_info = {
"identifiers": {(DOMAIN, sensor_id)},
"manufacturer": "Maxim Integrated",
"model": family,
"name": sensor_id,
}
device_file = f"/sys/bus/w1/devices/{sensor_id}/w1_slave"
entities.append(
OneWireDirectSensor(
device_names.get(sensor_id, sensor_id),
device_file,
device_info,
p1sensor,
)
)
if not entities:
_LOGGER.error(
"No onewire sensor found. Check if dtoverlay=w1-gpio "
"is in your /boot/config.txt. "
"Check the mount_dir parameter if it's defined"
)
# We have an owfs mounted
else: # pragma: no cover
# This part of the implementation does not conform to policy regarding 3rd-party libraries, and will not longer be updated.
# https://developers.home-assistant.io/docs/creating_platform_code_review/#5-communication-with-devicesservices
base_dir = config[CONF_MOUNT_DIR]
_LOGGER.debug("Initializing using OWFS %s", base_dir)
_LOGGER.warning(
"The OWFS implementation of 1-Wire sensors is deprecated, "
"and should be migrated to OWServer (on localhost:4304). "
"If migration to OWServer is not feasible on your installation, "
"please raise an issue at https://github.com/home-assistant/core/issues/new"
"?title=Unable%20to%20migrate%20onewire%20from%20OWFS%20to%20OWServer",
)
for family_file_path in glob(os.path.join(base_dir, "*", "family")):
with open(family_file_path) as family_file:
family = family_file.read()
if "EF" in family:
continue
if family in DEVICE_SENSORS:
for sensor_key, sensor_value in DEVICE_SENSORS[family].items():
sensor_id = os.path.split(os.path.split(family_file_path)[0])[1]
device_file = os.path.join(
os.path.split(family_file_path)[0], sensor_value
)
entities.append(
OneWireOWFSSensor(
device_names.get(sensor_id, sensor_id),
device_file,
sensor_key,
)
)
return entities
|
def get_entities(onewirehub: OneWireHub, config):
"""Get a list of entities."""
entities = []
device_names = {}
if CONF_NAMES in config:
if isinstance(config[CONF_NAMES], dict):
device_names = config[CONF_NAMES]
conf_type = config[CONF_TYPE]
# We have an owserver on a remote(or local) host/port
if conf_type == CONF_TYPE_OWSERVER:
for device in onewirehub.devices:
family = device["family"]
device_type = device["type"]
device_id = os.path.split(os.path.split(device["path"])[0])[1]
dev_type = "std"
device_path = device["path"]
if "EF" in family:
dev_type = "HobbyBoard"
family = device_type
if "7E" in family:
dev_type = "EDS00xx"
family = onewirehub.owproxy.read(f"{device_path}device_type").decode()
if family not in get_sensor_types(device_sub_type):
_LOGGER.warning(
"Ignoring unknown family (%s) of sensor found for device: %s",
family,
device_id,
)
continue
device_info = {
"identifiers": {(DOMAIN, device_id)},
"manufacturer": "Maxim Integrated",
"model": device_type,
"name": device_id,
}
for entity_specs in hb_info_from_type(dev_type)[family]:
if entity_specs["type"] == SENSOR_TYPE_MOISTURE:
s_id = entity_specs["path"].split(".")[1]
is_leaf = int(
onewirehub.owproxy.read(
f"{device['path']}moisture/is_leaf.{s_id}"
).decode()
)
if is_leaf:
entity_specs["type"] = SENSOR_TYPE_WETNESS
entity_specs["name"] = f"Wetness {s_id}"
entity_path = os.path.join(
os.path.split(device["path"])[0], entity_specs["path"]
)
entities.append(
OneWireProxySensor(
device_id=device_id,
device_name=device_names.get(device_id, device_id),
device_info=device_info,
entity_path=entity_path,
entity_specs=entity_specs,
owproxy=onewirehub.owproxy,
)
)
# We have a raw GPIO ow sensor on a Pi
elif conf_type == CONF_TYPE_SYSBUS:
base_dir = config[CONF_MOUNT_DIR]
_LOGGER.debug("Initializing using SysBus %s", base_dir)
for p1sensor in onewirehub.devices:
family = p1sensor.mac_address[:2]
sensor_id = f"{family}-{p1sensor.mac_address[2:]}"
if family not in DEVICE_SUPPORT_SYSBUS:
_LOGGER.warning(
"Ignoring unknown family (%s) of sensor found for device: %s",
family,
sensor_id,
)
continue
device_info = {
"identifiers": {(DOMAIN, sensor_id)},
"manufacturer": "Maxim Integrated",
"model": family,
"name": sensor_id,
}
device_file = f"/sys/bus/w1/devices/{sensor_id}/w1_slave"
entities.append(
OneWireDirectSensor(
device_names.get(sensor_id, sensor_id),
device_file,
device_info,
p1sensor,
)
)
if not entities:
_LOGGER.error(
"No onewire sensor found. Check if dtoverlay=w1-gpio "
"is in your /boot/config.txt. "
"Check the mount_dir parameter if it's defined"
)
# We have an owfs mounted
else: # pragma: no cover
# This part of the implementation does not conform to policy regarding 3rd-party libraries, and will not longer be updated.
# https://developers.home-assistant.io/docs/creating_platform_code_review/#5-communication-with-devicesservices
base_dir = config[CONF_MOUNT_DIR]
_LOGGER.debug("Initializing using OWFS %s", base_dir)
_LOGGER.warning(
"The OWFS implementation of 1-Wire sensors is deprecated, "
"and should be migrated to OWServer (on localhost:4304). "
"If migration to OWServer is not feasible on your installation, "
"please raise an issue at https://github.com/home-assistant/core/issues/new"
"?title=Unable%20to%20migrate%20onewire%20from%20OWFS%20to%20OWServer",
)
for family_file_path in glob(os.path.join(base_dir, "*", "family")):
with open(family_file_path) as family_file:
family = family_file.read()
if "EF" in family:
continue
if family in DEVICE_SENSORS:
for sensor_key, sensor_value in DEVICE_SENSORS[family].items():
sensor_id = os.path.split(os.path.split(family_file_path)[0])[1]
device_file = os.path.join(
os.path.split(family_file_path)[0], sensor_value
)
entities.append(
OneWireOWFSSensor(
device_names.get(sensor_id, sensor_id),
device_file,
sensor_key,
)
)
return entities
|
55,180 |
def test_docstring_example_of_operator_class(tol):
"""Tests an example of how to create an operator which is used in the
Operator class docstring, as well as in the 'adding_operators'
page in the developer guide."""
import pennylane as qml
class FlipAndRotate(qml.operation.Operation):
num_wires = qml.operation.AnyWires
grad_method = "A"
def __init__(self, angle, wire_rot, wire_flip=None, do_flip=False, do_queue=True, id=None):
if do_flip and wire_flip is None:
raise ValueError("Expected a wire to flip; got None.")
self._hyperparameters = {"do_flip": do_flip}
all_wires = qml.wires.Wires(wire_rot) + qml.wires.Wires(wire_flip)
super().__init__(angle, wires=all_wires, do_queue=do_queue, id=id)
@property
def num_params(self):
return 1
@property
def ndim_params(self):
return (0,)
@staticmethod
def compute_decomposition(angle, wires, do_flip): # pylint: disable=arguments-differ
op_list = []
if do_flip:
op_list.append(qml.PauliX(wires=wires[1]))
op_list.append(qml.RX(angle, wires=wires[0]))
return op_list
def adjoint(self):
return FlipAndRotate(
-self.parameters[0],
self.wires[0],
self.wires[1],
do_flip=self.hyperparameters["do_flip"],
)
dev = qml.device("default.qubit", wires=["q1", "q2", "q3"])
@qml.qnode(dev)
def circuit(angle):
FlipAndRotate(angle, wire_rot="q1", wire_flip="q1")
return qml.expval(qml.PauliZ("q1"))
a = np.array(3.14)
res = circuit(a)
expected = -0.9999987318946099
assert np.allclose(res, expected, atol=tol)
|
def test_docstring_example_of_operator_class(tol):
"""Tests an example of how to create an operator which is used in the
Operator class docstring, as well as in the 'adding_operators'
page in the developer guide."""
import pennylane as qml
class FlipAndRotate(qml.operation.Operation):
num_wires = qml.operation.AnyWires
grad_method = "A"
def __init__(self, angle, wire_rot, wire_flip=None, do_flip=False, do_queue=True, id=None):
if do_flip and wire_flip is None:
raise ValueError("Expected a wire to flip; got None.")
self._hyperparameters = {"do_flip": do_flip}
all_wires = qml.wires.Wires(wire_rot) + qml.wires.Wires(wire_flip)
super().__init__(angle, wires=all_wires, do_queue=do_queue, id=id)
@property
def num_params(self):
return 1
@property
def ndim_params(self):
return (0,)
@staticmethod
def compute_decomposition(angle, wires, do_flip): # pylint: disable=arguments-differ
op_list = []
if do_flip:
op_list.append(qml.PauliX(wires=wires[1]))
op_list.append(qml.RX(angle, wires=wires[0]))
return op_list
def adjoint(self):
return FlipAndRotate(
-self.parameters[0],
self.wires[0],
self.wires[1],
do_flip=self.hyperparameters["do_flip"],
)
dev = qml.device("default.qubit", wires=["q1", "q2", "q3"])
@qml.qnode(dev)
def circuit(angle):
FlipAndRotate(angle, wire_rot="q1", wire_flip="q1")
return qml.expval(qml.PauliZ("q1"))
a = np.array(3.14)
res = circuit(a)
expected = -0.9999987318946099
assert np.allclose(res, expected, atol=tol)
|
56,732 |
def _draw_value(param, point=None, givens=None, size=None):
"""Draw a random value from a distribution or return a constant.
Parameters
----------
param: number, array like, theano variable or pymc3 random variable
The value or distribution. Constants or shared variables
will be converted to an array and returned. Theano variables
are evaluated. If `param` is a pymc3 random variables, draw
a new value from it and return that, unless a value is specified
in `point`.
point: dict, optional
A dictionary from pymc3 variable names to their values.
givens: dict, optional
A dictionary from theano variables to their values. These values
are used to evaluate `param` if it is a theano variable.
size: int, optional
Number of samples
"""
if isinstance(param, (numbers.Number, np.ndarray)):
return param
elif isinstance(param, theano_constant):
return param.value
elif isinstance(param, tt.sharedvar.SharedVariable):
return param.get_value()
elif isinstance(param, (tt.TensorVariable, MultiObservedRV)):
if point and hasattr(param, 'model') and param.name in point:
return point[param.name]
elif hasattr(param, 'random') and param.random is not None:
print(point)
return param.random(point=point, size=size)
elif (hasattr(param, 'distribution') and
hasattr(param.distribution, 'random') and
param.distribution.random is not None):
if hasattr(param, 'observations'):
# shape inspection for ObservedRV
dist_tmp = param.distribution
try:
distshape = param.observations.shape.eval()
except AttributeError:
distshape = param.observations.shape
dist_tmp.shape = distshape
try:
return dist_tmp.random(point=point, size=size)
except (ValueError, TypeError):
# reset shape to account for shape changes
# with theano.shared inputs
dist_tmp.shape = np.array([])
# We want to draw values to infer the dist_shape,
# we don't want to store these drawn values to the context
with _DrawValuesContextBlocker():
val = np.atleast_1d(dist_tmp.random(point=point,
size=None))
# Sometimes point may change the size of val but not the
# distribution's shape
if point and size is not None:
temp_size = np.atleast_1d(size)
if all(val.shape[:len(temp_size)] == temp_size):
dist_tmp.shape = val.shape[len(temp_size):]
else:
dist_tmp.shape = val.shape
return dist_tmp.random(point=point, size=size)
else:
return param.distribution.random(point=point, size=size)
else:
if givens:
variables, values = list(zip(*givens))
else:
variables = values = []
# We only truly care if the ancestors of param that were given
# value have the matching dshape and val.shape
param_ancestors = \
set(theano.gof.graph.ancestors([param],
blockers=list(variables))
)
inputs = [(var, val) for var, val in
zip(variables, values)
if var in param_ancestors]
if inputs:
input_vars, input_vals = list(zip(*inputs))
else:
input_vars = []
input_vals = []
func = _compile_theano_function(param, input_vars)
output = func(*input_vals)
return output
raise ValueError('Unexpected type in draw_value: %s' % type(param))
|
def _draw_value(param, point=None, givens=None, size=None):
"""Draw a random value from a distribution or return a constant.
Parameters
----------
param: number, array like, theano variable or pymc3 random variable
The value or distribution. Constants or shared variables
will be converted to an array and returned. Theano variables
are evaluated. If `param` is a pymc3 random variables, draw
a new value from it and return that, unless a value is specified
in `point`.
point: dict, optional
A dictionary from pymc3 variable names to their values.
givens: dict, optional
A dictionary from theano variables to their values. These values
are used to evaluate `param` if it is a theano variable.
size: int, optional
Number of samples
"""
if isinstance(param, (numbers.Number, np.ndarray)):
return param
elif isinstance(param, theano_constant):
return param.value
elif isinstance(param, tt.sharedvar.SharedVariable):
return param.get_value()
elif isinstance(param, (tt.TensorVariable, MultiObservedRV)):
if point and hasattr(param, 'model') and param.name in point:
return point[param.name]
elif hasattr(param, 'random') and param.random is not None:
return param.random(point=point, size=size)
elif (hasattr(param, 'distribution') and
hasattr(param.distribution, 'random') and
param.distribution.random is not None):
if hasattr(param, 'observations'):
# shape inspection for ObservedRV
dist_tmp = param.distribution
try:
distshape = param.observations.shape.eval()
except AttributeError:
distshape = param.observations.shape
dist_tmp.shape = distshape
try:
return dist_tmp.random(point=point, size=size)
except (ValueError, TypeError):
# reset shape to account for shape changes
# with theano.shared inputs
dist_tmp.shape = np.array([])
# We want to draw values to infer the dist_shape,
# we don't want to store these drawn values to the context
with _DrawValuesContextBlocker():
val = np.atleast_1d(dist_tmp.random(point=point,
size=None))
# Sometimes point may change the size of val but not the
# distribution's shape
if point and size is not None:
temp_size = np.atleast_1d(size)
if all(val.shape[:len(temp_size)] == temp_size):
dist_tmp.shape = val.shape[len(temp_size):]
else:
dist_tmp.shape = val.shape
return dist_tmp.random(point=point, size=size)
else:
return param.distribution.random(point=point, size=size)
else:
if givens:
variables, values = list(zip(*givens))
else:
variables = values = []
# We only truly care if the ancestors of param that were given
# value have the matching dshape and val.shape
param_ancestors = \
set(theano.gof.graph.ancestors([param],
blockers=list(variables))
)
inputs = [(var, val) for var, val in
zip(variables, values)
if var in param_ancestors]
if inputs:
input_vars, input_vals = list(zip(*inputs))
else:
input_vars = []
input_vals = []
func = _compile_theano_function(param, input_vars)
output = func(*input_vals)
return output
raise ValueError('Unexpected type in draw_value: %s' % type(param))
|
34,710 |
def test_get_latest_model(tmpdir: Path):
path = tmpdir / "test_get_latest_model"
path.mkdir()
Path(path / "model_one.tar.gz").touch()
# create second model later to be registered as distinct in Windows
time.sleep(0.1)
Path(path / "model_two.tar.gz").touch()
path_of_latest = os.path.join(path, "model_two.tar.gz")
assert get_latest_model(str(path)) == path_of_latest
|
def test_get_latest_model(tmp_path: Path):
path = tmpdir / "test_get_latest_model"
path.mkdir()
Path(path / "model_one.tar.gz").touch()
# create second model later to be registered as distinct in Windows
time.sleep(0.1)
Path(path / "model_two.tar.gz").touch()
path_of_latest = os.path.join(path, "model_two.tar.gz")
assert get_latest_model(str(path)) == path_of_latest
|
50,859 |
def get_entities(onewirehub: OneWireHub, config):
"""Get a list of entities."""
entities = []
device_names = {}
if CONF_NAMES in config:
if isinstance(config[CONF_NAMES], dict):
device_names = config[CONF_NAMES]
conf_type = config[CONF_TYPE]
# We have an owserver on a remote(or local) host/port
if conf_type == CONF_TYPE_OWSERVER:
for device in onewirehub.devices:
family = device["family"]
device_type = device["type"]
device_id = os.path.split(os.path.split(device["path"])[0])[1]
dev_type = "std"
device_path = device["path"]
if "EF" in family:
dev_type = "HobbyBoard"
family = device_type
if "7E" in family:
dev_type = "EDS00xx"
family = onewirehub.owproxy.read(f"{device_path}device_type").decode()
if family not in hb_info_from_type(dev_type):
_LOGGER.warning(
"Ignoring unknown family (%s) of sensor found for device: %s",
family,
device_id,
)
continue
device_info = {
"identifiers": {(DOMAIN, device_id)},
"manufacturer": "Maxim Integrated",
"model": device_type,
"name": device_id,
}
for entity_specs in hb_info_from_type(dev_type)[family]:
if entity_specs["type"] == SENSOR_TYPE_MOISTURE:
s_id = entity_specs["path"].split(".")[1]
is_leaf = int(
onewirehub.owproxy.read(
f"{device['path']}moisture/is_leaf.{s_id}"
).decode()
)
if is_leaf:
entity_specs["type"] = SENSOR_TYPE_WETNESS
entity_specs["name"] = f"Wetness {s_id}"
entity_path = os.path.join(
os.path.split(device["path"])[0], entity_specs["path"]
)
entities.append(
OneWireProxySensor(
device_id=device_id,
device_name=device_names.get(device_id, device_id),
device_info=device_info,
entity_path=entity_path,
entity_specs=entity_specs,
owproxy=onewirehub.owproxy,
)
)
# We have a raw GPIO ow sensor on a Pi
elif conf_type == CONF_TYPE_SYSBUS:
base_dir = config[CONF_MOUNT_DIR]
_LOGGER.debug("Initializing using SysBus %s", base_dir)
for p1sensor in onewirehub.devices:
family = p1sensor.mac_address[:2]
sensor_id = f"{family}-{p1sensor.mac_address[2:]}"
if family not in DEVICE_SUPPORT_SYSBUS:
_LOGGER.warning(
"Ignoring unknown family (%s) of sensor found for device: %s",
family,
sensor_id,
)
continue
device_info = {
"identifiers": {(DOMAIN, sensor_id)},
"manufacturer": "Maxim Integrated",
"model": family,
"name": sensor_id,
}
device_file = f"/sys/bus/w1/devices/{sensor_id}/w1_slave"
entities.append(
OneWireDirectSensor(
device_names.get(sensor_id, sensor_id),
device_file,
device_info,
p1sensor,
)
)
if not entities:
_LOGGER.error(
"No onewire sensor found. Check if dtoverlay=w1-gpio "
"is in your /boot/config.txt. "
"Check the mount_dir parameter if it's defined"
)
# We have an owfs mounted
else: # pragma: no cover
# This part of the implementation does not conform to policy regarding 3rd-party libraries, and will not longer be updated.
# https://developers.home-assistant.io/docs/creating_platform_code_review/#5-communication-with-devicesservices
base_dir = config[CONF_MOUNT_DIR]
_LOGGER.debug("Initializing using OWFS %s", base_dir)
_LOGGER.warning(
"The OWFS implementation of 1-Wire sensors is deprecated, "
"and should be migrated to OWServer (on localhost:4304). "
"If migration to OWServer is not feasible on your installation, "
"please raise an issue at https://github.com/home-assistant/core/issues/new"
"?title=Unable%20to%20migrate%20onewire%20from%20OWFS%20to%20OWServer",
)
for family_file_path in glob(os.path.join(base_dir, "*", "family")):
with open(family_file_path) as family_file:
family = family_file.read()
if "EF" in family:
continue
if family in DEVICE_SENSORS:
for sensor_key, sensor_value in DEVICE_SENSORS[family].items():
sensor_id = os.path.split(os.path.split(family_file_path)[0])[1]
device_file = os.path.join(
os.path.split(family_file_path)[0], sensor_value
)
entities.append(
OneWireOWFSSensor(
device_names.get(sensor_id, sensor_id),
device_file,
sensor_key,
)
)
return entities
|
def get_entities(onewirehub: OneWireHub, config):
"""Get a list of entities."""
entities = []
device_names = {}
if CONF_NAMES in config:
if isinstance(config[CONF_NAMES], dict):
device_names = config[CONF_NAMES]
conf_type = config[CONF_TYPE]
# We have an owserver on a remote(or local) host/port
if conf_type == CONF_TYPE_OWSERVER:
for device in onewirehub.devices:
family = device["family"]
device_type = device["type"]
device_id = os.path.split(os.path.split(device["path"])[0])[1]
dev_type = "std"
device_path = device["path"]
if "EF" in family:
dev_type = "HobbyBoard"
family = device_type
elif "7E" in family:
device_sub_type = "EDS"
family = onewirehub.owproxy.read(f"{device_path}device_type").decode()
if family not in hb_info_from_type(dev_type):
_LOGGER.warning(
"Ignoring unknown family (%s) of sensor found for device: %s",
family,
device_id,
)
continue
device_info = {
"identifiers": {(DOMAIN, device_id)},
"manufacturer": "Maxim Integrated",
"model": device_type,
"name": device_id,
}
for entity_specs in hb_info_from_type(dev_type)[family]:
if entity_specs["type"] == SENSOR_TYPE_MOISTURE:
s_id = entity_specs["path"].split(".")[1]
is_leaf = int(
onewirehub.owproxy.read(
f"{device['path']}moisture/is_leaf.{s_id}"
).decode()
)
if is_leaf:
entity_specs["type"] = SENSOR_TYPE_WETNESS
entity_specs["name"] = f"Wetness {s_id}"
entity_path = os.path.join(
os.path.split(device["path"])[0], entity_specs["path"]
)
entities.append(
OneWireProxySensor(
device_id=device_id,
device_name=device_names.get(device_id, device_id),
device_info=device_info,
entity_path=entity_path,
entity_specs=entity_specs,
owproxy=onewirehub.owproxy,
)
)
# We have a raw GPIO ow sensor on a Pi
elif conf_type == CONF_TYPE_SYSBUS:
base_dir = config[CONF_MOUNT_DIR]
_LOGGER.debug("Initializing using SysBus %s", base_dir)
for p1sensor in onewirehub.devices:
family = p1sensor.mac_address[:2]
sensor_id = f"{family}-{p1sensor.mac_address[2:]}"
if family not in DEVICE_SUPPORT_SYSBUS:
_LOGGER.warning(
"Ignoring unknown family (%s) of sensor found for device: %s",
family,
sensor_id,
)
continue
device_info = {
"identifiers": {(DOMAIN, sensor_id)},
"manufacturer": "Maxim Integrated",
"model": family,
"name": sensor_id,
}
device_file = f"/sys/bus/w1/devices/{sensor_id}/w1_slave"
entities.append(
OneWireDirectSensor(
device_names.get(sensor_id, sensor_id),
device_file,
device_info,
p1sensor,
)
)
if not entities:
_LOGGER.error(
"No onewire sensor found. Check if dtoverlay=w1-gpio "
"is in your /boot/config.txt. "
"Check the mount_dir parameter if it's defined"
)
# We have an owfs mounted
else: # pragma: no cover
# This part of the implementation does not conform to policy regarding 3rd-party libraries, and will not longer be updated.
# https://developers.home-assistant.io/docs/creating_platform_code_review/#5-communication-with-devicesservices
base_dir = config[CONF_MOUNT_DIR]
_LOGGER.debug("Initializing using OWFS %s", base_dir)
_LOGGER.warning(
"The OWFS implementation of 1-Wire sensors is deprecated, "
"and should be migrated to OWServer (on localhost:4304). "
"If migration to OWServer is not feasible on your installation, "
"please raise an issue at https://github.com/home-assistant/core/issues/new"
"?title=Unable%20to%20migrate%20onewire%20from%20OWFS%20to%20OWServer",
)
for family_file_path in glob(os.path.join(base_dir, "*", "family")):
with open(family_file_path) as family_file:
family = family_file.read()
if "EF" in family:
continue
if family in DEVICE_SENSORS:
for sensor_key, sensor_value in DEVICE_SENSORS[family].items():
sensor_id = os.path.split(os.path.split(family_file_path)[0])[1]
device_file = os.path.join(
os.path.split(family_file_path)[0], sensor_value
)
entities.append(
OneWireOWFSSensor(
device_names.get(sensor_id, sensor_id),
device_file,
sensor_key,
)
)
return entities
|
8,900 |
def test_list_parse_legacy_coma_strip():
option = types.ListAttribute('foo', strip=False)
assert option.parse("""value 1, # value 2 , value 3""") == [
'value 1',
' # value 2 ',
' value 3',
]
|
def test_list_parse_legacy_comma_no_strip():
option = types.ListAttribute('foo', strip=False)
assert option.parse("""value 1, # value 2 , value 3""") == [
'value 1',
' # value 2 ',
' value 3',
]
|
2,448 |
def linear_kernel(X, Y=None, dense_output=True):
"""
Compute the linear kernel between X and Y.
Read more in the :ref:`User Guide <linear_kernel>`.
Parameters
----------
X : ndarray of shape (n_samples_X, n_features)
Array of pairwise kernels between samples, or a feature array.
Y : ndarray of shape (n_samples_Y, n_features), default=None
A second feature array. If `None`, uses `Y=X`.
dense_output : bool, default=True
Whether to return dense output even when the input is sparse. If
``False``, the output is sparse if both input arrays are sparse.
.. versionadded:: 0.20
Returns
-------
Gram matrix : ndarray of shape (n_samples_X, n_samples_Y)
Gram matrix X.T * Y.
"""
X, Y = check_pairwise_arrays(X, Y)
return safe_sparse_dot(X, Y.T, dense_output=dense_output)
|
def linear_kernel(X, Y=None, dense_output=True):
"""
Compute the linear kernel between X and Y.
Read more in the :ref:`User Guide <linear_kernel>`.
Parameters
----------
X : ndarray of shape (n_samples_X, n_features)
Array of pairwise kernels between samples, or a feature array.
Y : ndarray of shape (n_samples_Y, n_features), default=None
An optional second feature array. If `None`, uses `Y=X`.
dense_output : bool, default=True
Whether to return dense output even when the input is sparse. If
``False``, the output is sparse if both input arrays are sparse.
.. versionadded:: 0.20
Returns
-------
Gram matrix : ndarray of shape (n_samples_X, n_samples_Y)
Gram matrix X.T * Y.
"""
X, Y = check_pairwise_arrays(X, Y)
return safe_sparse_dot(X, Y.T, dense_output=dense_output)
|
8,850 |
def search(*patterns):
"""Decorate a function to be called when a pattern is found in a line.
:param str patterns: one or more regular expression(s)
Each argument is a regular expression which will trigger the function::
@search('hello', 'here')
# will trigger once on "hello you"
# will trigger twice on "hello here"
# will trigger once on "I'm right here!"
This decorator can be used multiple times to add more search rules::
@search('here')
@search('hello')
# will trigger once on "hello you"
# will trigger twice on "hello here" (once per expression)
# will trigger once on "I'm right here!"
If the Sopel instance is in a channel, or sent a PRIVMSG, where a part
of a string matching this expression is said, the function will execute.
Note that captured groups here will be retrievable through the
:class:`~sopel.trigger.Trigger` object later. The match will also contains
the position of the first instance found.
Inside the regular expression, some special directives can be used.
``$nick`` will be replaced with the nick of the bot and ``,`` or ``:``, and
``$nickname`` will be replaced with the nick of the bot::
@search('$nickname')
# will trigger once when the bot's nick is in a trigger
.. versionadded:: 7.1
.. note::
The regex rule will match for the first instance only, starting from
the left of the line, and the function will execute only once per
regular expression.
To match for each time the expression is found, use the :func:`find`
decorator instead. To match only once from the start of the line,
use the :func:`rule` decorator instead.
"""
def add_attribute(function):
if not hasattr(function, "search_rules"):
function.search_rules = []
for value in patterns:
if value not in function.search_rules:
function.search_rules.append(value)
return function
return add_attribute
|
def search(*patterns):
"""Decorate a function to be called when a pattern is found in a line.
:param str patterns: one or more regular expression(s)
Each argument is a regular expression which will trigger the function::
@search('hello', 'here')
# will trigger once on "hello you"
# will trigger twice on "hello here"
# will trigger once on "I'm right here!"
This decorator can be used multiple times to add more search rules::
@search('here')
@search('hello')
# will trigger once on "hello you"
# will trigger twice on "hello here" (once per expression)
# will trigger once on "I'm right here!"
If the Sopel instance is in a channel, or sent a PRIVMSG, where a part
of a string matching this expression is said, the function will execute.
Note that captured groups here will be retrievable through the
:class:`~sopel.trigger.Trigger` object later. The match will also contain
the position of the first instance found.
Inside the regular expression, some special directives can be used.
``$nick`` will be replaced with the nick of the bot and ``,`` or ``:``, and
``$nickname`` will be replaced with the nick of the bot::
@search('$nickname')
# will trigger once when the bot's nick is in a trigger
.. versionadded:: 7.1
.. note::
The regex rule will match for the first instance only, starting from
the left of the line, and the function will execute only once per
regular expression.
To match for each time the expression is found, use the :func:`find`
decorator instead. To match only once from the start of the line,
use the :func:`rule` decorator instead.
"""
def add_attribute(function):
if not hasattr(function, "search_rules"):
function.search_rules = []
for value in patterns:
if value not in function.search_rules:
function.search_rules.append(value)
return function
return add_attribute
|
49,731 |
def on_plugin_available(func: Callable = None,
plugin: Optional[str] = None):
"""
Method decorator used to handle plugin availability on Spyder.
The methods that use this decorator must have the following signature
`def method(self)` when observing a single plugin and
`def method(self, plugin): ...` when observing multiple plugins or
all the plugins that were listed as dependencies
Parameters
----------
func: Callable
Method to decorate. Given by default when applying the decorator.
plugin: Optional[str]
Name of the requested plugin whose availability triggers the method.
Returns
-------
func: Callable
The same method that was given as input.
"""
if func is None:
return functools.partial(on_plugin_available, plugin=plugin)
if plugin is None:
# Use special __all identifier to signal that the function
# observes all plugins listed as dependencies.
plugin = '__all'
func._plugin_listen = plugin
return func
|
def on_plugin_available(func: Callable = None,
plugin: Optional[str] = None):
"""
Method decorator used to handle plugin availability on Spyder.
The methods that use this decorator must have the following signature:
`def method(self)` when observing a single plugin or
`def method(self, plugin): ...` when observing multiple plugins or
all plugins that were listed as dependencies.
Parameters
----------
func: Callable
Method to decorate. Given by default when applying the decorator.
plugin: Optional[str]
Name of the requested plugin whose availability triggers the method.
Returns
-------
func: Callable
The same method that was given as input.
"""
if func is None:
return functools.partial(on_plugin_available, plugin=plugin)
if plugin is None:
# Use special __all identifier to signal that the function
# observes all plugins listed as dependencies.
plugin = '__all'
func._plugin_listen = plugin
return func
|
27,023 |
def downgrade():
with op.batch_alter_table('serialized_dag') as batch_op:
batch_op.alter_column('data', existing_type=sa.JSON, nullable=False)
op.drop_column('serialized_dag', 'data_compressed')
|
def downgrade():
with op.batch_alter_table('serialized_dag') as batch_op:
batch_op.alter_column('data', existing_type=sa.JSON, nullable=False)
batch_op.drop_column('data_compressed')
|
2,270 |
def f_classif(X, y):
"""Compute the ANOVA F-value for the provided sample.
Read more in the :ref:`User Guide <univariate_feature_selection>`.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The set of regressors that will be tested sequentially.
y : ndarray of shape (n_samples,)
The target vector.
Returns
-------
f_statistic : ndarray of shape of (n_features,)
F-statistic for each feature.
p_values : ndarray of shape (n_features,)
P-values associated with the F-statistic.
See Also
--------
chi2 : Chi-squared stats of non-negative features for classification tasks.
f_regression : F-value between label/feature for regression tasks.
"""
X, y = check_X_y(X, y, accept_sparse=['csr', 'csc', 'coo'])
args = [X[safe_mask(X, y == k)] for k in np.unique(y)]
return f_oneway(*args)
|
def f_classif(X, y):
"""Compute the ANOVA F-value for the provided sample.
Read more in the :ref:`User Guide <univariate_feature_selection>`.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The set of regressors that will be tested sequentially.
y : ndarray of shape (n_samples,)
The target vector.
Returns
-------
f_statistic : ndarray of shape (n_features,)
F-statistic for each feature.
p_values : ndarray of shape (n_features,)
P-values associated with the F-statistic.
See Also
--------
chi2 : Chi-squared stats of non-negative features for classification tasks.
f_regression : F-value between label/feature for regression tasks.
"""
X, y = check_X_y(X, y, accept_sparse=['csr', 'csc', 'coo'])
args = [X[safe_mask(X, y == k)] for k in np.unique(y)]
return f_oneway(*args)
|
2,584 |
def test_unsupervised_radius_neighbors(
global_dtype, n_samples=20, n_features=5, n_query_pts=2, radius=0.5, random_state=0
):
# Test unsupervised radius-based query
rng = np.random.RandomState(random_state)
X = rng.rand(n_samples, n_features).astype(global_dtype)
test = rng.rand(n_query_pts, n_features).astype(global_dtype)
for p in P:
results = []
for algorithm in ALGORITHMS:
neigh = neighbors.NearestNeighbors(radius=radius, algorithm=algorithm, p=p)
neigh.fit(X)
ind1 = neigh.radius_neighbors(test, return_distance=False)
# sort the results: this is not done automatically for
# radius searches
dist, ind = neigh.radius_neighbors(test, return_distance=True)
for (d, i, i1) in zip(dist, ind, ind1):
j = d.argsort()
d[:] = d[j]
i[:] = i[j]
i1[:] = i1[j]
results.append((dist, ind))
assert_allclose(np.concatenate(list(ind)), np.concatenate(list(ind1)))
for i in range(len(results) - 1):
assert_allclose(
np.concatenate(list(results[i][0])),
np.concatenate(list(results[i + 1][0])),
),
assert_allclose(
np.concatenate(list(results[i][1])),
np.concatenate(list(results[i + 1][1])),
)
|
def test_unsupervised_radius_neighbors(
global_dtype, n_samples=20, n_features=5, n_query_pts=2, radius=0.5, random_state=0
):
# Test unsupervised radius-based query
rng = np.random.RandomState(random_state)
X = rng.rand(n_samples, n_features).astype(global_dtype)
test = rng.rand(n_query_pts, n_features).astype(global_dtype)
for p in P:
results = []
for algorithm in ALGORITHMS:
neigh = neighbors.NearestNeighbors(radius=radius, algorithm=algorithm, p=p)
neigh.fit(X)
ind1 = neigh.radius_neighbors(test, return_distance=False)
# sort the results: this is not done automatically for
# radius searches
dist, ind = neigh.radius_neighbors(test, return_distance=True)
for (d, i, i1) in zip(dist, ind, ind1):
j = d.argsort()
d[:] = d[j]
i[:] = i[j]
i1[:] = i1[j]
results.append((dist, ind))
assert_array_equal(np.concatenate(list(ind)), np.concatenate(list(ind1)))
for i in range(len(results) - 1):
assert_allclose(
np.concatenate(list(results[i][0])),
np.concatenate(list(results[i + 1][0])),
),
assert_allclose(
np.concatenate(list(results[i][1])),
np.concatenate(list(results[i + 1][1])),
)
|
32,278 |
def panorama_commit_command(args: dict):
"""
Commit and show message in the war room
"""
use_polling = args.get('polling', 'false') == 'true'
interval_in_seconds = int(args.get('interval', '60'))
timeout = int(args.get('timeout', '600'))
job_id = args.get('job_id')
script_results = []
# Support polling
if use_polling:
ScheduledCommand.raise_error_if_not_supported()
# Create a new commit job
if not job_id:
result = panorama_commit(args)
if 'result' in result['response']:
# commit has been given a jobid
job_id = result['response']['result']['job']
commit_output = {
'JobID': job_id,
'Status': 'Pending',
'Description': args.get('description')
}
polling_args = {
'job_id': job_id,
'polling': 'true',
**args
}
scheduled_command = ScheduledCommand(
command=demisto.command(),
next_run_in_seconds=interval_in_seconds,
args=polling_args,
timeout_in_seconds=timeout
)
readable_output = f"Committing any pending changes (Job ID: {job_id})"
script_results.append(CommandResults(
outputs_prefix='Panorama.Commit',
outputs_key_field='JobID',
outputs=commit_output,
readable_output=readable_output,
scheduled_command=scheduled_command,
ignore_auto_extract=True
))
else:
script_results.append("There are no pending changes to commit.")
# Check existing job
else:
result = panorama_commit_status(args)
if result['response']['result']['job']['type'] != 'Commit':
raise Exception('JobID given is not of a commit.')
# Reschedule job if it's not complete
if result['response']['result']['job']['status'] != 'FIN':
polling_args = {
'job_id': job_id,
'polling': 'true',
**args
}
scheduled_command = ScheduledCommand(
command=demisto.command(),
next_run_in_seconds=interval_in_seconds,
args=polling_args,
timeout_in_seconds=timeout
)
script_results.append(CommandResults(
scheduled_command=scheduled_command
))
# Check the status of a completed job
else:
commit_status_output = {'JobID': result['response']['result']['job']['id']}
if result['response']['result']['job']['result'] == 'OK':
commit_status_output['Status'] = 'Completed'
else:
commit_status_output['Status'] = 'Failed'
commit_status_output['Details'] = result['response']['result']['job']['details']['line']
status_warnings = []
if result.get("response", {}).get('result', {}).get('job', {}).get('warnings', {}):
status_warnings = result.get("response", {}).get('result', {}).get('job', {}).get('warnings', {}).get(
'line',
[])
ignored_error = 'configured with no certificate profile'
commit_status_output["Warnings"] = [item for item in status_warnings if item not in ignored_error]
readable_output = tableToMarkdown(
'Commit status:',
commit_status_output,
['JobID', 'Status', 'Details', 'Warnings'],
removeNull=True
)
script_results.append(CommandResults(
outputs_prefix='Panorama.Commit',
outputs_key_field='JobID',
outputs=commit_status_output,
readable_output=readable_output,
ignore_auto_extract=True
))
return_results(script_results)
else:
result = panorama_commit(args)
if 'result' in result['response']:
# commit has been given a jobid
commit_output = {
'JobID': result['response']['result']['job'],
'Status': 'Pending',
'Description': args.get('description')
}
script_results.append({
'Type': entryTypes['note'],
'ContentsFormat': formats['json'],
'Contents': result,
'ReadableContentsFormat': formats['markdown'],
'HumanReadable': tableToMarkdown('Commit:', commit_output, ['JobID', 'Status'], removeNull=True),
'EntryContext': {
"Panorama.Commit(val.JobID == obj.JobID)": commit_output
}
})
else:
# no changes to commit
script_results.append(result['response']['msg'])
return_results(script_results)
|
def panorama_commit_command(args: dict):
"""
Commit and show message in the war room
"""
use_polling = args.get('polling', 'false') == 'true'
interval_in_seconds = int(args.get('interval', '60'))
timeout = int(args.get('timeout', '600'))
job_id = args.get('job_id')
script_results = []
# Support polling
if use_polling:
ScheduledCommand.raise_error_if_not_supported()
# Create a new commit job
if not job_id:
result = panorama_commit(args)
if job_id := result.get('response', {}).get('result', {}).get('job_id', ''):
# commit has been given a jobid
job_id = result['response']['result']['job']
commit_output = {
'JobID': job_id,
'Status': 'Pending',
'Description': args.get('description')
}
polling_args = {
'job_id': job_id,
'polling': 'true',
**args
}
scheduled_command = ScheduledCommand(
command=demisto.command(),
next_run_in_seconds=interval_in_seconds,
args=polling_args,
timeout_in_seconds=timeout
)
readable_output = f"Committing any pending changes (Job ID: {job_id})"
script_results.append(CommandResults(
outputs_prefix='Panorama.Commit',
outputs_key_field='JobID',
outputs=commit_output,
readable_output=readable_output,
scheduled_command=scheduled_command,
ignore_auto_extract=True
))
else:
script_results.append("There are no pending changes to commit.")
# Check existing job
else:
result = panorama_commit_status(args)
if result['response']['result']['job']['type'] != 'Commit':
raise Exception('JobID given is not of a commit.')
# Reschedule job if it's not complete
if result['response']['result']['job']['status'] != 'FIN':
polling_args = {
'job_id': job_id,
'polling': 'true',
**args
}
scheduled_command = ScheduledCommand(
command=demisto.command(),
next_run_in_seconds=interval_in_seconds,
args=polling_args,
timeout_in_seconds=timeout
)
script_results.append(CommandResults(
scheduled_command=scheduled_command
))
# Check the status of a completed job
else:
commit_status_output = {'JobID': result['response']['result']['job']['id']}
if result['response']['result']['job']['result'] == 'OK':
commit_status_output['Status'] = 'Completed'
else:
commit_status_output['Status'] = 'Failed'
commit_status_output['Details'] = result['response']['result']['job']['details']['line']
status_warnings = []
if result.get("response", {}).get('result', {}).get('job', {}).get('warnings', {}):
status_warnings = result.get("response", {}).get('result', {}).get('job', {}).get('warnings', {}).get(
'line',
[])
ignored_error = 'configured with no certificate profile'
commit_status_output["Warnings"] = [item for item in status_warnings if item not in ignored_error]
readable_output = tableToMarkdown(
'Commit status:',
commit_status_output,
['JobID', 'Status', 'Details', 'Warnings'],
removeNull=True
)
script_results.append(CommandResults(
outputs_prefix='Panorama.Commit',
outputs_key_field='JobID',
outputs=commit_status_output,
readable_output=readable_output,
ignore_auto_extract=True
))
return_results(script_results)
else:
result = panorama_commit(args)
if 'result' in result['response']:
# commit has been given a jobid
commit_output = {
'JobID': result['response']['result']['job'],
'Status': 'Pending',
'Description': args.get('description')
}
script_results.append({
'Type': entryTypes['note'],
'ContentsFormat': formats['json'],
'Contents': result,
'ReadableContentsFormat': formats['markdown'],
'HumanReadable': tableToMarkdown('Commit:', commit_output, ['JobID', 'Status'], removeNull=True),
'EntryContext': {
"Panorama.Commit(val.JobID == obj.JobID)": commit_output
}
})
else:
# no changes to commit
script_results.append(result['response']['msg'])
return_results(script_results)
|
8,948 |
def _execute_perform(bot):
"""Execute commands specified to perform on IRC server connect.
This function executes the list of commands that can be found in the
``core.commands_on_connect`` setting. It automatically replace any
``$nickname`` placeholder in the command with the bot's configured nick.
"""
if not bot.connection_registered:
# How did you even get this command, bot?
raise Exception('Bot must be connected to server to perform commands.')
commands = bot.config.core.commands_on_connect
count = len(commands)
if not count:
LOGGER.info('No custom command to execute.')
return
LOGGER.info('Executing %d custom commands.', count)
for i, command in enumerate(commands, 1):
command = command.replace('$nickname', bot.config.core.nick)
LOGGER.debug(
'Executing custom command [%d/%d]: %s', i, count, command)
bot.write((command,))
|
def _execute_perform(bot):
"""Execute commands specified to perform on IRC server connect.
This function executes the list of commands that can be found in the
``core.commands_on_connect`` setting. It automatically replaces any
``$nickname`` placeholder in the command with the bot's configured nick.
"""
if not bot.connection_registered:
# How did you even get this command, bot?
raise Exception('Bot must be connected to server to perform commands.')
commands = bot.config.core.commands_on_connect
count = len(commands)
if not count:
LOGGER.info('No custom command to execute.')
return
LOGGER.info('Executing %d custom commands.', count)
for i, command in enumerate(commands, 1):
command = command.replace('$nickname', bot.config.core.nick)
LOGGER.debug(
'Executing custom command [%d/%d]: %s', i, count, command)
bot.write((command,))
|
24,698 |
def _declare_qos_parameteres(
entity_type: Union[Type[Publisher], Type[Subscription]],
node: 'Node',
topic_name: Text,
qos: QoSProfile,
options: QoSOverridingOptions
) -> QoSProfile:
"""
Declare qos parameters for a Publisher or a Subscription.
:param entity_type: Either `rclpy.node.Publisher` or `rclpy.node.Subscription`.
:param node: Node used to declare the parameters.
:param topic_name: Topic name of the entity being created.
:param qos: Default qos settings of the entity being created, that will be overriden
with the user provided qos parameter overrides.
:param options: Options that indicates which parameters are going to be declared.
"""
if not issubclass(entity_type, (Publisher, Subscription)):
raise TypeError('Argument `entity_type` should be a subclass of Publisher or Subscription')
entity_type_str = 'publisher' if issubclass(entity_type, Publisher) else Subscription
id_suffix = '' if options.entity_id is None else f'_{options.entity_id}'
name = f'qos_overrides.{topic_name}.{entity_type_str}{id_suffix}.' '{}'
description = '{}' f' for {entity_type_str} `{topic_name}` with id `{options.entity_id}`'
allowed_policies = _get_allowed_policies(entity_type)
for policy in options.policy_kinds:
if policy not in allowed_policies:
continue
policy_name = policy.name.lower()
descriptor = ParameterDescriptor()
descriptor.description = description.format(policy_name)
descriptor.read_only = True
param = node.declare_parameter(
name.format(policy_name),
_get_qos_policy_parameter(qos, policy),
descriptor)
_override_qos_policy_with_param(qos, policy, param)
if options.callback is not None and not options.callback(qos):
raise InvalidQosOverridesError(
description.format('Provided qos overrides') + ', are not valid')
|
def _declare_qos_parameteres(
entity_type: Union[Type[Publisher], Type[Subscription]],
node: 'Node',
topic_name: Text,
qos: QoSProfile,
options: QoSOverridingOptions
) -> QoSProfile:
"""
Declare qos parameters for a Publisher or a Subscription.
:param entity_type: Either `rclpy.node.Publisher` or `rclpy.node.Subscription`.
:param node: Node used to declare the parameters.
:param topic_name: Topic name of the entity being created.
:param qos: Default QoS settings of the entity being created, that will be overridden
with the user provided qos parameter overrides.
:param options: Options that indicates which parameters are going to be declared.
"""
if not issubclass(entity_type, (Publisher, Subscription)):
raise TypeError('Argument `entity_type` should be a subclass of Publisher or Subscription')
entity_type_str = 'publisher' if issubclass(entity_type, Publisher) else Subscription
id_suffix = '' if options.entity_id is None else f'_{options.entity_id}'
name = f'qos_overrides.{topic_name}.{entity_type_str}{id_suffix}.' '{}'
description = '{}' f' for {entity_type_str} `{topic_name}` with id `{options.entity_id}`'
allowed_policies = _get_allowed_policies(entity_type)
for policy in options.policy_kinds:
if policy not in allowed_policies:
continue
policy_name = policy.name.lower()
descriptor = ParameterDescriptor()
descriptor.description = description.format(policy_name)
descriptor.read_only = True
param = node.declare_parameter(
name.format(policy_name),
_get_qos_policy_parameter(qos, policy),
descriptor)
_override_qos_policy_with_param(qos, policy, param)
if options.callback is not None and not options.callback(qos):
raise InvalidQosOverridesError(
description.format('Provided qos overrides') + ', are not valid')
|
30,112 |
def containment_to_distance(
containment,
ksize,
scaled,
n_unique_kmers=None,
sequence_len_bp=None,
confidence=0.95,
return_identity=False,
return_ci=False,
prob_threshold=10.0 ** (-3),
):
"""
Containment --> distance CI (one step)
"""
sol1, sol2, point_estimate = None, None, None
if sequence_len_bp and not n_unique_kmers:
n_unique_kmers = sequence_len_to_n_kmers(sequence_len_bp, ksize)
if containment <= 0.0001:
sol2 = sol1 = point_estimate = 1.0
elif containment >= 0.9999:
sol1 = sol2 = point_estimate = 0.0
else:
point_estimate = 1.0 - containment ** (1.0 / ksize)
if return_ci:
try:
alpha = 1 - confidence
z_alpha = probit(1 - alpha / 2)
f_scaled = (
1.0 / scaled
) # these use scaled as a fraction between 0 and 1
bias_factor = 1 - (1 - f_scaled) ** n_unique_kmers
term_1 = (1.0 - f_scaled) / (
f_scaled * n_unique_kmers**3 * bias_factor**2
)
term_2 = lambda pest: n_unique_kmers * exp_n_mutated(
n_unique_kmers, ksize, pest
) - exp_n_mutated_squared(n_unique_kmers, ksize, pest)
term_3 = lambda pest: var_n_mutated(n_unique_kmers, ksize, pest) / (
n_unique_kmers**2
)
var_direct = lambda pest: term_1 * term_2(pest) + term_3(pest)
f1 = (
lambda pest: (1 - pest) ** ksize
+ z_alpha * sqrt(var_direct(pest))
- containment
)
f2 = (
lambda pest: (1 - pest) ** ksize
- z_alpha * sqrt(var_direct(pest))
- containment
)
sol1 = brentq(f1, 0.0000001, 0.9999999)
sol2 = brentq(f2, 0.0000001, 0.9999999)
except ValueError as exc:
# afaict, this only happens with extremely small test data
notify(
"WARNING: Cannot estimate ANI from containment. Do your sketches contain enough hashes?"
)
notify(str(exc))
return None, None, None
# Do this here, so that we don't need to reconvert distance <--> identity later.
prob_nothing_in_common = get_exp_probability_nothing_common(
point_estimate, ksize, scaled, n_unique_kmers=n_unique_kmers
)
if prob_nothing_in_common >= prob_threshold:
# TO DO: keep count; suggest user decrease scaled value. If that is unsuccessful, maybe decrease ksize
notify(
"WARNING: These sketches may have no hashes in common based on chance alone."
)
if return_identity:
if any([sol1 is None, sol2 is None]):
point_estimate = distance_to_identity(point_estimate)
else:
point_estimate, sol2, sol1 = distance_to_identity(
point_estimate, sol2, sol1
)
if return_ci:
return point_estimate, sol2, sol1, prob_nothing_in_common
return point_estimate, prob_nothing_in_common
|
def containment_to_distance(
containment,
ksize,
scaled,
n_unique_kmers=None,
sequence_len_bp=None,
confidence=0.95,
return_identity=False,
return_ci=False,
prob_threshold=1e-3,
):
"""
Containment --> distance CI (one step)
"""
sol1, sol2, point_estimate = None, None, None
if sequence_len_bp and not n_unique_kmers:
n_unique_kmers = sequence_len_to_n_kmers(sequence_len_bp, ksize)
if containment <= 0.0001:
sol2 = sol1 = point_estimate = 1.0
elif containment >= 0.9999:
sol1 = sol2 = point_estimate = 0.0
else:
point_estimate = 1.0 - containment ** (1.0 / ksize)
if return_ci:
try:
alpha = 1 - confidence
z_alpha = probit(1 - alpha / 2)
f_scaled = (
1.0 / scaled
) # these use scaled as a fraction between 0 and 1
bias_factor = 1 - (1 - f_scaled) ** n_unique_kmers
term_1 = (1.0 - f_scaled) / (
f_scaled * n_unique_kmers**3 * bias_factor**2
)
term_2 = lambda pest: n_unique_kmers * exp_n_mutated(
n_unique_kmers, ksize, pest
) - exp_n_mutated_squared(n_unique_kmers, ksize, pest)
term_3 = lambda pest: var_n_mutated(n_unique_kmers, ksize, pest) / (
n_unique_kmers**2
)
var_direct = lambda pest: term_1 * term_2(pest) + term_3(pest)
f1 = (
lambda pest: (1 - pest) ** ksize
+ z_alpha * sqrt(var_direct(pest))
- containment
)
f2 = (
lambda pest: (1 - pest) ** ksize
- z_alpha * sqrt(var_direct(pest))
- containment
)
sol1 = brentq(f1, 0.0000001, 0.9999999)
sol2 = brentq(f2, 0.0000001, 0.9999999)
except ValueError as exc:
# afaict, this only happens with extremely small test data
notify(
"WARNING: Cannot estimate ANI from containment. Do your sketches contain enough hashes?"
)
notify(str(exc))
return None, None, None
# Do this here, so that we don't need to reconvert distance <--> identity later.
prob_nothing_in_common = get_exp_probability_nothing_common(
point_estimate, ksize, scaled, n_unique_kmers=n_unique_kmers
)
if prob_nothing_in_common >= prob_threshold:
# TO DO: keep count; suggest user decrease scaled value. If that is unsuccessful, maybe decrease ksize
notify(
"WARNING: These sketches may have no hashes in common based on chance alone."
)
if return_identity:
if any([sol1 is None, sol2 is None]):
point_estimate = distance_to_identity(point_estimate)
else:
point_estimate, sol2, sol1 = distance_to_identity(
point_estimate, sol2, sol1
)
if return_ci:
return point_estimate, sol2, sol1, prob_nothing_in_common
return point_estimate, prob_nothing_in_common
|
41,307 |
def get_metric_name(metric):
if isinstance(metric, tuple):
names, metric = metric
elif hasattr(metric, '__name__'):
names = metric.__name__
elif hasattr(metric, '__class__'):
names = camel_to_snake(metric.__class__.__name__)
else:
names = 'unknown_metric'
return names, metric
|
def get_name_of_metric(metric):
if isinstance(metric, tuple):
names, metric = metric
elif hasattr(metric, '__name__'):
names = metric.__name__
elif hasattr(metric, '__class__'):
names = camel_to_snake(metric.__class__.__name__)
else:
names = 'unknown_metric'
return names, metric
|
8,978 |
def seconds_to_human(secs, granularity=2):
"""Format :class:`~datetime.timedelta` as a human-readable relative time.
:param secs: time difference to format
:type secs: :class:`~datetime.timedelta` or integer
:param int granlarity: number of time units to return (default to 2)
Inspiration for function structure from:
https://gist.github.com/Highstaker/280a09591df4a5fb1363b0bbaf858f0d
Examples::
>>> seconds_to_human(65707200)
'2 years, 1 month ago'
>>> seconds_to_human(-17100) # negative amount
'in 4 hours, 45 minutes'
>>> seconds_to_human(-709200)
'in 8 days, 5 hours'
>>> seconds_to_human(39441600, 1) # 1 year + 3 months
'1 year ago'
This function can be used with a :class:`~datetime.timedelta`::
>>> from datetime import timedelta
>>> seconds_to_human(timedelta(days=42, seconds=278))
'1 month, 11 days ago'
The ``granularity`` argument control how detailed the result is::
>>> seconds_to_human(3672) # 2 by default
'1 hour, 1 minute ago'
>>> seconds_to_human(3672, granularity=3)
'1 hour, 1 minute, 12 seconds ago'
>>> seconds_to_human(3672, granularity=1)
'1 hour ago'
"""
if isinstance(secs, datetime.timedelta):
secs = secs.total_seconds()
future = False
if secs < 0:
future = True
secs = int(secs)
secs = abs(secs)
if secs == 0:
# zero is a special case that the algorithm below won't handle correctly (#1841)
result = "0 seconds"
else:
result = ", ".join([
"%s %s" % (value, unit)
for value, unit in get_time_unit(*seconds_to_split(secs))
if value
][:granularity])
if future is False:
result += " ago"
else:
result = "in " + result
return result
|
def seconds_to_human(secs, granularity=2):
"""Format :class:`~datetime.timedelta` as a human-readable relative time.
:param secs: time difference to format
:type secs: :class:`~datetime.timedelta` or integer
:param int granularity: number of time units to return (default to 2)
Inspiration for function structure from:
https://gist.github.com/Highstaker/280a09591df4a5fb1363b0bbaf858f0d
Examples::
>>> seconds_to_human(65707200)
'2 years, 1 month ago'
>>> seconds_to_human(-17100) # negative amount
'in 4 hours, 45 minutes'
>>> seconds_to_human(-709200)
'in 8 days, 5 hours'
>>> seconds_to_human(39441600, 1) # 1 year + 3 months
'1 year ago'
This function can be used with a :class:`~datetime.timedelta`::
>>> from datetime import timedelta
>>> seconds_to_human(timedelta(days=42, seconds=278))
'1 month, 11 days ago'
The ``granularity`` argument control how detailed the result is::
>>> seconds_to_human(3672) # 2 by default
'1 hour, 1 minute ago'
>>> seconds_to_human(3672, granularity=3)
'1 hour, 1 minute, 12 seconds ago'
>>> seconds_to_human(3672, granularity=1)
'1 hour ago'
"""
if isinstance(secs, datetime.timedelta):
secs = secs.total_seconds()
future = False
if secs < 0:
future = True
secs = int(secs)
secs = abs(secs)
if secs == 0:
# zero is a special case that the algorithm below won't handle correctly (#1841)
result = "0 seconds"
else:
result = ", ".join([
"%s %s" % (value, unit)
for value, unit in get_time_unit(*seconds_to_split(secs))
if value
][:granularity])
if future is False:
result += " ago"
else:
result = "in " + result
return result
|
30,769 |
def build_where_clause(args: dict) -> str:
"""
This function transforms the relevant entries of dict into the where part of a SQL query
Args:
args: The arguments dict
Returns:
A string represents the where part of a SQL query
"""
args_dict = {
'source_ip': 'source_ip.value',
'dest_ip': 'dest_ip.value',
'rule_matched': 'rule_matched',
'from_zone': 'from_zone',
'to_zone': 'to_zone',
'source_port': 'source_port',
'dest_port': 'dest_port',
'action': 'action.value',
'file_sha_256': 'file_sha_256',
'file_name': 'file_name',
}
if args.get('ip') and args.get('source_ip') or args.get('ip') and args.get('dest_ip'):
raise DemistoException('Error: You cant enter the "ip" argument with either "source_ip" nor "dest_ip" '
'arguments')
if args.get('port') and args.get('source_port') or args.get('port') and args.get('dest_port'):
raise DemistoException('Error: You cant enter the "port" argument with either "source_port" nor "dest_port" '
'arguments')
non_string_keys = {'dest_port', 'source_port'}
if 'query' in args:
# if query arg is supplied than we just need to parse it and only it
return args['query'].strip()
where_clause = ''
if args.get('ip'):
ips = argToList(args.pop('ip'))
where_clause += '(' + ' OR '.join(f'source_ip.value = "{ip}" OR dest_ip.value = "{ip}"' for ip in ips) + ')'
if any(args.get(key) for key in args_dict) or args.get('port'):
where_clause += ' AND '
if args.get('port'):
ports = argToList(args.pop('port'))
where_clause += '(' + ' OR '.join(f'source_port = {port} OR dest_port = {port}' for port in ports) + ')'
if any(args.get(key) for key in args_dict):
where_clause += ' AND '
# We want to add only keys that are part of the query
string_query_fields = {key: value for key, value in args.items() if key in args_dict and key not in non_string_keys}
or_statements = []
for key, values in string_query_fields.items():
string_values_list: list = argToList(values)
field = args_dict[key]
or_statements.append(' OR '.join([f'{field} = "{value}"' for value in string_values_list]))
# ports are digested as ints and cannot be sent as strings
non_string_query_fields = {key: value for key, value in args.items() if key in non_string_keys}
for key, values in non_string_query_fields.items():
non_string_values_list: list = argToList(values)
field = args_dict[key]
or_statements.append(' OR '.join([f'{field} = {value}' for value in non_string_values_list]))
where_clause += ' AND '.join([f'({or_statement})' for or_statement in or_statements if or_statement])
return where_clause
|
def build_where_clause(args: dict) -> str:
"""
This function transforms the relevant entries of dict into the where part of a SQL query
Args:
args: The arguments dict
Returns:
A string represents the where part of a SQL query
"""
args_dict = {
'source_ip': 'source_ip.value',
'dest_ip': 'dest_ip.value',
'rule_matched': 'rule_matched',
'from_zone': 'from_zone',
'to_zone': 'to_zone',
'source_port': 'source_port',
'dest_port': 'dest_port',
'action': 'action.value',
'file_sha_256': 'file_sha_256',
'file_name': 'file_name',
}
if args.get('ip') and (args.get('source_ip') or args.get('dest_ip')):
raise DemistoException('Error: You cant enter the "ip" argument with either "source_ip" nor "dest_ip" '
'arguments')
if args.get('port') and args.get('source_port') or args.get('port') and args.get('dest_port'):
raise DemistoException('Error: You cant enter the "port" argument with either "source_port" nor "dest_port" '
'arguments')
non_string_keys = {'dest_port', 'source_port'}
if 'query' in args:
# if query arg is supplied than we just need to parse it and only it
return args['query'].strip()
where_clause = ''
if args.get('ip'):
ips = argToList(args.pop('ip'))
where_clause += '(' + ' OR '.join(f'source_ip.value = "{ip}" OR dest_ip.value = "{ip}"' for ip in ips) + ')'
if any(args.get(key) for key in args_dict) or args.get('port'):
where_clause += ' AND '
if args.get('port'):
ports = argToList(args.pop('port'))
where_clause += '(' + ' OR '.join(f'source_port = {port} OR dest_port = {port}' for port in ports) + ')'
if any(args.get(key) for key in args_dict):
where_clause += ' AND '
# We want to add only keys that are part of the query
string_query_fields = {key: value for key, value in args.items() if key in args_dict and key not in non_string_keys}
or_statements = []
for key, values in string_query_fields.items():
string_values_list: list = argToList(values)
field = args_dict[key]
or_statements.append(' OR '.join([f'{field} = "{value}"' for value in string_values_list]))
# ports are digested as ints and cannot be sent as strings
non_string_query_fields = {key: value for key, value in args.items() if key in non_string_keys}
for key, values in non_string_query_fields.items():
non_string_values_list: list = argToList(values)
field = args_dict[key]
or_statements.append(' OR '.join([f'{field} = {value}' for value in non_string_values_list]))
where_clause += ' AND '.join([f'({or_statement})' for or_statement in or_statements if or_statement])
return where_clause
|
40,504 |
def release_definition_list(name=None, top=None, team_instance=None, project=None, artifact_source_id=None, detect=None):
"""List release definitions.
:param name: Limit results to definitions with this name or starting with this name. Examples: "FabCI"
:type name: str
:param top: Maximum number of definitions to list.
:type top: int
:param team_instance: VSTS account or TFS collection URL. Example: https://myaccount.visualstudio.com
:type team_instance: str
:param project: Name or ID of the team project.
:type project: str
:param artifact_source_id: Limit results to definitions associated with this artifact_source_id.
:type artifact_source_id: str
:param detect: Automatically detect values for instance and project. Default is "on".
:type detect: str
:rtype: [ReleaseDefinitionReference]
"""
team_instance, project = resolve_instance_and_project(detect=detect,
team_instance=team_instance,
project=project)
client = get_release_client(team_instance)
query_order = 'nameAscending'
definition_references = client.get_release_definitions(project=project, search_text=name, artifact_source_id=artifact_source_id,
top=top,
query_order=query_order)
return definition_references
|
def release_definition_list(name=None, top=None, team_instance=None, project=None, artifact_source_id=None, detect=None):
"""List release definitions.
:param name: Limit results to definitions with this name or starting with this name. Example: "FabCI"
:type name: str
:param top: Maximum number of definitions to list.
:type top: int
:param team_instance: VSTS account or TFS collection URL. Example: https://myaccount.visualstudio.com
:type team_instance: str
:param project: Name or ID of the team project.
:type project: str
:param artifact_source_id: Limit results to definitions associated with this artifact_source_id.
:type artifact_source_id: str
:param detect: Automatically detect values for instance and project. Default is "on".
:type detect: str
:rtype: [ReleaseDefinitionReference]
"""
team_instance, project = resolve_instance_and_project(detect=detect,
team_instance=team_instance,
project=project)
client = get_release_client(team_instance)
query_order = 'nameAscending'
definition_references = client.get_release_definitions(project=project, search_text=name, artifact_source_id=artifact_source_id,
top=top,
query_order=query_order)
return definition_references
|
42,928 |
def _node_coords(graph: nx.Graph, l: dict) -> Tuple:
""" Provides the coordinates for the graph nodes when given an input graph layout.
Args:
graph (nx.Graph): input graph
l (dict): dictionary of nodes and their respective coordinates
Returns:
Tuple: x and y coordinates for each node
"""
n_x = []
n_y = []
for n in graph.nodes():
n_x.append(l[n][0])
n_y.append(l[n][1])
return {"x": n_x, "y": n_y}
|
def _node_coords(graph: nx.Graph, l: dict) -> Tuple:
""" Provides the coordinates for the graph nodes when given an input graph layout.
Args:
graph (nx.Graph): input graph
l (dict): dictionary of nodes and their respective coordinates
Returns:
Dict[str, list]: lists of x and y coordinates accessed as keys of a dictionary
"""
n_x = []
n_y = []
for n in graph.nodes():
n_x.append(l[n][0])
n_y.append(l[n][1])
return {"x": n_x, "y": n_y}
|
23,873 |
def _allowed(ctx, param, value): # pragma: no cover
_supported_ansible_and_python_version()
return value
|
def _allowed(ctx, param, value): # pragma: no cover
_terminate_if_unsupported_env()
return value
|
17,711 |
def _init_hoomd_dihedrals(structure, ref_energy=1.0):
# Identify the uniqeu dihedral types before setting
dihedral_type_params = {}
for dihedral in structure.structure.dihedrals:
t1, t2 = dihedral.atom1.type, dihedral.atom2.type
t3, t4 = dihedral.atom3.type, dihedral.atom4.type
if [t2, t3] == sorted([t2, t3], key=natural_sort):
dihedral_type = ('-'.join((t1, t2, t3, t4)))
else:
dihedral_type = ('-'.join((t4, t3, t2, t1)))
if dihedral_type not in dihedral_type_params:
dihedral_type_params[dihedral_type] = dihedral.type
# Set the hoomd parameters
import hoomd.md.dihedral
periodic_torsion = hoomd.md.dihedral.harmonic() # These are periodic torsions
for name, dihedral_type in dihedral_type_params.items():
if dihedral_type.phase > 0.0001:
warnings.warn("Dihedral type {} detected with " +
"non-zero phase shift {} ".format(dihedral_type.phae) +
"this is not currently supported in HOOMD, " +
"will ignore")
else:
periodic_torsion.dihedral_coeff.set(name,
k=2*dihedral_type.phi_k / ref_energy,
d=1,
n=dihedral_type.per)
return periodic_torsion
|
def _init_hoomd_dihedrals(structure, ref_energy=1.0):
# Identify the unique dihedral types before setting
dihedral_type_params = {}
for dihedral in structure.structure.dihedrals:
t1, t2 = dihedral.atom1.type, dihedral.atom2.type
t3, t4 = dihedral.atom3.type, dihedral.atom4.type
if [t2, t3] == sorted([t2, t3], key=natural_sort):
dihedral_type = ('-'.join((t1, t2, t3, t4)))
else:
dihedral_type = ('-'.join((t4, t3, t2, t1)))
if dihedral_type not in dihedral_type_params:
dihedral_type_params[dihedral_type] = dihedral.type
# Set the hoomd parameters
import hoomd.md.dihedral
periodic_torsion = hoomd.md.dihedral.harmonic() # These are periodic torsions
for name, dihedral_type in dihedral_type_params.items():
if dihedral_type.phase > 0.0001:
warnings.warn("Dihedral type {} detected with " +
"non-zero phase shift {} ".format(dihedral_type.phae) +
"this is not currently supported in HOOMD, " +
"will ignore")
else:
periodic_torsion.dihedral_coeff.set(name,
k=2*dihedral_type.phi_k / ref_energy,
d=1,
n=dihedral_type.per)
return periodic_torsion
|
31,176 |
def get_indicators_context(incident):
file_context: List[Any] = []
process_context: List[Any] = []
ip_context: List[Any] = []
for alert in incident.get('alerts'):
# file context
file_details = {
'Name': alert.get('action_file_name'),
'Path': alert.get('action_file_path'),
'SHA265': alert.get('action_file_sha256'),
'MD5': alert.get('action_file_md5')
}
remove_nulls_from_dictionary(file_details)
if file_details:
file_context.append(file_details)
# process context
process_types = ['actor', 'os_actor', 'causality_actor', 'action']
for process_type in process_types:
single_process_context = get_process_context(alert, process_type)
if single_process_context:
process_context.append(single_process_context)
# ip context
add_to_ip_context(alert, ip_context)
network_artifacts = incident.get('network_artifacts', [])
domain_context = create_context_from_network_artifacts(network_artifacts, ip_context)
file_artifacts = incident.get('file_artifacts', [])
for file in file_artifacts:
file_context.append({
'Name': file.get('file_name'),
'SHA256': file.get('file_sha256')
})
return file_context, process_context, domain_context, ip_context
|
def get_indicators_context(incident):
file_context: List[Any] = []
process_context: List[Any] = []
ip_context: List[Any] = []
for alert in incident.get('alerts'):
# file context
file_details = {
'Name': alert.get('action_file_name'),
'Path': alert.get('action_file_path'),
'SHA265': alert.get('action_file_sha256'),
'MD5': alert.get('action_file_md5'),
}
remove_nulls_from_dictionary(file_details)
if file_details:
file_context.append(file_details)
# process context
process_types = ['actor', 'os_actor', 'causality_actor', 'action']
for process_type in process_types:
single_process_context = get_process_context(alert, process_type)
if single_process_context:
process_context.append(single_process_context)
# ip context
add_to_ip_context(alert, ip_context)
network_artifacts = incident.get('network_artifacts', [])
domain_context = create_context_from_network_artifacts(network_artifacts, ip_context)
file_artifacts = incident.get('file_artifacts', [])
for file in file_artifacts:
file_context.append({
'Name': file.get('file_name'),
'SHA256': file.get('file_sha256')
})
return file_context, process_context, domain_context, ip_context
|
29,227 |
def run_beam_job(
job_name: Optional[str] = None,
job_class: Optional[Type[base_jobs.JobBase]] = None,
run_synchronously: Optional[bool] = None
) -> beam_job_domain.BeamJobRun:
"""Starts a new Apache Beam job and returns metadata about its execution.
Args:
job_name: str. The name of the job to run. If not provided, then
job_class must not be None.
job_class: type(JobBase). A subclass of JobBase to begin running. This
value takes precedence over job_name.
run_synchronously: bool. Whether to run the job synchronously. If not
provided, then the value is decided by whether Oppia is running in
emulator mode.
Returns:
BeamJobRun. Metadata about the run's execution.
"""
if job_class is None and job_name is None:
raise ValueError('Must specify the job class to run')
if job_class is None:
# MyPy is wrong. We know job_name is not None in this branch because if
# it were, the ValueError above would have been raised.
job_class = jobs_registry.get_job_class_by_name(job_name) # type: ignore[arg-type]
if run_synchronously is None:
run_synchronously = constants.EMULATOR_MODE
run_model = jobs_manager.run_job(job_class, run_synchronously)
return get_beam_job_run_from_model(run_model)
|
def run_beam_job(
job_name: Optional[str] = None,
job_class: Optional[Type[base_jobs.JobBase]] = None,
run_synchronously: Optional[bool] = None
) -> beam_job_domain.BeamJobRun:
"""Starts a new Apache Beam job and returns metadata about its execution.
Args:
job_name: str. The name of the job to run. If not provided, then
job_class must not be None.
job_class: type(JobBase). A subclass of JobBase to begin running. This
value takes precedence over job_name.
run_synchronously: bool. Whether to run the job synchronously. If not
provided, then the value is decided by whether Oppia is running in
emulator mode.
Returns:
BeamJobRun. Metadata about the run's execution.
"""
if job_class is None and job_name is None:
raise ValueError('Must specify the job class or name to run')
if job_class is None:
# MyPy is wrong. We know job_name is not None in this branch because if
# it were, the ValueError above would have been raised.
job_class = jobs_registry.get_job_class_by_name(job_name) # type: ignore[arg-type]
if run_synchronously is None:
run_synchronously = constants.EMULATOR_MODE
run_model = jobs_manager.run_job(job_class, run_synchronously)
return get_beam_job_run_from_model(run_model)
|
49,094 |
def test_issue_22210():
d = Symbol('d', integer=True)
expr = 2*Derivative(sin(x), (x, d))
assert expr.simplify() == expr
|
def test_issue_22210():
d = Symbol('d', integer=True)
expr = 2*Derivative(sin(x), (x, d))
assert expr.simplify() == expr
|
30,790 |
def send_mail(emailto, emailfrom, subject, body, entry_ids, cc, bcc, htmlBody, replyTo, file_names, attach_cid,
transientFile, transientFileContent, transientFileCID, additional_headers, templateParams):
demisto.log("the body is " + str(body))
demisto.log("the attach_cid is " + str(attach_cid))
demisto.log("htmlBody is " + str(htmlBody))
demisto.log("entry_ids is " + str(entry_ids))
demisto.log("file_names is " + str(file_names))
if entry_ids == [] and file_names == [] and attach_cid == [] and htmlBody and body is None:
# if there is only htmlbody and no attachments to the mail , we would like to send it without attaching the body
demisto.log("entered to only html body without attachments")
message = MIMEText(htmlBody, 'html')
elif entry_ids == [] and file_names == [] and attach_cid == [] and body and htmlBody is None:
# if there is only body and no attachments to the mail , we would like to send it without attaching every part
demisto.log("entered to only body without attachments")
message = MIMEText(body, 'plain', 'utf-8')
else:
message = MIMEMultipart()
message['to'] = header(','.join(emailto))
message['cc'] = header(','.join(cc))
message['bcc'] = header(','.join(bcc))
message['from'] = header(emailfrom)
message['subject'] = header(subject)
message['reply-to'] = header(replyTo)
# if there are any attachments to the mail
if entry_ids or file_names or attach_cid or (body and htmlBody):
templateParams = template_params(templateParams)
if templateParams is not None:
if body is not None:
body = body.format(**templateParams)
if htmlBody is not None:
htmlBody = htmlBody.format(**templateParams)
if additional_headers is not None and len(additional_headers) > 0:
for h in additional_headers:
header_name_and_value = h.split('=')
message[header_name_and_value[0]] = header(header_name_and_value[1])
msg = MIMEText(body, 'plain', 'utf-8')
message.attach(msg)
htmlAttachments = [] # type: list
inlineAttachments = [] # type: list
if htmlBody is not None:
htmlBody, htmlAttachments = handle_html(htmlBody)
msg = MIMEText(htmlBody, 'html', 'utf-8')
message.attach(msg)
if attach_cid is not None and len(attach_cid) > 0:
inlineAttachments = collect_inline_attachments(attach_cid)
else:
# if not html body, cannot attach cids in message
transientFileCID = None
attachments = collect_attachments(entry_ids, file_names)
manual_attachments = collect_manual_attachments()
transientAttachments = transient_attachments(transientFile, transientFileContent, transientFileCID)
attachments = attachments + htmlAttachments + transientAttachments + inlineAttachments + manual_attachments
attachment_handler(message, attachments)
encoded_message = base64.urlsafe_b64encode(message.as_string())
command_args = {
'userId': emailfrom,
'body': {
'raw': encoded_message,
}
}
service = get_service('gmail', 'v1', additional_scopes=['https://www.googleapis.com/auth/gmail.compose',
'https://www.googleapis.com/auth/gmail.send'])
result = service.users().messages().send(**command_args).execute()
return result
|
def send_mail(emailto, emailfrom, subject, body, entry_ids, cc, bcc, htmlBody, replyTo, file_names, attach_cid,
transientFile, transientFileContent, transientFileCID, additional_headers, templateParams):
demisto.log("the body is " + str(body))
demisto.log("the attach_cid is " + str(attach_cid))
demisto.log("htmlBody is " + str(htmlBody))
demisto.log("entry_ids is " + str(entry_ids))
demisto.log("file_names is " + str(file_names))
if htmlBody and not any([entry_ids, file_names, attach_cid, body]):
# if there is only htmlbody and no attachments to the mail , we would like to send it without attaching the body
demisto.log("entered to only html body without attachments")
message = MIMEText(htmlBody, 'html')
elif entry_ids == [] and file_names == [] and attach_cid == [] and body and htmlBody is None:
# if there is only body and no attachments to the mail , we would like to send it without attaching every part
demisto.log("entered to only body without attachments")
message = MIMEText(body, 'plain', 'utf-8')
else:
message = MIMEMultipart()
message['to'] = header(','.join(emailto))
message['cc'] = header(','.join(cc))
message['bcc'] = header(','.join(bcc))
message['from'] = header(emailfrom)
message['subject'] = header(subject)
message['reply-to'] = header(replyTo)
# if there are any attachments to the mail
if entry_ids or file_names or attach_cid or (body and htmlBody):
templateParams = template_params(templateParams)
if templateParams is not None:
if body is not None:
body = body.format(**templateParams)
if htmlBody is not None:
htmlBody = htmlBody.format(**templateParams)
if additional_headers is not None and len(additional_headers) > 0:
for h in additional_headers:
header_name_and_value = h.split('=')
message[header_name_and_value[0]] = header(header_name_and_value[1])
msg = MIMEText(body, 'plain', 'utf-8')
message.attach(msg)
htmlAttachments = [] # type: list
inlineAttachments = [] # type: list
if htmlBody is not None:
htmlBody, htmlAttachments = handle_html(htmlBody)
msg = MIMEText(htmlBody, 'html', 'utf-8')
message.attach(msg)
if attach_cid is not None and len(attach_cid) > 0:
inlineAttachments = collect_inline_attachments(attach_cid)
else:
# if not html body, cannot attach cids in message
transientFileCID = None
attachments = collect_attachments(entry_ids, file_names)
manual_attachments = collect_manual_attachments()
transientAttachments = transient_attachments(transientFile, transientFileContent, transientFileCID)
attachments = attachments + htmlAttachments + transientAttachments + inlineAttachments + manual_attachments
attachment_handler(message, attachments)
encoded_message = base64.urlsafe_b64encode(message.as_string())
command_args = {
'userId': emailfrom,
'body': {
'raw': encoded_message,
}
}
service = get_service('gmail', 'v1', additional_scopes=['https://www.googleapis.com/auth/gmail.compose',
'https://www.googleapis.com/auth/gmail.send'])
result = service.users().messages().send(**command_args).execute()
return result
|
1,148 |
def load_resultfile(results_file, resolve=True):
"""
Load InterfaceResult file from path.
Parameters
----------
results_file : pathlike
Path to an existing pickle (``result_<interface name>.pklz``) created with
``save_resultfile``.
Raises ``FileNotFoundError`` if ``results_file`` does not exist.
resolve : bool
Determines whether relative paths will be resolved to absolute (default is ``True``).
Returns
-------
result : InterfaceResult
A Nipype object containing the runtime, inputs, outputs and other interface information
such as a traceback in the case of errors.
"""
results_file = Path(results_file)
if not results_file.exists():
raise FileNotFoundError(results_file)
result = loadpkl(results_file)
if resolve and hasattr(result,"outputs") and result.outputs:
try:
outputs = result.outputs.get()
except TypeError: # This is a Bunch
logger.debug("Outputs object of loaded result %s is a Bunch.", results_file)
return result
logger.debug("Resolving paths in outputs loaded from results file.")
for trait_name, old in list(outputs.items()):
if isdefined(old):
if result.outputs.trait(trait_name).is_trait_type(OutputMultiPath):
old = result.outputs.trait(trait_name).handler.get_value(
result.outputs, trait_name
)
value = resolve_path_traits(
result.outputs.trait(trait_name), old, results_file.parent
)
setattr(result.outputs, trait_name, value)
return result
|
def load_resultfile(results_file, resolve=True):
"""
Load InterfaceResult file from path.
Parameters
----------
results_file : pathlike
Path to an existing pickle (``result_<interface name>.pklz``) created with
``save_resultfile``.
Raises ``FileNotFoundError`` if ``results_file`` does not exist.
resolve : bool
Determines whether relative paths will be resolved to absolute (default is ``True``).
Returns
-------
result : InterfaceResult
A Nipype object containing the runtime, inputs, outputs and other interface information
such as a traceback in the case of errors.
"""
results_file = Path(results_file)
if not results_file.exists():
raise FileNotFoundError(results_file)
result = loadpkl(results_file)
if resolve and getattr(result, "outputs", None):
try:
outputs = result.outputs.get()
except TypeError: # This is a Bunch
logger.debug("Outputs object of loaded result %s is a Bunch.", results_file)
return result
logger.debug("Resolving paths in outputs loaded from results file.")
for trait_name, old in list(outputs.items()):
if isdefined(old):
if result.outputs.trait(trait_name).is_trait_type(OutputMultiPath):
old = result.outputs.trait(trait_name).handler.get_value(
result.outputs, trait_name
)
value = resolve_path_traits(
result.outputs.trait(trait_name), old, results_file.parent
)
setattr(result.outputs, trait_name, value)
return result
|
51,477 |
def scatter_example_dataset(*, seed=None) -> Dataset:
"""
Create an example dataset.
Parameters
----------
seed : integer, optional
Seed for the random number generation. The default is None.
"""
rng = np.random.default_rng(seed)
A = DataArray(
np.zeros([3, 11, 4, 4]),
dims=["x", "y", "z", "w"],
coords=[
np.arange(3),
np.linspace(0, 1, 11),
np.arange(4),
0.1 * rng.standard_normal(4),
],
)
B = 0.1 * A.x ** 2 + A.y ** 2.5 + 0.1 * A.z * A.w
A = -0.1 * A.x + A.y / (5 + A.z) + A.w
ds = Dataset({"A": A, "B": B})
ds["w"] = ["one", "two", "three", "five"]
ds.x.attrs["units"] = "xunits"
ds.y.attrs["units"] = "yunits"
ds.z.attrs["units"] = "zunits"
ds.w.attrs["units"] = "wunits"
ds.A.attrs["units"] = "Aunits"
ds.B.attrs["units"] = "Bunits"
return ds
|
def scatter_example_dataset(*, seed=None) -> Dataset:
"""
Create an example dataset.
Parameters
----------
seed : int, optional
Seed for the random number generation.
"""
rng = np.random.default_rng(seed)
A = DataArray(
np.zeros([3, 11, 4, 4]),
dims=["x", "y", "z", "w"],
coords=[
np.arange(3),
np.linspace(0, 1, 11),
np.arange(4),
0.1 * rng.standard_normal(4),
],
)
B = 0.1 * A.x ** 2 + A.y ** 2.5 + 0.1 * A.z * A.w
A = -0.1 * A.x + A.y / (5 + A.z) + A.w
ds = Dataset({"A": A, "B": B})
ds["w"] = ["one", "two", "three", "five"]
ds.x.attrs["units"] = "xunits"
ds.y.attrs["units"] = "yunits"
ds.z.attrs["units"] = "zunits"
ds.w.attrs["units"] = "wunits"
ds.A.attrs["units"] = "Aunits"
ds.B.attrs["units"] = "Bunits"
return ds
|
5,494 |
def test_delete_user_no_revisions_but_attachment_revisions_donate(
db, user_client, wiki_user, django_user_model
):
"""
This test is based on the bug report
https://github.com/mdn/kuma/issues/6479
The user didn't have any revisions to confront the legacy of, but there might be
other things attached to the user.
"""
other_user = django_user_model.objects.create(
username="other", email="other@example.com"
)
assert not Revision.objects.filter(creator=wiki_user).update(creator=other_user)
attachment_revision = AttachmentRevision(
attachment=Attachment.objects.create(title="test attachment"),
file="some/path.ext",
mime_type="application/kuma",
creator=wiki_user,
title="test attachment",
)
attachment_revision.save()
url = reverse("users.user_delete", kwargs={"username": wiki_user.username})
response = user_client.post(url, HTTP_HOST=settings.WIKI_HOST)
# This means it didn't work! The form rejects.
assert response.status_code == 200
# Ok, let's donate the attachment revisions to "Anonymous"
response = user_client.post(
url, {"attributions": "donate"}, HTTP_HOST=settings.WIKI_HOST
)
# This means it didn't work! The form rejects.
assert response.status_code == 302
with pytest.raises(User.DoesNotExist):
wiki_user.refresh_from_db()
attachment_revision.refresh_from_db()
assert attachment_revision.creator.username == "Anonymous"
|
def test_delete_user_no_revisions_but_attachment_revisions_donate(
db, user_client, wiki_user, django_user_model
):
"""
This test is based on the bug report
https://github.com/mdn/kuma/issues/6479
The user didn't have any revisions to confront the legacy of, but there might be
other things attached to the user.
"""
other_user = django_user_model.objects.create(
username="other", email="other@example.com"
)
assert not Revision.objects.filter(creator=wiki_user).exists()
attachment_revision = AttachmentRevision(
attachment=Attachment.objects.create(title="test attachment"),
file="some/path.ext",
mime_type="application/kuma",
creator=wiki_user,
title="test attachment",
)
attachment_revision.save()
url = reverse("users.user_delete", kwargs={"username": wiki_user.username})
response = user_client.post(url, HTTP_HOST=settings.WIKI_HOST)
# This means it didn't work! The form rejects.
assert response.status_code == 200
# Ok, let's donate the attachment revisions to "Anonymous"
response = user_client.post(
url, {"attributions": "donate"}, HTTP_HOST=settings.WIKI_HOST
)
# This means it didn't work! The form rejects.
assert response.status_code == 302
with pytest.raises(User.DoesNotExist):
wiki_user.refresh_from_db()
attachment_revision.refresh_from_db()
assert attachment_revision.creator.username == "Anonymous"
|
55,394 |
def _get_autolog_metrics(fit_model):
result_metrics = {}
whitelist_metrics = [metric for metric in dir(fit_model)
if metric in _autolog_metric_whitelist]
for metric in whitelist_metrics:
metric_value = getattr(fit_model, metric)
if _is_numeric(metric_value):
result_metrics[metric] = metric_value
return result_metrics
|
def _get_autolog_metrics(fit_model):
result_metrics = {}
whitelist_metrics = [metric for metric in dir(fit_model)
if metric in _autolog_metric_whitelist]
for metric in whitelist_metrics:
try:
metric_value = getattr(fit_model, metric)
except Exception:
continue
if _is_numeric(metric_value):
result_metrics[metric] = metric_value
return result_metrics
|
20,718 |
def getPrintInformation(printer_name) -> PrintInformation:
mock_application = MagicMock(name = "mock_application")
mocked_preferences = MagicMock(name="mocked_preferences")
mocked_extruder_stack = MagicMock()
mocked_extruder_stack.getProperty = MagicMock(return_value = 3)
mocked_material = MagicMock(name= "mocked material")
mocked_material.getMetaDataEntry = MagicMock(return_value = "omgzomg")
mocked_extruder_stack.material = mocked_material
mock_application.getInstance = MagicMock(return_value = mock_application)
mocked_preferences.getValue = MagicMock(side_effect=preferencesGetValue)
global_container_stack = MagicMock()
global_container_stack.definition.getName = MagicMock(return_value = printer_name)
mock_application.getGlobalContainerStack = MagicMock(return_value = global_container_stack)
mock_application.getPreferences = MagicMock(return_value = mocked_preferences)
multi_build_plate_model = MagicMock()
multi_build_plate_model.maxBuildPlate = 0
mock_application.getMultiBuildPlateModel = MagicMock(return_value = multi_build_plate_model)
# Mock-up the entire machine manager except the function that needs to be tested: getAbbreviatedMachineName
original_get_abbreviated_name = MachineManager.getAbbreviatedMachineName
mock_machine_manager = MagicMock()
mock_machine_manager.getAbbreviatedMachineName = functools.partial(original_get_abbreviated_name, mock_machine_manager)
mock_application.getMachineManager = MagicMock(return_value = mock_machine_manager)
with patch("UM.Application.Application.getInstance", MagicMock(return_value = mock_application)):
with patch("json.loads", lambda x: {}):
print_information = PrintInformation.PrintInformation(mock_application)
return print_information
|
def getPrintInformation(printer_name) -> PrintInformation:
mock_application = MagicMock(name = "mock_application")
mocked_preferences = MagicMock(name="mocked_preferences")
mocked_extruder_stack = MagicMock()
mocked_extruder_stack.getProperty = MagicMock(return_value = 3)
mocked_material = MagicMock(name= "mocked material")
mocked_material.getMetaDataEntry = MagicMock(return_value = "omgzomg")
mocked_extruder_stack.material = mocked_material
mock_application.getInstance = MagicMock(return_value = mock_application)
mocked_preferences.getValue = MagicMock(side_effect = preferencesGetValue)
global_container_stack = MagicMock()
global_container_stack.definition.getName = MagicMock(return_value = printer_name)
mock_application.getGlobalContainerStack = MagicMock(return_value = global_container_stack)
mock_application.getPreferences = MagicMock(return_value = mocked_preferences)
multi_build_plate_model = MagicMock()
multi_build_plate_model.maxBuildPlate = 0
mock_application.getMultiBuildPlateModel = MagicMock(return_value = multi_build_plate_model)
# Mock-up the entire machine manager except the function that needs to be tested: getAbbreviatedMachineName
original_get_abbreviated_name = MachineManager.getAbbreviatedMachineName
mock_machine_manager = MagicMock()
mock_machine_manager.getAbbreviatedMachineName = functools.partial(original_get_abbreviated_name, mock_machine_manager)
mock_application.getMachineManager = MagicMock(return_value = mock_machine_manager)
with patch("UM.Application.Application.getInstance", MagicMock(return_value = mock_application)):
with patch("json.loads", lambda x: {}):
print_information = PrintInformation.PrintInformation(mock_application)
return print_information
|
17,560 |
def generate_analysis_list_per_study(study_id):
"""Get study analyses and their preparations
Parameters
----------
study_id : int
The study id
Returns
-------
list of dict
The available analyses and their general information
"""
analysis_sql = """
SELECT DISTINCT analysis_id, array_agg(DISTINCT artifact_id) AS aids
FROM qiita.analysis_sample analysis_sample
WHERE sample_id IN (SELECT sample_id
FROM qiita.study_sample
WHERE study_id = %s)
GROUP BY analysis_id
ORDER BY analysis_id
"""
extra_sql = """
SELECT analysis_id, analysis.name, analysis.email, analysis.dflt,
array_agg(DISTINCT aa.artifact_id) FILTER (
WHERE aa.artifact_id IS NOT NULL) as artifact_ids,
ARRAY(SELECT DISTINCT prep_template_id
FROM qiita.preparation_artifact
WHERE artifact_id IN %s) as prep_ids,
array_agg(DISTINCT visibility.visibility) FILTER (
WHERE aa.artifact_id IS NOT NULL) as visibility
FROM qiita.analysis analysis
LEFT JOIN qiita.analysis_artifact aa USING (analysis_id)
LEFT JOIN qiita.artifact artifact USING (artifact_id)
LEFT JOIN qiita.visibility visibility USING (visibility_id)
WHERE analysis_id = %s
GROUP BY analysis_id, analysis.name, analysis.email, analysis.dflt
"""
results = []
with qdb.sql_connection.TRN:
qdb.sql_connection.TRN.add(analysis_sql, [study_id])
aids = qdb.sql_connection.TRN.execute_fetchindex()
for aid, artifact_ids in aids:
qdb.sql_connection.TRN.add(
extra_sql, [tuple(artifact_ids), aid])
for row in qdb.sql_connection.TRN.execute_fetchindex():
results.append(dict(row))
return results
|
def generate_analyses_list_per_study(study_id):
"""Get study analyses and their preparations
Parameters
----------
study_id : int
The study id
Returns
-------
list of dict
The available analyses and their general information
"""
analysis_sql = """
SELECT DISTINCT analysis_id, array_agg(DISTINCT artifact_id) AS aids
FROM qiita.analysis_sample analysis_sample
WHERE sample_id IN (SELECT sample_id
FROM qiita.study_sample
WHERE study_id = %s)
GROUP BY analysis_id
ORDER BY analysis_id
"""
extra_sql = """
SELECT analysis_id, analysis.name, analysis.email, analysis.dflt,
array_agg(DISTINCT aa.artifact_id) FILTER (
WHERE aa.artifact_id IS NOT NULL) as artifact_ids,
ARRAY(SELECT DISTINCT prep_template_id
FROM qiita.preparation_artifact
WHERE artifact_id IN %s) as prep_ids,
array_agg(DISTINCT visibility.visibility) FILTER (
WHERE aa.artifact_id IS NOT NULL) as visibility
FROM qiita.analysis analysis
LEFT JOIN qiita.analysis_artifact aa USING (analysis_id)
LEFT JOIN qiita.artifact artifact USING (artifact_id)
LEFT JOIN qiita.visibility visibility USING (visibility_id)
WHERE analysis_id = %s
GROUP BY analysis_id, analysis.name, analysis.email, analysis.dflt
"""
results = []
with qdb.sql_connection.TRN:
qdb.sql_connection.TRN.add(analysis_sql, [study_id])
aids = qdb.sql_connection.TRN.execute_fetchindex()
for aid, artifact_ids in aids:
qdb.sql_connection.TRN.add(
extra_sql, [tuple(artifact_ids), aid])
for row in qdb.sql_connection.TRN.execute_fetchindex():
results.append(dict(row))
return results
|
46,203 |
def imsave(filename: str, data: np.ndarray):
"""custom imaplementation of imread to avoid skimage dependecy"""
ext = os.path.splitext(filename)[1]
if ext in [".tif", "tiff"]:
import tifffile
tifffile.imsave(filename, data)
else:
import imageio
imageio.imsave(filename, data)
|
def imsave(filename: str, data: np.ndarray):
"""custom imaplementation of imread to avoid skimage dependecy"""
ext = os.path.splitext(filename)[1]
if ext in [".tif", ".tiff"]:
import tifffile
tifffile.imsave(filename, data)
else:
import imageio
imageio.imsave(filename, data)
|
58,051 |
def get_remote_data_command(client: Client, args: Dict[str, Any], params: Dict) -> Union[List[Dict[str, Any]], str]:
"""
get-remote-data command: Returns an updated incident and entries
Args:
client: XSOAR client to use
args:
id: incident id to retrieve
lastUpdate: when was the last time we retrieved data
Returns:
List[Dict[str, Any]]: first entry is the incident (which can be completely empty) and the new entries.
"""
ticket_id = args.get('id', '')
demisto.debug(f'Getting update for remote {ticket_id}')
last_update = dateparser.parse(str(args.get('lastUpdate')), settings={'TIMEZONE': 'UTC'}) # type: ignore
assert last_update is not None
demisto.debug(f'last_update is {last_update}')
_args = {}
_args['incident_id'] = ticket_id
result = get_incidents_list(client=client, args=_args)
if not result:
demisto.debug('Ticket was not found!')
return 'Ticket was not found'
else:
demisto.debug('Ticket was found!')
ticket = result[0]
ticket_last_update = dateparser.parse(str(ticket["modificationDate"]), settings={'TIMEZONE': 'UTC'}) # type: ignore
assert ticket_last_update is not None
if last_update > ticket_last_update:
demisto.debug('Nothing new in the ticket')
ticket = {}
else:
demisto.debug('ticket is updated')
entries = []
# Get actions
# - could be optimized if list_actions would apply filter with last_update timestamp
actions = client.list_actions(incident_id=ticket_id, incident_number=None)
# Filter actions
for action in actions:
if 'Mirrored from Cortex XSOAR' not in action['memoText']:
entry_date = dateparser.parse(action["entryDate"], settings={'TIMEZONE': 'UTC'}) # type: ignore
assert entry_date is not None
if last_update > entry_date:
demisto.debug('skip entry')
else:
demisto.debug('mirror entry to xsoar')
if action["operator"]:
name = action["operator"]["name"]
elif action["person"]:
name = action["person"]["name"]
else:
name = "Unknown"
date_time = entry_date.strftime("%d-%m-%Y %H:%M:%S")
entries.append({
'Type': EntryType.NOTE,
'Contents': f'[{date_time}] {name}:\n\n{action["memoText"]}',
'ContentsFormat': EntryFormat.TEXT,
'Tags': ['mirrored'], # the list of tags to add to the entry
'Note': True # boolean, True for Note, False otherwise
})
if ticket.get('closed'):
if params.get('close_incident'):
demisto.debug(f'ticket is closed: {ticket}')
entries.append({
'Type': EntryType.NOTE,
'Contents': {
'dbotIncidentClose': True,
'closeReason': 'Closed by TOPdesk'
},
'ContentsFormat': EntryFormat.JSON
})
demisto.debug(f'Pull result is {ticket}')
return [ticket] + entries
|
def get_remote_data_command(client: Client, args: Dict[str, Any], params: Dict) -> Union[List[Dict[str, Any]], str]:
"""
get-remote-data command: Returns an updated incident and entries
Args:
client: XSOAR client to use
args:
id: incident id to retrieve
lastUpdate: when was the last time we retrieved data
Returns:
List[Dict[str, Any]]: first entry is the incident (which can be completely empty) and the new entries.
"""
ticket_id = args.get('id', '')
demisto.debug(f'Getting update for remote {ticket_id}')
last_update = dateparser.parse(str(args.get('lastUpdate')), settings={'TIMEZONE': 'UTC'}) # type: ignore
assert last_update is not None
demisto.debug(f'last_update is {last_update}')
_args = {}
_args['incident_id'] = ticket_id
result = get_incidents_list(client=client, args=_args)
if not result:
demisto.debug('Ticket was not found!')
mirrored_data = {'id': ticket_id, 'in_mirror_error': 'Ticket was not found'}
return GetRemoteDataResponse(mirrored_object=mirrored_data, entries=[])
else:
demisto.debug('Ticket was found!')
ticket = result[0]
ticket_last_update = dateparser.parse(str(ticket["modificationDate"]), settings={'TIMEZONE': 'UTC'}) # type: ignore
assert ticket_last_update is not None
if last_update > ticket_last_update:
demisto.debug('Nothing new in the ticket')
ticket = {}
else:
demisto.debug('ticket is updated')
entries = []
# Get actions
# - could be optimized if list_actions would apply filter with last_update timestamp
actions = client.list_actions(incident_id=ticket_id, incident_number=None)
# Filter actions
for action in actions:
if 'Mirrored from Cortex XSOAR' not in action['memoText']:
entry_date = dateparser.parse(action["entryDate"], settings={'TIMEZONE': 'UTC'}) # type: ignore
assert entry_date is not None
if last_update > entry_date:
demisto.debug('skip entry')
else:
demisto.debug('mirror entry to xsoar')
if action["operator"]:
name = action["operator"]["name"]
elif action["person"]:
name = action["person"]["name"]
else:
name = "Unknown"
date_time = entry_date.strftime("%d-%m-%Y %H:%M:%S")
entries.append({
'Type': EntryType.NOTE,
'Contents': f'[{date_time}] {name}:\n\n{action["memoText"]}',
'ContentsFormat': EntryFormat.TEXT,
'Tags': ['mirrored'], # the list of tags to add to the entry
'Note': True # boolean, True for Note, False otherwise
})
if ticket.get('closed'):
if params.get('close_incident'):
demisto.debug(f'ticket is closed: {ticket}')
entries.append({
'Type': EntryType.NOTE,
'Contents': {
'dbotIncidentClose': True,
'closeReason': 'Closed by TOPdesk'
},
'ContentsFormat': EntryFormat.JSON
})
demisto.debug(f'Pull result is {ticket}')
return [ticket] + entries
|
57,884 |
def cherwell_get_one_step_actions_command():
args = demisto.args()
business_object_id = args.get('busobjectid')
result = get_one_step_actions(business_object_id)
actions: dict = {}
ec = {}
md = ''
get_one_step_actions_recursive(result.get('root'), actions)
if actions:
for key in actions.keys():
md += tableToMarkdown(f'{key} one-step actions:', actions[key],
headerTransform=pascalToSpace, headers=ONE_STEP_ACTION_HEADERS)
ec = {'BusinessObjectId': business_object_id, 'Actions': actions}
else:
md = f'No one-step actions found for business object ID {business_object_id}'
return_results(CommandResults(outputs=ec, readable_output=md, outputs_key_field='BusinessObjectId',
outputs_prefix='Cherwell.OneStepActions', raw_response=result))
|
def cherwell_get_one_step_actions_command():
args = demisto.args()
business_object_id = args.get('busobjectid')
result = get_one_step_actions(business_object_id)
actions: dict = {}
ec = {}
md = ''
get_one_step_actions_recursive(result.get('root'), actions)
if actions:
for key, action in actions.items():
md += tableToMarkdown(f'{key} one-step actions:', action,
headerTransform=pascalToSpace, headers=ONE_STEP_ACTION_HEADERS)
ec = {'BusinessObjectId': business_object_id, 'Actions': actions}
else:
md = f'No one-step actions found for business object ID {business_object_id}'
return_results(CommandResults(outputs=ec, readable_output=md, outputs_key_field='BusinessObjectId',
outputs_prefix='Cherwell.OneStepActions', raw_response=result))
|
13,958 |
def test_deprecated_function_message():
"""Test that `deprecated_function` works with a message
"""
@decorators.deprecated_function(message="don't use {0.__name__}")
def myfunc(value):
return str(value)
with pytest.warns(DeprecationWarning) as record:
myfunc('test')
assert record[0].message == "don't use myfunc"
|
def test_deprecated_function_message():
"""Test that `deprecated_function` works with a message
"""
@decorators.deprecated_function(message="don't use {0.__name__}")
def myfunc(value):
return str(value)
with pytest.warns(DeprecationWarning) as record:
myfunc('test')
assert str(record[0].message) == "don't use myfunc"
|
29,313 |
def update_story(
committer_id, story_id, change_list, commit_message):
"""Updates a story. Commits changes.
# NOTE: This function should not be called on its own. Access it
# through `topic_services.update_story_and_topic_summary`.
Args:
committer_id: str. The id of the user who is performing the update
action.
story_id: str. The story id.
change_list: list(StoryChange). These changes are applied in sequence to
produce the resulting story.
commit_message: str or None. A description of changes made to the
story.
Raises:
ValueError. Expected a commit message but received none.
ValidationError. Exploration is already linked to a different story.
ValidationError. Story Url Fragment is not unique across the site.
"""
if not commit_message:
raise ValueError('Expected a commit message but received none.')
old_story = story_fetchers.get_story_by_id(story_id)
new_story, exp_ids_removed_from_story, exp_ids_added_to_story = (
apply_change_list(story_id, change_list))
story_is_published = is_story_published_and_present_in_topic(new_story)
exploration_context_models_to_be_deleted = (
exp_models.ExplorationContextModel.get_multi(
exp_ids_removed_from_story))
exploration_context_models_to_be_deleted = [
model for model in exploration_context_models_to_be_deleted
if model is not None]
exploration_context_models_collisions_list = (
exp_models.ExplorationContextModel.get_multi(
exp_ids_added_to_story))
for context_model in exploration_context_models_collisions_list:
if context_model is not None and context_model.story_id != story_id:
raise utils.ValidationError(
'The exploration with ID %s is already linked to story '
'with ID %s' % (context_model.id, context_model.story_id))
if (
old_story.url_fragment != new_story.url_fragment and
does_story_exist_with_url_fragment(new_story.url_fragment)):
raise utils.ValidationError(
'Story Url Fragment is not unique across the site.')
_save_story(
committer_id, new_story, commit_message, change_list,
story_is_published)
create_story_summary(new_story.id)
if story_is_published:
opportunity_services.update_exploration_opportunities(
old_story, new_story)
suggestion_services.auto_reject_translation_suggestions_for_exp_ids(
exp_ids_removed_from_story)
exp_models.ExplorationContextModel.delete_multi(
exploration_context_models_to_be_deleted)
new_exploration_context_models = [exp_models.ExplorationContextModel(
id=exp_id,
story_id=story_id
) for exp_id in exp_ids_added_to_story]
exp_models.ExplorationContextModel.update_timestamps_multi(
new_exploration_context_models)
exp_models.ExplorationContextModel.put_multi(new_exploration_context_models)
|
def update_story(
committer_id, story_id, change_list, commit_message):
"""Updates a story. Commits changes.
# NOTE: This function should not be called on its own. Access it
# through `topic_services.update_story_and_topic_summary`.
Args:
committer_id: str. The id of the user who is performing the update
action.
story_id: str. The story id.
change_list: list(StoryChange). These changes are applied in sequence to
produce the resulting story.
commit_message: str or None. A description of changes made to the
story.
Raises:
ValueError. Expected a commit message but received none.
ValidationError. Exploration is already linked to a different story.
ValidationError. Story url fragment is not unique across the site.
"""
if not commit_message:
raise ValueError('Expected a commit message but received none.')
old_story = story_fetchers.get_story_by_id(story_id)
new_story, exp_ids_removed_from_story, exp_ids_added_to_story = (
apply_change_list(story_id, change_list))
story_is_published = is_story_published_and_present_in_topic(new_story)
exploration_context_models_to_be_deleted = (
exp_models.ExplorationContextModel.get_multi(
exp_ids_removed_from_story))
exploration_context_models_to_be_deleted = [
model for model in exploration_context_models_to_be_deleted
if model is not None]
exploration_context_models_collisions_list = (
exp_models.ExplorationContextModel.get_multi(
exp_ids_added_to_story))
for context_model in exploration_context_models_collisions_list:
if context_model is not None and context_model.story_id != story_id:
raise utils.ValidationError(
'The exploration with ID %s is already linked to story '
'with ID %s' % (context_model.id, context_model.story_id))
if (
old_story.url_fragment != new_story.url_fragment and
does_story_exist_with_url_fragment(new_story.url_fragment)):
raise utils.ValidationError(
'Story Url Fragment is not unique across the site.')
_save_story(
committer_id, new_story, commit_message, change_list,
story_is_published)
create_story_summary(new_story.id)
if story_is_published:
opportunity_services.update_exploration_opportunities(
old_story, new_story)
suggestion_services.auto_reject_translation_suggestions_for_exp_ids(
exp_ids_removed_from_story)
exp_models.ExplorationContextModel.delete_multi(
exploration_context_models_to_be_deleted)
new_exploration_context_models = [exp_models.ExplorationContextModel(
id=exp_id,
story_id=story_id
) for exp_id in exp_ids_added_to_story]
exp_models.ExplorationContextModel.update_timestamps_multi(
new_exploration_context_models)
exp_models.ExplorationContextModel.put_multi(new_exploration_context_models)
|
46,231 |
def color_lines(colors):
if len(colors) == 2:
return np.concatenate([[colors[0]] * 2, [colors[1]] * 2], axis=0,)
elif len(colors) == 3:
return np.concatenate(
[[colors[0]] * 2, [colors[1]] * 2, [colors[2]] * 2], axis=0,
)
else:
return ValueError(
'Either 2 or 3 colors ' f'must be provided, got f{len(colors)}.'
)
|
def color_lines(colors):
if len(colors) == 2:
return np.concatenate([[colors[0]] * 2, [colors[1]] * 2], axis=0,)
elif len(colors) == 3:
return np.concatenate(
[[colors[0]] * 2, [colors[1]] * 2, [colors[2]] * 2], axis=0,
)
else:
return ValueError(
f'Either 2 or 3 colors must be provided, got {len(colors)}.'
)
|
1,250 |
def verbose(msg, indent=0):
if verbose.switch:
print(f"{' ' * indent}{msg}")
|
def verbose(msg, indent=0):
if verbose.switch:
print(' ' * indent + msg)
|
32,841 |
def activate_distributed_headers(tracer, int_config, request_headers=None):
"""
Helper for activating a distributed trace headers' context if enabled in integration config.
"""
int_config = int_config or {}
if int_config.get("distributed_tracing_enabled", False) or int_config.get("distributed_tracing", False):
context = HTTPPropagator.extract(request_headers)
# Only need to activate the new context if something was propagated
if context.trace_id:
tracer.context_provider.activate(context)
|
def activate_distributed_headers(tracer, int_config, request_headers=None):
"""
Helper for activating a distributed trace headers' context if enabled in integration config.
"""
int_config = int_config or {}
if int_config.get("distributed_tracing_enabled", int_config.get("distributed_tracing", False)):
context = HTTPPropagator.extract(request_headers)
# Only need to activate the new context if something was propagated
if context.trace_id:
tracer.context_provider.activate(context)
|
35,066 |
def import_tflite():
"""Lazy import function for tflite.Model"""
try:
# pylint: disable=C0415
import tflite.Model as model
except ImportError:
err = (
"Error: tflite is required and was not found.\n"
'Please install it using "pip install tflite".'
)
raise TVMCException(str(err))
return model
|
def import_tflite():
"""Lazy import function for tflite.Model"""
try:
# pylint: disable=C0415
import tflite.Model as model
except ImportError:
raise TVMCException(create_import_error_string('tflite', 'tflite'))
return model
|
6,658 |
def run_bom_job(
doc: "BOMUpdateLog", boms: Optional[Dict] = None, update_type: Optional[str] = "Replace BOM"
) -> None:
try:
doc.db_set("status", "In Progress")
if not frappe.flags.in_test:
frappe.db.commit()
frappe.db.auto_commit_on_many_writes = 1
boms = frappe._dict(boms or {})
if update_type == "Replace BOM":
replace_bom(boms)
else:
update_cost()
doc.db_set("status", "Completed")
except (Exception, JobTimeoutException):
frappe.db.rollback()
error_log = frappe.log_error(message=frappe.get_traceback(), title=_("BOM Update Tool Error"))
doc.db_set("status", "Failed")
doc.db_set("error_log", error_log.name)
finally:
frappe.db.auto_commit_on_many_writes = 0
frappe.db.commit() # nosemgrep
|
def run_bom_job(
doc: "BOMUpdateLog", boms: Optional[Dict] = None, update_type: Optional[str] = "Replace BOM"
) -> None:
try:
doc.db_set("status", "In Progress")
if not frappe.flags.in_test:
frappe.db.commit()
frappe.db.auto_commit_on_many_writes = 1
boms = frappe._dict(boms or {})
if update_type == "Replace BOM":
replace_bom(boms)
else:
update_cost()
doc.db_set("status", "Completed")
except Exception:
frappe.db.rollback()
error_log = frappe.log_error(message=frappe.get_traceback(), title=_("BOM Update Tool Error"))
doc.db_set("status", "Failed")
doc.db_set("error_log", error_log.name)
finally:
frappe.db.auto_commit_on_many_writes = 0
frappe.db.commit() # nosemgrep
|
34,881 |
def on_device(data, device):
"""Annotate a tensor with device id.
Parameters
----------
data : tvm.relay.Expr
The tensor to be annotated.
device : Union(:py:class:`TVMContext`, str)
The device that the tensor is annotated with.
Returns
-------
result : tvm.relay.Expr
The annotated tensor.
"""
if isinstance(device, _TVMContext):
device = device.device_type
elif isinstance(device, str):
device = _nd.context(device).device_type
else:
raise ValueError("device is expected to be the type of TVMContext or "
"str, but received %s" % (type(device)))
return _make.on_device(data, device)
|
def on_device(data, device):
"""Annotate a tensor with device id.
Parameters
----------
data : tvm.relay.Expr
The expression to be annotated.
device : Union(:py:class:`TVMContext`, str)
The device that the tensor is annotated with.
Returns
-------
result : tvm.relay.Expr
The annotated tensor.
"""
if isinstance(device, _TVMContext):
device = device.device_type
elif isinstance(device, str):
device = _nd.context(device).device_type
else:
raise ValueError("device is expected to be the type of TVMContext or "
"str, but received %s" % (type(device)))
return _make.on_device(data, device)
|
57,538 |
def run_non_trapping_async_fn(*async_fn_args, **kwargs):
async_fn, *args = async_fn_args
with contextlib.closing(async_fn(*args, **kwargs).__await__()) as gen:
try:
gen.send(None)
raise AssertionError(f"{async_fn} did not stop")
except StopIteration as e:
return e.value
|
def run_non_trapping_async_fn(*async_fn_args, **kwargs):
async_fn, *args = async_fn_args
with contextlib.closing(async_fn(*args, **kwargs).__await__()) as gen:
try:
gen.send(None)
except StopIteration as e:
return e.value
else:
raise AssertionError(f"{async_fn} did not stop")
|
26,363 |
def format_timedelta(watson, delta):
"""
Return a string roughly representing a timedelta.
"""
seconds = int(delta.total_seconds())
if watson.config.getboolean('options', 'format_as_decimal'):
return format_timedelta_decimal(watson, delta)
neg = seconds < 0
seconds = abs(seconds)
total = seconds
stems = []
if total >= 3600:
hours = seconds // 3600
stems.append('{}h'.format(hours))
seconds -= hours * 3600
if total >= 60:
mins = seconds // 60
stems.append('{:02}m'.format(mins))
seconds -= mins * 60
stems.append('{:02}s'.format(seconds))
return ('-' if neg else '') + ' '.join(stems)
|
def format_timedelta(delta, as_decimal=False):
"""
Return a string roughly representing a timedelta.
"""
seconds = int(delta.total_seconds())
if watson.config.getboolean('options', 'format_as_decimal'):
return format_timedelta_decimal(watson, delta)
neg = seconds < 0
seconds = abs(seconds)
total = seconds
stems = []
if total >= 3600:
hours = seconds // 3600
stems.append('{}h'.format(hours))
seconds -= hours * 3600
if total >= 60:
mins = seconds // 60
stems.append('{:02}m'.format(mins))
seconds -= mins * 60
stems.append('{:02}s'.format(seconds))
return ('-' if neg else '') + ' '.join(stems)
|
1,527 |
def classification_report(y_true, y_pred, labels=None, target_names=None,
sample_weight=None, digits=2, output_dict=False,
zero_division="warn"):
"""Build a text report showing the main classification metrics
Read more in the :ref:`User Guide <classification_report>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) target values.
y_pred : 1d array-like, or label indicator array / sparse matrix
Estimated targets as returned by a classifier.
labels : array, shape = [n_labels]
Optional list of label indices to include in the report.
target_names : list of strings
Optional display names matching the labels (same order).
sample_weight : array-like of shape (n_samples,), default=None
Sample weights.
digits : int
Number of digits for formatting output floating point values.
When ``output_dict`` is ``True``, this will be ignored and the
returned values will not be rounded.
output_dict : bool (default = False)
If True, return output as dict
zero_division : "warn", 0 or 1, default="warn"
Sets the value to return when there is a zero division. If set to
"warn", this acts as 0, but warnings are also raised.
Returns
-------
report : string / dict
Text summary of the precision, recall, F1 score for each class.
Dictionary returned if output_dict is True. Dictionary has the
following structure::
{'label 1': {'precision':0.5,
'recall':1.0,
'f1-score':0.67,
'support':1},
'label 2': { ... },
...
}
The reported averages include macro average (averaging the unweighted
mean per label), weighted average (averaging the support-weighted mean
per label), and sample average (only for multilabel classification).
Micro average (averaging the total true positives, false negatives and
false positives) is only shown for multi-label or multi-class
with a subset of classes, because it is accuracy otherwise.
See also :func:`precision_recall_fscore_support` for more details
on averages.
Note that in binary classification, recall of the positive class
is also known as "sensitivity"; recall of the negative class is
"specificity".
See also
--------
precision_recall_fscore_support, confusion_matrix,
multilabel_confusion_matrix
Examples
--------
>>> from sklearn.metrics import classification_report
>>> y_true = [0, 1, 2, 2, 2]
>>> y_pred = [0, 0, 2, 2, 1]
>>> target_names = ['class 0', 'class 1', 'class 2']
>>> print(classification_report(y_true, y_pred, target_names=target_names))
precision recall f1-score support
<BLANKLINE>
class 0 0.50 1.00 0.67 1
class 1 0.00 0.00 0.00 1
class 2 1.00 0.67 0.80 3
<BLANKLINE>
accuracy 0.60 5
macro avg 0.50 0.56 0.49 5
weighted avg 0.70 0.60 0.61 5
<BLANKLINE>
>>> y_pred = [1, 1, 0]
>>> y_true = [1, 1, 1]
>>> print(classification_report(y_true, y_pred, labels=[1, 2, 3]))
precision recall f1-score support
<BLANKLINE>
1 1.00 0.67 0.80 3
2 0.00 0.00 0.00 0
3 0.00 0.00 0.00 0
<BLANKLINE>
micro avg 1.00 0.67 0.80 3
macro avg 0.33 0.22 0.27 3
weighted avg 1.00 0.67 0.80 3
<BLANKLINE>
"""
y_type, y_true, y_pred = _check_targets(y_true, y_pred)
labels_given = True
if labels is None:
labels = unique_labels(y_true, y_pred)
labels_given = False
else:
labels = np.asarray(labels)
# labelled micro average
micro_is_accuracy = ((y_type == 'multiclass' or y_type == 'binary') and
(not labels_given or
(set(labels) == set(unique_labels(y_true, y_pred)))))
if target_names is not None and len(labels) != len(target_names):
if labels_given:
warnings.warn(
"labels size, {0}, does not match size of target_names, {1}"
.format(len(labels), len(target_names))
)
else:
raise ValueError(
"Number of classes, {0}, does not match size of "
"target_names, {1}. Try specifying the labels "
"parameter".format(len(labels), len(target_names))
)
if target_names is None:
target_names = ['%s' % l for l in labels]
headers = ["precision", "recall", "f1-score", "support"]
# compute per-class results without averaging
p, r, f1, s = precision_recall_fscore_support(y_true, y_pred,
labels=labels,
average=None,
sample_weight=sample_weight,
zero_division=zero_division)
rows = zip(target_names, p, r, f1, s)
if y_type.startswith('multilabel'):
average_options = ('micro', 'macro', 'weighted', 'samples')
else:
average_options = ('micro', 'macro', 'weighted')
if output_dict:
report_dict = {label[0]: label[1:] for label in rows}
for label, scores in report_dict.items():
report_dict[label] = dict(zip(headers,
[i.item() for i in scores]))
else:
longest_last_line_heading = 'weighted avg'
name_width = max(len(cn) for cn in target_names)
width = max(name_width, len(longest_last_line_heading), digits)
head_fmt = '{:>{width}s} ' + ' {:>9}' * len(headers)
report = head_fmt.format('', *headers, width=width)
report += '\n\n'
row_fmt = '{:>{width}s} ' + ' {:>9.{digits}f}' * 3 + ' {:>9}\n'
for row in rows:
report += row_fmt.format(*row, width=width, digits=digits)
report += '\n'
# compute all applicable averages
for average in average_options:
if average.startswith('micro') and micro_is_accuracy:
line_heading = 'accuracy'
else:
line_heading = average + ' avg'
# compute averages with specified averaging method
avg_p, avg_r, avg_f1, _ = precision_recall_fscore_support(
y_true, y_pred, labels=labels,
average=average, sample_weight=sample_weight)
avg = [avg_p, avg_r, avg_f1, np.sum(s)]
if output_dict:
report_dict[line_heading] = dict(
zip(headers, [i.item() for i in avg]))
else:
if line_heading == 'accuracy':
row_fmt_accuracy = '{:>{width}s} ' + \
' {:>9.{digits}}' * 2 + ' {:>9.{digits}f}' + \
' {:>9}\n'
report += row_fmt_accuracy.format(line_heading, '', '',
*avg[2:], width=width,
digits=digits)
else:
report += row_fmt.format(line_heading, *avg,
width=width, digits=digits)
if output_dict:
if 'accuracy' in report_dict.keys():
report_dict['accuracy'] = report_dict['accuracy']['precision']
return report_dict
else:
return report
|
def classification_report(y_true, y_pred, labels=None, target_names=None,
sample_weight=None, digits=2, output_dict=False,
zero_division="warn"):
"""Build a text report showing the main classification metrics
Read more in the :ref:`User Guide <classification_report>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) target values.
y_pred : 1d array-like, or label indicator array / sparse matrix
Estimated targets as returned by a classifier.
labels : array, shape = [n_labels]
Optional list of label indices to include in the report.
target_names : list of strings
Optional display names matching the labels (same order).
sample_weight : array-like of shape (n_samples,), default=None
Sample weights.
digits : int
Number of digits for formatting output floating point values.
When ``output_dict`` is ``True``, this will be ignored and the
returned values will not be rounded.
output_dict : bool (default = False)
If True, return output as dict
zero_division : "warn", 0 or 1, default="warn"
Sets the value to return when there is a zero division. If set to
"warn", this acts as 0, but warnings are also raised.
Returns
-------
report : string / dict
Text summary of the precision, recall, F1 score for each class.
Dictionary returned if output_dict is True. Dictionary has the
following structure::
{'label 1': {'precision':0.5,
'recall':1.0,
'f1-score':0.67,
'support':1},
'label 2': { ... },
...
}
The reported averages include macro average (averaging the unweighted
mean per label), weighted average (averaging the support-weighted mean
per label), and sample average (only for multilabel classification).
Micro average (averaging the total true positives, false negatives and
false positives) is only shown for multi-label or multi-class
with a subset of classes, because it corresponds to accuracy otherwise.
See also :func:`precision_recall_fscore_support` for more details
on averages.
Note that in binary classification, recall of the positive class
is also known as "sensitivity"; recall of the negative class is
"specificity".
See also
--------
precision_recall_fscore_support, confusion_matrix,
multilabel_confusion_matrix
Examples
--------
>>> from sklearn.metrics import classification_report
>>> y_true = [0, 1, 2, 2, 2]
>>> y_pred = [0, 0, 2, 2, 1]
>>> target_names = ['class 0', 'class 1', 'class 2']
>>> print(classification_report(y_true, y_pred, target_names=target_names))
precision recall f1-score support
<BLANKLINE>
class 0 0.50 1.00 0.67 1
class 1 0.00 0.00 0.00 1
class 2 1.00 0.67 0.80 3
<BLANKLINE>
accuracy 0.60 5
macro avg 0.50 0.56 0.49 5
weighted avg 0.70 0.60 0.61 5
<BLANKLINE>
>>> y_pred = [1, 1, 0]
>>> y_true = [1, 1, 1]
>>> print(classification_report(y_true, y_pred, labels=[1, 2, 3]))
precision recall f1-score support
<BLANKLINE>
1 1.00 0.67 0.80 3
2 0.00 0.00 0.00 0
3 0.00 0.00 0.00 0
<BLANKLINE>
micro avg 1.00 0.67 0.80 3
macro avg 0.33 0.22 0.27 3
weighted avg 1.00 0.67 0.80 3
<BLANKLINE>
"""
y_type, y_true, y_pred = _check_targets(y_true, y_pred)
labels_given = True
if labels is None:
labels = unique_labels(y_true, y_pred)
labels_given = False
else:
labels = np.asarray(labels)
# labelled micro average
micro_is_accuracy = ((y_type == 'multiclass' or y_type == 'binary') and
(not labels_given or
(set(labels) == set(unique_labels(y_true, y_pred)))))
if target_names is not None and len(labels) != len(target_names):
if labels_given:
warnings.warn(
"labels size, {0}, does not match size of target_names, {1}"
.format(len(labels), len(target_names))
)
else:
raise ValueError(
"Number of classes, {0}, does not match size of "
"target_names, {1}. Try specifying the labels "
"parameter".format(len(labels), len(target_names))
)
if target_names is None:
target_names = ['%s' % l for l in labels]
headers = ["precision", "recall", "f1-score", "support"]
# compute per-class results without averaging
p, r, f1, s = precision_recall_fscore_support(y_true, y_pred,
labels=labels,
average=None,
sample_weight=sample_weight,
zero_division=zero_division)
rows = zip(target_names, p, r, f1, s)
if y_type.startswith('multilabel'):
average_options = ('micro', 'macro', 'weighted', 'samples')
else:
average_options = ('micro', 'macro', 'weighted')
if output_dict:
report_dict = {label[0]: label[1:] for label in rows}
for label, scores in report_dict.items():
report_dict[label] = dict(zip(headers,
[i.item() for i in scores]))
else:
longest_last_line_heading = 'weighted avg'
name_width = max(len(cn) for cn in target_names)
width = max(name_width, len(longest_last_line_heading), digits)
head_fmt = '{:>{width}s} ' + ' {:>9}' * len(headers)
report = head_fmt.format('', *headers, width=width)
report += '\n\n'
row_fmt = '{:>{width}s} ' + ' {:>9.{digits}f}' * 3 + ' {:>9}\n'
for row in rows:
report += row_fmt.format(*row, width=width, digits=digits)
report += '\n'
# compute all applicable averages
for average in average_options:
if average.startswith('micro') and micro_is_accuracy:
line_heading = 'accuracy'
else:
line_heading = average + ' avg'
# compute averages with specified averaging method
avg_p, avg_r, avg_f1, _ = precision_recall_fscore_support(
y_true, y_pred, labels=labels,
average=average, sample_weight=sample_weight)
avg = [avg_p, avg_r, avg_f1, np.sum(s)]
if output_dict:
report_dict[line_heading] = dict(
zip(headers, [i.item() for i in avg]))
else:
if line_heading == 'accuracy':
row_fmt_accuracy = '{:>{width}s} ' + \
' {:>9.{digits}}' * 2 + ' {:>9.{digits}f}' + \
' {:>9}\n'
report += row_fmt_accuracy.format(line_heading, '', '',
*avg[2:], width=width,
digits=digits)
else:
report += row_fmt.format(line_heading, *avg,
width=width, digits=digits)
if output_dict:
if 'accuracy' in report_dict.keys():
report_dict['accuracy'] = report_dict['accuracy']['precision']
return report_dict
else:
return report
|
33,557 |
def _configure_subnet(config):
ec2 = _resource("ec2", config)
use_internal_ips = config["provider"].get("use_internal_ips")
subnets = sorted(
(s for s in ec2.subnets.all() if s.state == "available" and (
use_internal_ips or s.map_public_ip_on_launch)),
reverse=True, # sort from Z-A
key=lambda subnet: subnet.availability_zone)
if not subnets:
raise Exception(
"No usable subnets found, try manually creating an instance in "
"your specified region to populate the list of subnets "
"and trying this again. Note that the subnet must map public IPs "
"on instance launch unless you set 'use_internal_ips': True in "
"the 'provider' config.")
if "availability_zone" in config["provider"]:
azs = config["provider"]["availability_zone"].split(',')
subnets = [s for s in subnets if s.availability_zone in azs]
if not subnets:
raise Exception(
"No usable subnets matching availability zone {} "
"found. Choose a different availability zone or try "
"manually creating an instance in your specified region "
"to populate the list of subnets and trying this again."
.format(config["provider"]["availability_zone"]))
subnet_ids = [s.subnet_id for s in subnets]
subnet_descr = [(s.subnet_id, s.availability_zone) for s in subnets]
if "SubnetIds" not in config["head_node"]:
config["head_node"]["SubnetIds"] = subnet_ids
logger.info("SubnetIds not specified for head node,"
" using {}".format(subnet_descr))
if "SubnetIds" not in config["worker_nodes"]:
config["worker_nodes"]["SubnetIds"] = subnet_ids
logger.info("SubnetId not specified for workers,"
" using {}".format(subnet_descr))
return config
|
def _configure_subnet(config):
ec2 = _resource("ec2", config)
use_internal_ips = config["provider"].get("use_internal_ips", False)
subnets = sorted(
(s for s in ec2.subnets.all() if s.state == "available" and (
use_internal_ips or s.map_public_ip_on_launch)),
reverse=True, # sort from Z-A
key=lambda subnet: subnet.availability_zone)
if not subnets:
raise Exception(
"No usable subnets found, try manually creating an instance in "
"your specified region to populate the list of subnets "
"and trying this again. Note that the subnet must map public IPs "
"on instance launch unless you set 'use_internal_ips': True in "
"the 'provider' config.")
if "availability_zone" in config["provider"]:
azs = config["provider"]["availability_zone"].split(',')
subnets = [s for s in subnets if s.availability_zone in azs]
if not subnets:
raise Exception(
"No usable subnets matching availability zone {} "
"found. Choose a different availability zone or try "
"manually creating an instance in your specified region "
"to populate the list of subnets and trying this again."
.format(config["provider"]["availability_zone"]))
subnet_ids = [s.subnet_id for s in subnets]
subnet_descr = [(s.subnet_id, s.availability_zone) for s in subnets]
if "SubnetIds" not in config["head_node"]:
config["head_node"]["SubnetIds"] = subnet_ids
logger.info("SubnetIds not specified for head node,"
" using {}".format(subnet_descr))
if "SubnetIds" not in config["worker_nodes"]:
config["worker_nodes"]["SubnetIds"] = subnet_ids
logger.info("SubnetId not specified for workers,"
" using {}".format(subnet_descr))
return config
|
20,006 |
def analyze_color(rgb_img, mask, hist_plot_type=None):
"""Analyze the color properties of an image object
Inputs:
rgb_img = RGB image data
mask = Binary mask made from selected contours
hist_plot_type = 'None', 'all', 'rgb','lab' or 'hsv'
Returns:
analysis_image = histogram output
:param rgb_img: numpy.ndarray
:param mask: numpy.ndarray
:param hist_plot_type: str
:return analysis_images: list
"""
params.device += 1
if len(np.shape(rgb_img)) < 3:
fatal_error("rgb_img must be an RGB image")
# Mask the input image
masked = cv2.bitwise_and(rgb_img, rgb_img, mask=mask)
# Extract the blue, green, and red channels
b, g, r = cv2.split(masked)
# Convert the BGR image to LAB
lab = cv2.cvtColor(masked, cv2.COLOR_BGR2LAB)
# Extract the lightness, green-magenta, and blue-yellow channels
l, m, y = cv2.split(lab)
# Convert the BGR image to HSV
hsv = cv2.cvtColor(masked, cv2.COLOR_BGR2HSV)
# Extract the hue, saturation, and value channels
h, s, v = cv2.split(hsv)
# Color channel dictionary
channels = {"b": b, "g": g, "r": r, "l": l, "m": m, "y": y, "h": h, "s": s, "v": v}
# Histogram plot types
hist_types = {"ALL": ("b", "g", "r", "l", "m", "y", "h", "s", "v"),
"RGB": ("b", "g", "r"),
"LAB": ("l", "m", "y"),
"HSV": ("h", "s", "v")}
if hist_plot_type is not None and hist_plot_type.upper() not in hist_types:
fatal_error("The histogram plot type was " + str(hist_plot_type) +
', but can only be one of the following: None, "all", "rgb", "lab", or "hsv"!')
# Store histograms, plotting colors, and plotting labels
histograms = {
"b": {"label": "blue", "graph_color": "blue",
"hist": [float(l[0]) for l in cv2.calcHist([channels["b"]], [0], mask, [256], [0, 255])]},
"g": {"label": "green", "graph_color": "forestgreen",
"hist": [float(l[0]) for l in cv2.calcHist([channels["g"]], [0], mask, [256], [0, 255])]},
"r": {"label": "red", "graph_color": "red",
"hist": [float(l[0]) for l in cv2.calcHist([channels["r"]], [0], mask, [256], [0, 255])]},
"l": {"label": "lightness", "graph_color": "dimgray",
"hist": [float(l[0]) for l in cv2.calcHist([channels["l"]], [0], mask, [256], [0, 255])]},
"m": {"label": "green-magenta", "graph_color": "magenta",
"hist": [float(l[0]) for l in cv2.calcHist([channels["m"]], [0], mask, [256], [0, 255])]},
"y": {"label": "blue-yellow", "graph_color": "yellow",
"hist": [float(l[0]) for l in cv2.calcHist([channels["y"]], [0], mask, [256], [0, 255])]},
"h": {"label": "hue", "graph_color": "blueviolet",
"hist": [float(l[0]) for l in cv2.calcHist([channels["h"]], [0], mask, [256], [0, 255])]},
"s": {"label": "saturation", "graph_color": "cyan",
"hist": [float(l[0]) for l in cv2.calcHist([channels["s"]], [0], mask, [256], [0, 255])]},
"v": {"label": "value", "graph_color": "orange",
"hist": [float(l[0]) for l in cv2.calcHist([channels["v"]], [0], mask, [256], [0, 255])]}
}
# Create list of bin labels for 8-bit data
binval = np.arange(0, 256)
bin_values = [l for l in binval]
analysis_image = None
# Create a dataframe of bin labels and histogram data
dataset = pd.DataFrame({'bins': binval, 'blue': histograms["b"]["hist"],
'green': histograms["g"]["hist"], 'red': histograms["r"]["hist"],
'lightness': histograms["l"]["hist"], 'green-magenta': histograms["m"]["hist"],
'blue-yellow': histograms["y"]["hist"], 'hue': histograms["h"]["hist"],
'saturation': histograms["s"]["hist"], 'value': histograms["v"]["hist"]})
# Make the histogram figure using plotnine
if hist_plot_type is not None:
if hist_plot_type.upper() == 'RGB':
df_rgb = pd.melt(dataset, id_vars=['bins'], value_vars=['blue', 'green', 'red'],
var_name='Color Channel', value_name='Pixels')
hist_fig = (ggplot(df_rgb, aes(x='bins', y='Pixels', color='Color Channel'))
+ geom_line()
+ scale_x_continuous(breaks=list(range(0, 256, 25)))
+ scale_color_manual(['blue', 'green', 'red'])
)
elif hist_plot_type.upper() == 'LAB':
df_lab = pd.melt(dataset, id_vars=['bins'],
value_vars=['lightness', 'green-magenta', 'blue-yellow'],
var_name='Color Channel', value_name='Pixels')
hist_fig = (ggplot(df_lab, aes(x='bins', y='Pixels', color='Color Channel'))
+ geom_line()
+ scale_x_continuous(breaks=list(range(0, 256, 25)))
+ scale_color_manual(['yellow', 'magenta', 'dimgray'])
)
elif hist_plot_type.upper() == 'HSV':
df_hsv = pd.melt(dataset, id_vars=['bins'],
value_vars=['hue', 'saturation', 'value'],
var_name='Color Channel', value_name='Pixels')
hist_fig = (ggplot(df_hsv, aes(x='bins', y='Pixels', color='Color Channel'))
+ geom_line()
+ scale_x_continuous(breaks=list(range(0, 256, 25)))
+ scale_color_manual(['blueviolet', 'cyan', 'orange'])
)
elif hist_plot_type.upper() == 'ALL':
s = pd.Series(['blue', 'green', 'red', 'lightness', 'green-magenta',
'blue-yellow', 'hue', 'saturation', 'value'], dtype="category")
color_channels = ['blue', 'yellow', 'green', 'magenta', 'blueviolet',
'dimgray', 'red', 'cyan', 'orange']
df_all = pd.melt(dataset, id_vars=['bins'], value_vars=s, var_name='Color Channel',
value_name='Pixels')
hist_fig = (ggplot(df_all, aes(x='bins', y='Pixels', color='Color Channel'))
+ geom_line()
+ scale_x_continuous(breaks=list(range(0, 256, 25)))
+ scale_color_manual(color_channels)
)
analysis_image = hist_fig
# Hue values of zero are red but are also the value for pixels where hue is undefined
# The hue value of a pixel will be undefined when the color values are saturated
# Therefore, hue values of zero are excluded from the calculations below
# Calculate the median hue value
# The median is rescaled from the encoded 0-179 range to the 0-359 degree range
hue_median = np.median(h[np.where(h > 0)]) * 2
# Calculate the circular mean and standard deviation of the encoded hue values
# The mean and standard-deviation are rescaled from the encoded 0-179 range to the 0-359 degree range
hue_circular_mean = stats.circmean(h[np.where(h > 0)], high=179, low=0) * 2
hue_circular_std = stats.circstd(h[np.where(h > 0)], high=179, low=0) * 2
# Store into lists instead for pipeline and print_results
# stats_dict = {'mean': circular_mean, 'std' : circular_std, 'median': median}
# Plot or print the histogram
if hist_plot_type is not None:
if params.debug == 'print':
hist_fig.save(os.path.join(params.debug_outdir, str(params.device) + '_analyze_color_hist.png'))
elif params.debug == 'plot':
print(hist_fig)
# Store into global measurements
# RGB signal values are in an unsigned 8-bit scale of 0-255
rgb_values = [i for i in range(0, 256)]
# Hue values are in a 0-359 degree scale, every 2 degrees at the midpoint of the interval
hue_values = [i * 2 + 1 for i in range(0, 180)]
# Percentage values on a 0-100 scale (lightness, saturation, and value)
percent_values = [round((i / 255) * 100, 2) for i in range(0, 256)]
# Diverging values on a -128 to 127 scale (green-magenta and blue-yellow)
diverging_values = [i for i in range(-128, 128)]
if hist_plot_type is not None:
if hist_plot_type.upper() == 'RGB':
outputs.add_observation(variable='blue_frequencies', trait='blue frequencies',
method='plantcv.plantcv.analyze_color', scale='frequency', datatype=list,
value=histograms["b"]["hist"], label=rgb_values)
outputs.add_observation(variable='green_frequencies', trait='green frequencies',
method='plantcv.plantcv.analyze_color', scale='frequency', datatype=list,
value=histograms["g"]["hist"], label=rgb_values)
outputs.add_observation(variable='red_frequencies', trait='red frequencies',
method='plantcv.plantcv.analyze_color', scale='frequency', datatype=list,
value=histograms["r"]["hist"], label=rgb_values)
elif hist_plot_type.upper() == 'LAB':
outputs.add_observation(variable='lightness_frequencies', trait='lightness frequencies',
method='plantcv.plantcv.analyze_color', scale='frequency', datatype=list,
value=histograms["l"]["hist"], label=percent_values)
outputs.add_observation(variable='green-magenta_frequencies', trait='green-magenta frequencies',
method='plantcv.plantcv.analyze_color', scale='frequency', datatype=list,
value=histograms["m"]["hist"], label=diverging_values)
outputs.add_observation(variable='blue-yellow_frequencies', trait='blue-yellow frequencies',
method='plantcv.plantcv.analyze_color', scale='frequency', datatype=list,
value=histograms["y"]["hist"], label=diverging_values)
elif hist_plot_type.upper() == 'HSV':
outputs.add_observation(variable='hue_frequencies', trait='hue frequencies',
method='plantcv.plantcv.analyze_color', scale='frequency', datatype=list,
value=histograms["h"]["hist"][0:180], label=hue_values)
outputs.add_observation(variable='saturation_frequencies', trait='saturation frequencies',
method='plantcv.plantcv.analyze_color', scale='frequency', datatype=list,
value=histograms["s"]["hist"], label=percent_values)
outputs.add_observation(variable='value_frequencies', trait='value frequencies',
method='plantcv.plantcv.analyze_color', scale='frequency', datatype=list,
value=histograms["v"]["hist"], label=percent_values)
elif hist_plot_type.upper() == 'ALL':
outputs.add_observation(variable='blue_frequencies', trait='blue frequencies',
method='plantcv.plantcv.analyze_color', scale='frequency', datatype=list,
value=histograms["b"]["hist"], label=rgb_values)
outputs.add_observation(variable='green_frequencies', trait='green frequencies',
method='plantcv.plantcv.analyze_color', scale='frequency', datatype=list,
value=histograms["g"]["hist"], label=rgb_values)
outputs.add_observation(variable='red_frequencies', trait='red frequencies',
method='plantcv.plantcv.analyze_color', scale='frequency', datatype=list,
value=histograms["r"]["hist"], label=rgb_values)
outputs.add_observation(variable='lightness_frequencies', trait='lightness frequencies',
method='plantcv.plantcv.analyze_color', scale='frequency', datatype=list,
value=histograms["l"]["hist"], label=percent_values)
outputs.add_observation(variable='green-magenta_frequencies', trait='green-magenta frequencies',
method='plantcv.plantcv.analyze_color', scale='frequency', datatype=list,
value=histograms["m"]["hist"], label=diverging_values)
outputs.add_observation(variable='blue-yellow_frequencies', trait='blue-yellow frequencies',
method='plantcv.plantcv.analyze_color', scale='frequency', datatype=list,
value=histograms["y"]["hist"], label=diverging_values)
outputs.add_observation(variable='hue_frequencies', trait='hue frequencies',
method='plantcv.plantcv.analyze_color', scale='frequency', datatype=list,
value=histograms["h"]["hist"][0:180], label=hue_values)
outputs.add_observation(variable='saturation_frequencies', trait='saturation frequencies',
method='plantcv.plantcv.analyze_color', scale='frequency', datatype=list,
value=histograms["s"]["hist"], label=percent_values)
outputs.add_observation(variable='value_frequencies', trait='value frequencies',
method='plantcv.plantcv.analyze_color', scale='frequency', datatype=list,
value=histograms["v"]["hist"], label=percent_values)
# Always save hue stats
outputs.add_observation(variable='hue_circular_mean', trait='hue circular mean',
method='plantcv.plantcv.analyze_color', scale='degrees', datatype=float,
value=hue_circular_mean, label='degrees')
outputs.add_observation(variable='hue_circular_std', trait='hue circular standard deviation',
method='plantcv.plantcv.analyze_color', scale='degrees', datatype=float,
value=hue_median, label='degrees')
outputs.add_observation(variable='hue_median', trait='hue median',
method='plantcv.plantcv.analyze_color', scale='degrees', datatype=float,
value=hue_median, label='degrees')
# Store images
outputs.images.append([analysis_image])
return analysis_image
|
def analyze_color(rgb_img, mask, hist_plot_type=None):
"""Analyze the color properties of an image object
Inputs:
rgb_img = RGB image data
mask = Binary mask made from selected contours
hist_plot_type = 'None', 'all', 'rgb','lab' or 'hsv'
Returns:
analysis_image = histogram output
:param rgb_img: numpy.ndarray
:param mask: numpy.ndarray
:param hist_plot_type: str
:return analysis_images: list
"""
params.device += 1
if len(np.shape(rgb_img)) < 3:
fatal_error("rgb_img must be an RGB image")
# Mask the input image
masked = cv2.bitwise_and(rgb_img, rgb_img, mask=mask)
# Extract the blue, green, and red channels
b, g, r = cv2.split(masked)
# Convert the BGR image to LAB
lab = cv2.cvtColor(masked, cv2.COLOR_BGR2LAB)
# Extract the lightness, green-magenta, and blue-yellow channels
l, m, y = cv2.split(lab)
# Convert the BGR image to HSV
hsv = cv2.cvtColor(masked, cv2.COLOR_BGR2HSV)
# Extract the hue, saturation, and value channels
h, s, v = cv2.split(hsv)
# Color channel dictionary
channels = {"b": b, "g": g, "r": r, "l": l, "m": m, "y": y, "h": h, "s": s, "v": v}
# Histogram plot types
hist_types = {"ALL": ("b", "g", "r", "l", "m", "y", "h", "s", "v"),
"RGB": ("b", "g", "r"),
"LAB": ("l", "m", "y"),
"HSV": ("h", "s", "v")}
if hist_plot_type is not None and hist_plot_type.upper() not in hist_types:
fatal_error("The histogram plot type was " + str(hist_plot_type) +
', but can only be one of the following: None, "all", "rgb", "lab", or "hsv"!')
# Store histograms, plotting colors, and plotting labels
histograms = {
"b": {"label": "blue", "graph_color": "blue",
"hist": [float(l[0]) for l in cv2.calcHist([channels["b"]], [0], mask, [256], [0, 255])]},
"g": {"label": "green", "graph_color": "forestgreen",
"hist": [float(l[0]) for l in cv2.calcHist([channels["g"]], [0], mask, [256], [0, 255])]},
"r": {"label": "red", "graph_color": "red",
"hist": [float(l[0]) for l in cv2.calcHist([channels["r"]], [0], mask, [256], [0, 255])]},
"l": {"label": "lightness", "graph_color": "dimgray",
"hist": [float(l[0]) for l in cv2.calcHist([channels["l"]], [0], mask, [256], [0, 255])]},
"m": {"label": "green-magenta", "graph_color": "magenta",
"hist": [float(l[0]) for l in cv2.calcHist([channels["m"]], [0], mask, [256], [0, 255])]},
"y": {"label": "blue-yellow", "graph_color": "yellow",
"hist": [float(l[0]) for l in cv2.calcHist([channels["y"]], [0], mask, [256], [0, 255])]},
"h": {"label": "hue", "graph_color": "blueviolet",
"hist": [float(l[0]) for l in cv2.calcHist([channels["h"]], [0], mask, [256], [0, 255])]},
"s": {"label": "saturation", "graph_color": "cyan",
"hist": [float(l[0]) for l in cv2.calcHist([channels["s"]], [0], mask, [256], [0, 255])]},
"v": {"label": "value", "graph_color": "orange",
"hist": [float(l[0]) for l in cv2.calcHist([channels["v"]], [0], mask, [256], [0, 255])]}
}
# Create list of bin labels for 8-bit data
binval = np.arange(0, 256)
bin_values = [l for l in binval]
analysis_image = None
# Create a dataframe of bin labels and histogram data
dataset = pd.DataFrame({'bins': binval, 'blue': histograms["b"]["hist"],
'green': histograms["g"]["hist"], 'red': histograms["r"]["hist"],
'lightness': histograms["l"]["hist"], 'green-magenta': histograms["m"]["hist"],
'blue-yellow': histograms["y"]["hist"], 'hue': histograms["h"]["hist"],
'saturation': histograms["s"]["hist"], 'value': histograms["v"]["hist"]})
# Make the histogram figure using plotnine
if hist_plot_type is not None:
if hist_plot_type.upper() == 'RGB':
df_rgb = pd.melt(dataset, id_vars=['bins'], value_vars=['blue', 'green', 'red'],
var_name='Color Channel', value_name='Pixels')
hist_fig = (ggplot(df_rgb, aes(x='bins', y='Pixels', color='Color Channel'))
+ geom_line()
+ scale_x_continuous(breaks=list(range(0, 256, 25)))
+ scale_color_manual(['blue', 'green', 'red'])
)
elif hist_plot_type.upper() == 'LAB':
df_lab = pd.melt(dataset, id_vars=['bins'],
value_vars=['lightness', 'green-magenta', 'blue-yellow'],
var_name='Color Channel', value_name='Pixels')
hist_fig = (ggplot(df_lab, aes(x='bins', y='Pixels', color='Color Channel'))
+ geom_line()
+ scale_x_continuous(breaks=list(range(0, 256, 25)))
+ scale_color_manual(['yellow', 'magenta', 'dimgray'])
)
elif hist_plot_type.upper() == 'HSV':
df_hsv = pd.melt(dataset, id_vars=['bins'],
value_vars=['hue', 'saturation', 'value'],
var_name='Color Channel', value_name='Pixels')
hist_fig = (ggplot(df_hsv, aes(x='bins', y='Pixels', color='Color Channel'))
+ geom_line()
+ scale_x_continuous(breaks=list(range(0, 256, 25)))
+ scale_color_manual(['blueviolet', 'cyan', 'orange'])
)
elif hist_plot_type.upper() == 'ALL':
s = pd.Series(['blue', 'green', 'red', 'lightness', 'green-magenta',
'blue-yellow', 'hue', 'saturation', 'value'], dtype="category")
color_channels = ['blue', 'yellow', 'green', 'magenta', 'blueviolet',
'dimgray', 'red', 'cyan', 'orange']
df_all = pd.melt(dataset, id_vars=['bins'], value_vars=s, var_name='Color Channel',
value_name='Pixels')
hist_fig = (ggplot(df_all, aes(x='bins', y='Pixels', color='Color Channel'))
+ geom_line()
+ scale_x_continuous(breaks=list(range(0, 256, 25)))
+ scale_color_manual(color_channels)
)
analysis_image = hist_fig
# Hue values of zero are red but are also the value for pixels where hue is undefined
# The hue value of a pixel will be undefined when the color values are saturated
# Therefore, hue values of zero are excluded from the calculations below
# Calculate the median hue value
# The median is rescaled from the encoded 0-179 range to the 0-359 degree range
hue_median = np.median(h[np.where(h > 0)]) * 2
# Calculate the circular mean and standard deviation of the encoded hue values
# The mean and standard-deviation are rescaled from the encoded 0-179 range to the 0-359 degree range
hue_circular_mean = stats.circmean(h[np.where(h > 0)], high=179, low=0) * 2
hue_circular_std = stats.circstd(h[np.where(h > 0)], high=179, low=0) * 2
# Store into lists instead for pipeline and print_results
# stats_dict = {'mean': circular_mean, 'std' : circular_std, 'median': median}
# Plot or print the histogram
if hist_plot_type is not None:
if params.debug == 'print':
hist_fig.save(os.path.join(params.debug_outdir, str(params.device) + '_analyze_color_hist.png'))
elif params.debug == 'plot':
print(hist_fig)
# Store into global measurements
# RGB signal values are in an unsigned 8-bit scale of 0-255
rgb_values = [i for i in range(0, 256)]
# Hue values are in a 0-359 degree scale, every 2 degrees at the midpoint of the interval
hue_values = [i * 2 + 1 for i in range(0, 180)]
# Percentage values on a 0-100 scale (lightness, saturation, and value)
percent_values = [round((i / 255) * 100, 2) for i in range(0, 256)]
# Diverging values on a -128 to 127 scale (green-magenta and blue-yellow)
diverging_values = [i for i in range(-128, 128)]
if hist_plot_type is not None:
if hist_plot_type.upper() == 'RGB' or hist_plot_type.upper() == 'ALL':
outputs.add_observation(variable='blue_frequencies', trait='blue frequencies',
method='plantcv.plantcv.analyze_color', scale='frequency', datatype=list,
value=histograms["b"]["hist"], label=rgb_values)
outputs.add_observation(variable='green_frequencies', trait='green frequencies',
method='plantcv.plantcv.analyze_color', scale='frequency', datatype=list,
value=histograms["g"]["hist"], label=rgb_values)
outputs.add_observation(variable='red_frequencies', trait='red frequencies',
method='plantcv.plantcv.analyze_color', scale='frequency', datatype=list,
value=histograms["r"]["hist"], label=rgb_values)
elif hist_plot_type.upper() == 'LAB':
outputs.add_observation(variable='lightness_frequencies', trait='lightness frequencies',
method='plantcv.plantcv.analyze_color', scale='frequency', datatype=list,
value=histograms["l"]["hist"], label=percent_values)
outputs.add_observation(variable='green-magenta_frequencies', trait='green-magenta frequencies',
method='plantcv.plantcv.analyze_color', scale='frequency', datatype=list,
value=histograms["m"]["hist"], label=diverging_values)
outputs.add_observation(variable='blue-yellow_frequencies', trait='blue-yellow frequencies',
method='plantcv.plantcv.analyze_color', scale='frequency', datatype=list,
value=histograms["y"]["hist"], label=diverging_values)
elif hist_plot_type.upper() == 'HSV':
outputs.add_observation(variable='hue_frequencies', trait='hue frequencies',
method='plantcv.plantcv.analyze_color', scale='frequency', datatype=list,
value=histograms["h"]["hist"][0:180], label=hue_values)
outputs.add_observation(variable='saturation_frequencies', trait='saturation frequencies',
method='plantcv.plantcv.analyze_color', scale='frequency', datatype=list,
value=histograms["s"]["hist"], label=percent_values)
outputs.add_observation(variable='value_frequencies', trait='value frequencies',
method='plantcv.plantcv.analyze_color', scale='frequency', datatype=list,
value=histograms["v"]["hist"], label=percent_values)
elif hist_plot_type.upper() == 'ALL':
outputs.add_observation(variable='blue_frequencies', trait='blue frequencies',
method='plantcv.plantcv.analyze_color', scale='frequency', datatype=list,
value=histograms["b"]["hist"], label=rgb_values)
outputs.add_observation(variable='green_frequencies', trait='green frequencies',
method='plantcv.plantcv.analyze_color', scale='frequency', datatype=list,
value=histograms["g"]["hist"], label=rgb_values)
outputs.add_observation(variable='red_frequencies', trait='red frequencies',
method='plantcv.plantcv.analyze_color', scale='frequency', datatype=list,
value=histograms["r"]["hist"], label=rgb_values)
outputs.add_observation(variable='lightness_frequencies', trait='lightness frequencies',
method='plantcv.plantcv.analyze_color', scale='frequency', datatype=list,
value=histograms["l"]["hist"], label=percent_values)
outputs.add_observation(variable='green-magenta_frequencies', trait='green-magenta frequencies',
method='plantcv.plantcv.analyze_color', scale='frequency', datatype=list,
value=histograms["m"]["hist"], label=diverging_values)
outputs.add_observation(variable='blue-yellow_frequencies', trait='blue-yellow frequencies',
method='plantcv.plantcv.analyze_color', scale='frequency', datatype=list,
value=histograms["y"]["hist"], label=diverging_values)
outputs.add_observation(variable='hue_frequencies', trait='hue frequencies',
method='plantcv.plantcv.analyze_color', scale='frequency', datatype=list,
value=histograms["h"]["hist"][0:180], label=hue_values)
outputs.add_observation(variable='saturation_frequencies', trait='saturation frequencies',
method='plantcv.plantcv.analyze_color', scale='frequency', datatype=list,
value=histograms["s"]["hist"], label=percent_values)
outputs.add_observation(variable='value_frequencies', trait='value frequencies',
method='plantcv.plantcv.analyze_color', scale='frequency', datatype=list,
value=histograms["v"]["hist"], label=percent_values)
# Always save hue stats
outputs.add_observation(variable='hue_circular_mean', trait='hue circular mean',
method='plantcv.plantcv.analyze_color', scale='degrees', datatype=float,
value=hue_circular_mean, label='degrees')
outputs.add_observation(variable='hue_circular_std', trait='hue circular standard deviation',
method='plantcv.plantcv.analyze_color', scale='degrees', datatype=float,
value=hue_median, label='degrees')
outputs.add_observation(variable='hue_median', trait='hue median',
method='plantcv.plantcv.analyze_color', scale='degrees', datatype=float,
value=hue_median, label='degrees')
# Store images
outputs.images.append([analysis_image])
return analysis_image
|
9,476 |
def main():
arg_spec = dict(
name=dict(default=None),
path=dict(default=None, type='path'),
version=dict(default=None),
production=dict(default='no', type='bool'),
executable=dict(default=None, type='path'),
registry=dict(default=None),
state=dict(default='present', choices=['present', 'absent', 'latest']),
ignore_scripts=dict(default=False, type='bool'),
unsafe_perm=dict(default=False, type='bool'),
ci=dict(default=False, type='bool'),
)
arg_spec['global'] = dict(default='no', type='bool')
module = AnsibleModule(
argument_spec=arg_spec,
supports_check_mode=True
)
name = module.params['name']
path = module.params['path']
version = module.params['version']
glbl = module.params['global']
production = module.params['production']
executable = module.params['executable']
registry = module.params['registry']
state = module.params['state']
ignore_scripts = module.params['ignore_scripts']
unsafe_perm = module.params['unsafe_perm']
ci = module.params['ci']
if not path and not glbl:
module.fail_json(msg='path must be specified when not using global')
if state == 'absent' and not name:
module.fail_json(msg='uninstalling a package is only available for named packages')
npm = Npm(module, name=name, path=path, version=version, glbl=glbl, production=production,
executable=executable, registry=registry, ignore_scripts=ignore_scripts,
unsafe_perm=unsafe_perm, state=state)
changed = False
if ci == True:
npm.ci_install()
changed = True
if state == 'present':
installed, missing = npm.list()
if missing:
changed = True
npm.install()
elif state == 'latest':
installed, missing = npm.list()
outdated = npm.list_outdated()
if missing:
changed = True
npm.install()
if outdated:
changed = True
npm.update()
else: # absent
installed, missing = npm.list()
if name in installed:
changed = True
npm.uninstall()
module.exit_json(changed=changed)
|
def main():
arg_spec = dict(
name=dict(default=None),
path=dict(default=None, type='path'),
version=dict(default=None),
production=dict(default='no', type='bool'),
executable=dict(default=None, type='path'),
registry=dict(default=None),
state=dict(default='present', choices=['present', 'absent', 'latest']),
ignore_scripts=dict(default=False, type='bool'),
unsafe_perm=dict(default=False, type='bool'),
ci=dict(default=False, type='bool'),
)
arg_spec['global'] = dict(default='no', type='bool')
module = AnsibleModule(
argument_spec=arg_spec,
supports_check_mode=True
)
name = module.params['name']
path = module.params['path']
version = module.params['version']
glbl = module.params['global']
production = module.params['production']
executable = module.params['executable']
registry = module.params['registry']
state = module.params['state']
ignore_scripts = module.params['ignore_scripts']
unsafe_perm = module.params['unsafe_perm']
ci = module.params['ci']
if not path and not glbl:
module.fail_json(msg='path must be specified when not using global')
if state == 'absent' and not name:
module.fail_json(msg='uninstalling a package is only available for named packages')
npm = Npm(module, name=name, path=path, version=version, glbl=glbl, production=production,
executable=executable, registry=registry, ignore_scripts=ignore_scripts,
unsafe_perm=unsafe_perm, state=state)
changed = False
if ci is True:
npm.ci_install()
changed = True
if state == 'present':
installed, missing = npm.list()
if missing:
changed = True
npm.install()
elif state == 'latest':
installed, missing = npm.list()
outdated = npm.list_outdated()
if missing:
changed = True
npm.install()
if outdated:
changed = True
npm.update()
else: # absent
installed, missing = npm.list()
if name in installed:
changed = True
npm.uninstall()
module.exit_json(changed=changed)
|
30,076 |
def _utf8_content_disposition(disposition, file_name):
"""Create HTTP header for downloading a file with UTF-8 filename.
See this and related answers:
https://stackoverflow.com/a/8996249/2173868
"""
ascii_name = unicodedata.normalize(
'NFKD', file_name).encode('ascii', 'ignore').decode()
header = '{}; filename="{}"'.format(disposition, ascii_name)
if ascii_name != file_name:
quoted_name = urllib.parse.quote(file_name)
header += '; filename*=UTF-8\'\'{}'.format(quoted_name)
return header
|
def _utf8_content_disposition(disposition, file_name):
"""Create HTTP header for downloading a file with UTF-8 filename.
See this and related answers:
https://stackoverflow.com/a/8996249/2173868
"""
ascii_name = unicodedata.normalize(
'NFKD', file_name).encode('ascii', 'ignore').decode()
header = '{}; filename="{}"'.format(disposition, ascii_name)
if ascii_name != file_name:
quoted_name = urllib.parse.quote(file_name)
header += u'; filename*=UTF-8\'\'{}'.format(quoted_name)
return header
|
41,770 |
def create_default_formatter() -> 'colorlog.ColoredFormatter':
"""Create a default formatter of log messages.
This function is not supposed to be directly accessed by library users.
"""
return colorlog.ColoredFormatter(
'%(log_color)s[%(levelname)1.1s %(asctime)s]%(reset)s %(message)s')
|
def create_default_formatter() -> colorlog.ColoredFormatter:
"""Create a default formatter of log messages.
This function is not supposed to be directly accessed by library users.
"""
return colorlog.ColoredFormatter(
'%(log_color)s[%(levelname)1.1s %(asctime)s]%(reset)s %(message)s')
|
30,146 |
def fetch_wind_forecast(zone_key='US-MISO', session=None, target_datetime=None, logger=None):
"""
Requests the day ahead wind forecast (in MW) of a given zone
Arguments:
zone_key (optional) -- used in case a parser is able to fetch multiple countries
session (optional) -- request session passed in order to re-use an existing session
target_datetime (optional) -- used if parser can fetch data for a specific day
logger (optional) -- handles logging when parser is run as main
Return:
A list of dictionaries in the form:
{
'source': 'misoenergy.org',
'value': 12932.0,
'datetime': '2019-01-01T00:00:00Z',
'zoneKey': 'US-MISO'
}
"""
s = session or requests.Session()
req = s.get(wind_forecast_url)
raw_json = req.json()
raw_data = raw_json['Forecast']
data = []
for item in raw_data:
dt = parser.parse(item['DateTimeEST']).replace(tzinfo=tz.gettz('America/New_York'))
value = float(item['Value'])
datapoint = {'datetime': dt,
'value': value,
'source': 'misoenergy.org',
'zoneKey': zone_key}
data.append(datapoint)
return data
|
def fetch_wind_forecast(zone_key='US-MISO', session=None, target_datetime=None, logger=None):
"""
Requests the day ahead wind forecast (in MW) of a given zone
Arguments:
zone_key (optional) -- used in case a parser is able to fetch multiple countries
session (optional) -- request session passed in order to re-use an existing session
target_datetime (optional) -- used if parser can fetch data for a specific day
logger (optional) -- handles logging when parser is run as main
Return:
A list of dictionaries in the form:
{
'source': 'misoenergy.org',
'value': 12932.0,
'datetime': '2019-01-01T00:00:00Z',
'zoneKey': 'US-MISO'
}
"""
s = session or requests.Session()
req = s.get(wind_forecast_url)
raw_json = req.json()
raw_data = raw_json['Forecast']
data = []
for item in raw_data:
dt = parser.parse(item['DateTimeEST']).replace(tzinfo=tz.gettz('America/New_York'))
value = float(item['Value'])
datapoint = {'datetime': dt,
'production': {'wind': value},
'source': 'misoenergy.org',
'zoneKey': zone_key}
data.append(datapoint)
return data
|
45,380 |
def test_feature_names():
dataset = load_breast_cancer()
X = dataset.data
y = dataset.target
feature_names = [f"feat{i}" for i in range(X.shape[1])]
check_dmatrix(
X,
y,
feature_names=feature_names,
)
dmatrix = xgb.DMatrix(X, label=y, feature_names=feature_names)
md_dmatrix = mxgb.DMatrix(
pd.DataFrame(X), label=pd.Series(y), feature_names=feature_names
)
params = {
"objective": "binary:logistic",
"eval_metric": "mlogloss",
}
booster = xgb.train(params, dmatrix, num_boost_round=10)
md_booster = mxgb.train(params, md_dmatrix, num_boost_round=10)
predictions = booster.predict(dmatrix)
modin_predictions = md_booster.predict(md_dmatrix)
preds = pd.DataFrame(predictions).apply(lambda x: round(x))
modin_preds = modin_predictions.apply(lambda x: round(x))
accuracy = accuracy_score(y, preds)
md_accuracy = accuracy_score(y, modin_preds)
np.testing.assert_allclose(accuracy, md_accuracy, atol=0.005, rtol=0.002)
# Different feature_names (default) must raise error in this case
dm = xgb.DMatrix(X)
md_dm = mxgb.DMatrix(pd.DataFrame(X))
with pytest.raises(ValueError):
booster.predict(dm)
with pytest.raises(ValueError):
repr(md_booster.predict(md_dm))
|
def test_feature_names():
dataset = load_breast_cancer()
X = dataset.data
y = dataset.target
feature_names = [f"feat{i}" for i in range(X.shape[1])]
check_dmatrix(
X,
y,
feature_names=feature_names,
)
dmatrix = xgb.DMatrix(X, label=y, feature_names=feature_names)
md_dmatrix = mxgb.DMatrix(
pd.DataFrame(X), label=pd.Series(y), feature_names=feature_names
)
params = {
"objective": "binary:logistic",
"eval_metric": "mlogloss",
}
booster = xgb.train(params, dmatrix, num_boost_round=10)
md_booster = mxgb.train(params, md_dmatrix, num_boost_round=10)
predictions = booster.predict(dmatrix)
modin_predictions = md_booster.predict(md_dmatrix)
preds = pandas.DataFrame(predictions).apply(lambda x: round(x))
modin_preds = modin_predictions.apply(lambda x: round(x))
accuracy = accuracy_score(y, preds)
md_accuracy = accuracy_score(y, modin_preds)
np.testing.assert_allclose(accuracy, md_accuracy, atol=0.005, rtol=0.002)
# Different feature_names (default) must raise error in this case
dm = xgb.DMatrix(X)
md_dm = mxgb.DMatrix(pd.DataFrame(X))
with pytest.raises(ValueError):
booster.predict(dm)
with pytest.raises(ValueError):
repr(md_booster.predict(md_dm))
|
40,703 |
def setup_common_training_handlers(
trainer: Engine,
train_sampler: Optional[DistributedSampler] = None,
to_save: Optional[Dict[str, Any]] = None,
save_every_iters: Optional[int] = 1000,
output_path: Optional[str] = None,
lr_scheduler: Optional[Union[ParamScheduler, _LRScheduler]] = None,
with_gpu_stats: Optional[bool] = False,
output_names: Optional[Iterable[str]] = None,
with_pbars: Optional[bool] = True,
with_pbar_on_iters: Optional[bool] = True,
log_every_iters: Optional[int] = 100,
device: Optional[Union[str, torch.device]] = None,
stop_on_nan: Optional[bool] = True,
clear_cuda_cache: Optional[bool] = True,
save_handler: Optional[Union[Callable, BaseSaveHandler]] = None,
**kwargs: Mapping,
):
"""Helper method to setup trainer with common handlers (it also supports distributed configuration):
- :class:`~ignite.handlers.TerminateOnNan`
- handler to setup learning rate scheduling
- :class:`~ignite.handlers.ModelCheckpoint`
- :class:`~ignite.metrics.RunningAverage` on `update_function` output
- Two progress bars on epochs and optionally on iterations
Args:
trainer (Engine): trainer engine. Output of trainer's `update_function` should be a dictionary
or sequence or a single tensor.
train_sampler (torch.utils.data.DistributedSampler, optional): Optional distributed sampler used to call
`set_epoch` method on epoch started event.
to_save (dict, optional): dictionary with objects to save in the checkpoint. This argument is passed to
:class:`~ignite.handlers.Checkpoint` instance.
save_every_iters (int, optional): saving interval. By default, `to_save` objects are stored
each 1000 iterations.
output_path (str, optional): output path to indicate where `to_save` objects are stored. Argument is mutually
exclusive with ``save_handler``.
lr_scheduler (ParamScheduler or subclass of `torch.optim.lr_scheduler._LRScheduler`): learning rate scheduler
as native torch LRScheduler or ignite's parameter scheduler.
with_gpu_stats (bool, optional): if True, :class:`~ignite.contrib.metrics.handlers.GpuInfo` is attached to the
trainer. This requires `pynvml` package to be installed.
output_names (list/tuple, optional): list of names associated with `update_function` output dictionary.
with_pbars (bool, optional): if True, two progress bars on epochs and optionally on iterations are attached.
Default, True.
with_pbar_on_iters (bool, optional): if True, a progress bar on iterations is attached to the trainer.
Default, True.
log_every_iters (int, optional): logging interval for :class:`~ignite.contrib.metrics.handlers.GpuInfo` and for
epoch-wise progress bar. Default, 100.
stop_on_nan (bool, optional): if True, :class:`~ignite.handlers.TerminateOnNan` handler is added to the trainer.
Default, True.
clear_cuda_cache (bool, optional): if True, `torch.cuda.empty_cache()` is called every end of epoch.
Default, True.
save_handler (callable or :class:`~ignite.handlers.checkpoint.BaseSaveHandler`, optional): Method or callable
class to use to store ``to_save``. See :class:`~ignite.handlers.checkpoint.Checkpoint` for more details.
Argument is mutually exclusive with ``output_path``.
**kwargs: optional keyword args to be passed to construct :class:`~ignite.handlers.checkpoint.Checkpoint`.
device (str of torch.device, optional): deprecated argument, it will be removed in v0.5.0.
"""
if device is not None:
warnings.warn("Argument device is unused and deprecated. It will be removed in v0.5.0")
_kwargs = dict(
to_save=to_save,
save_every_iters=save_every_iters,
output_path=output_path,
lr_scheduler=lr_scheduler,
with_gpu_stats=with_gpu_stats,
output_names=output_names,
with_pbars=with_pbars,
with_pbar_on_iters=with_pbar_on_iters,
log_every_iters=log_every_iters,
stop_on_nan=stop_on_nan,
clear_cuda_cache=clear_cuda_cache,
save_handler=save_handler,
)
_kwargs.update(kwargs)
if idist.get_world_size() > 1:
_setup_common_distrib_training_handlers(trainer, train_sampler=train_sampler, **_kwargs)
else:
if train_sampler is not None and isinstance(train_sampler, DistributedSampler):
warnings.warn(
"Argument train_sampler is a distributed sampler,"
" but either there is no distributed setting or world size is < 2. "
"Train sampler argument will be ignored",
UserWarning,
)
_setup_common_training_handlers(trainer, **_kwargs)
|
def setup_common_training_handlers(
trainer: Engine,
train_sampler: Optional[DistributedSampler] = None,
to_save: Optional[Dict[str, Any]] = None,
save_every_iters: Optional[int] = 1000,
output_path: Optional[str] = None,
lr_scheduler: Optional[Union[ParamScheduler, _LRScheduler]] = None,
with_gpu_stats: bool = False,
output_names: Optional[Iterable[str]] = None,
with_pbars: Optional[bool] = True,
with_pbar_on_iters: Optional[bool] = True,
log_every_iters: Optional[int] = 100,
device: Optional[Union[str, torch.device]] = None,
stop_on_nan: Optional[bool] = True,
clear_cuda_cache: Optional[bool] = True,
save_handler: Optional[Union[Callable, BaseSaveHandler]] = None,
**kwargs: Mapping,
):
"""Helper method to setup trainer with common handlers (it also supports distributed configuration):
- :class:`~ignite.handlers.TerminateOnNan`
- handler to setup learning rate scheduling
- :class:`~ignite.handlers.ModelCheckpoint`
- :class:`~ignite.metrics.RunningAverage` on `update_function` output
- Two progress bars on epochs and optionally on iterations
Args:
trainer (Engine): trainer engine. Output of trainer's `update_function` should be a dictionary
or sequence or a single tensor.
train_sampler (torch.utils.data.DistributedSampler, optional): Optional distributed sampler used to call
`set_epoch` method on epoch started event.
to_save (dict, optional): dictionary with objects to save in the checkpoint. This argument is passed to
:class:`~ignite.handlers.Checkpoint` instance.
save_every_iters (int, optional): saving interval. By default, `to_save` objects are stored
each 1000 iterations.
output_path (str, optional): output path to indicate where `to_save` objects are stored. Argument is mutually
exclusive with ``save_handler``.
lr_scheduler (ParamScheduler or subclass of `torch.optim.lr_scheduler._LRScheduler`): learning rate scheduler
as native torch LRScheduler or ignite's parameter scheduler.
with_gpu_stats (bool, optional): if True, :class:`~ignite.contrib.metrics.handlers.GpuInfo` is attached to the
trainer. This requires `pynvml` package to be installed.
output_names (list/tuple, optional): list of names associated with `update_function` output dictionary.
with_pbars (bool, optional): if True, two progress bars on epochs and optionally on iterations are attached.
Default, True.
with_pbar_on_iters (bool, optional): if True, a progress bar on iterations is attached to the trainer.
Default, True.
log_every_iters (int, optional): logging interval for :class:`~ignite.contrib.metrics.handlers.GpuInfo` and for
epoch-wise progress bar. Default, 100.
stop_on_nan (bool, optional): if True, :class:`~ignite.handlers.TerminateOnNan` handler is added to the trainer.
Default, True.
clear_cuda_cache (bool, optional): if True, `torch.cuda.empty_cache()` is called every end of epoch.
Default, True.
save_handler (callable or :class:`~ignite.handlers.checkpoint.BaseSaveHandler`, optional): Method or callable
class to use to store ``to_save``. See :class:`~ignite.handlers.checkpoint.Checkpoint` for more details.
Argument is mutually exclusive with ``output_path``.
**kwargs: optional keyword args to be passed to construct :class:`~ignite.handlers.checkpoint.Checkpoint`.
device (str of torch.device, optional): deprecated argument, it will be removed in v0.5.0.
"""
if device is not None:
warnings.warn("Argument device is unused and deprecated. It will be removed in v0.5.0")
_kwargs = dict(
to_save=to_save,
save_every_iters=save_every_iters,
output_path=output_path,
lr_scheduler=lr_scheduler,
with_gpu_stats=with_gpu_stats,
output_names=output_names,
with_pbars=with_pbars,
with_pbar_on_iters=with_pbar_on_iters,
log_every_iters=log_every_iters,
stop_on_nan=stop_on_nan,
clear_cuda_cache=clear_cuda_cache,
save_handler=save_handler,
)
_kwargs.update(kwargs)
if idist.get_world_size() > 1:
_setup_common_distrib_training_handlers(trainer, train_sampler=train_sampler, **_kwargs)
else:
if train_sampler is not None and isinstance(train_sampler, DistributedSampler):
warnings.warn(
"Argument train_sampler is a distributed sampler,"
" but either there is no distributed setting or world size is < 2. "
"Train sampler argument will be ignored",
UserWarning,
)
_setup_common_training_handlers(trainer, **_kwargs)
|
32,579 |
def http_request(method, url, data=None, headers={'Accept': 'application/json'}, url_params=None):
# send http request using user settings for unsecure and proxy parameters
# uses basic auth
# returns the http response
LOG('Attempting {} request to {}'.format(method, url))
try:
response = requests.request(
method,
url,
headers=headers,
data=data,
auth=(USERNAME, PASSWORD),
params=url_params,
verify=USE_SSL,
proxies=PROXIES
)
except requests.exceptions.SSLError as e:
LOG(e)
raise ValueError('An SSL error occurred. Consider to set unsecure')
if is_html_response(response):
html_body = get_html_from_response(response)
demisto.results(html_error_entry(html_body))
raise ValueError('Caught HTML response, please verify server url.')
if response.status_code != 200:
msg = parse_error_response(response)
raise ValueError(msg)
try:
return response.json()
except Exception as e:
LOG(e)
return {}
|
def http_request(method, url, data=None, headers={'Accept': 'application/json'}, url_params=None):
# send http request using user settings for unsecure and proxy parameters
# uses basic auth
# returns the http response
LOG(f'Attempting {method} request to {url}')
try:
response = requests.request(
method,
url,
headers=headers,
data=data,
auth=(USERNAME, PASSWORD),
params=url_params,
verify=USE_SSL,
proxies=PROXIES
)
except requests.exceptions.SSLError as e:
LOG(e)
raise ValueError('An SSL error occurred. Consider to set unsecure')
if is_html_response(response):
html_body = get_html_from_response(response)
demisto.results(html_error_entry(html_body))
raise ValueError('Caught HTML response, please verify server url.')
if response.status_code != 200:
msg = parse_error_response(response)
raise ValueError(msg)
try:
return response.json()
except Exception as e:
LOG(e)
return {}
|
29,871 |
def main(args, pass_through_args): # pylint: disable=unused-argument
# shortcut mapping
shortcut = {
'ui': 'spark-ui',
'hist': 'spark-history',
'nb': 'notebook'
}
service = args.service
service = shortcut.get(service, service)
# Dataproc port mapping
dataproc_port_and_path = {
'spark-ui': '18080/?showIncomplete=true',
'spark-history': '18080',
'notebook': '8123'
}
connect_port_and_path = dataproc_port_and_path[service]
cmd = ['gcloud',
'compute',
'ssh',
'{}-m'.format(args.name),
'--zone={}'.format(args.zone),
'--ssh-flag=-D {}'.format(args.port),
'--ssh-flag=-N',
'--ssh-flag=-f',
'--ssh-flag=-n']
print('gcloud command:')
print(' '.join(cmd[:4]) + ' \\\n ' + ' \\\n '.join(cmd[4:]))
if not args.dry_run:
print("Connecting to cluster '{}'...".format(args.name))
# open SSH tunnel to master node
sp.check_call(
cmd,
stderr=sp.STDOUT
)
import platform
system = platform.system()
chrome = os.environ.get('HAILCTL_CHROME')
if system == 'Darwin':
chrome = chrome or r'/Applications/Google Chrome.app/Contents/MacOS/Google Chrome'
elif system == 'Linux':
for c in ['chromium', 'chromium-browser']:
chrome = chrome or shutil.which(c)
if chrome is None:
raise EnvironmentError("cannot find 'chromium' or 'chromium-browser' on path")
elif system == 'Windows':
chrome = chrome or r'/mnt/c/Program Files (x86)/Google/Chrome/Application/chrome.exe'
if not chrome:
raise ValueError(f"unsupported system: {system}, set environment variable HAILCTL_CHROME to a chrome executable")
# open Chrome with SOCKS proxy configuration
with open(os.devnull, 'w') as f:
sp.Popen([
chrome,
'http://localhost:{}'.format(connect_port_and_path),
'--proxy-server=socks5://localhost:{}'.format(args.port),
'--host-resolver-rules=MAP * 0.0.0.0 , EXCLUDE localhost',
'--proxy-bypass-list=<-loopback>', # https://chromium.googlesource.com/chromium/src/+/da790f920bbc169a6805a4fb83b4c2ab09532d91
'--user-data-dir={}'.format(tempfile.gettempdir())
], stdout=f, stderr=f)
|
def main(args, pass_through_args): # pylint: disable=unused-argument
# shortcut mapping
shortcut = {
'ui': 'spark-ui',
'hist': 'spark-history',
'nb': 'notebook'
}
service = args.service
service = shortcut.get(service, service)
# Dataproc port mapping
dataproc_port_and_path = {
'spark-ui': '18080/?showIncomplete=true',
'spark-history': '18080',
'notebook': '8123'
}
connect_port_and_path = dataproc_port_and_path[service]
cmd = ['gcloud',
'compute',
'ssh',
'{}-m'.format(args.name),
'--zone={}'.format(args.zone),
'--ssh-flag=-D {}'.format(args.port),
'--ssh-flag=-N',
'--ssh-flag=-f',
'--ssh-flag=-n']
print('gcloud command:')
print(' '.join(cmd[:4]) + ' \\\n ' + ' \\\n '.join(f"'{x}'" for x in cmd[4:]))
if not args.dry_run:
print("Connecting to cluster '{}'...".format(args.name))
# open SSH tunnel to master node
sp.check_call(
cmd,
stderr=sp.STDOUT
)
import platform
system = platform.system()
chrome = os.environ.get('HAILCTL_CHROME')
if system == 'Darwin':
chrome = chrome or r'/Applications/Google Chrome.app/Contents/MacOS/Google Chrome'
elif system == 'Linux':
for c in ['chromium', 'chromium-browser']:
chrome = chrome or shutil.which(c)
if chrome is None:
raise EnvironmentError("cannot find 'chromium' or 'chromium-browser' on path")
elif system == 'Windows':
chrome = chrome or r'/mnt/c/Program Files (x86)/Google/Chrome/Application/chrome.exe'
if not chrome:
raise ValueError(f"unsupported system: {system}, set environment variable HAILCTL_CHROME to a chrome executable")
# open Chrome with SOCKS proxy configuration
with open(os.devnull, 'w') as f:
sp.Popen([
chrome,
'http://localhost:{}'.format(connect_port_and_path),
'--proxy-server=socks5://localhost:{}'.format(args.port),
'--host-resolver-rules=MAP * 0.0.0.0 , EXCLUDE localhost',
'--proxy-bypass-list=<-loopback>', # https://chromium.googlesource.com/chromium/src/+/da790f920bbc169a6805a4fb83b4c2ab09532d91
'--user-data-dir={}'.format(tempfile.gettempdir())
], stdout=f, stderr=f)
|
1,667 |
def lasso_path(X, y, eps=1e-3, n_alphas=100, alphas=None,
precompute='auto', Xy=None, copy_X=True, coef_init=None,
verbose=False, return_n_iter=False, positive=False, **params):
"""Compute Lasso path with coordinate descent
The Lasso optimization function varies for mono and multi-outputs.
For mono-output tasks it is::
(1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
For multi-output tasks it is::
(1 / (2 * n_samples)) * ||Y - XW||^2_Fro + alpha * ||W||_21
Where::
||W||_21 = \\sum_i \\sqrt{\\sum_j w_{ij}^2}
i.e. the sum of norm of each row.
Read more in the :ref:`User Guide <lasso>`.
Parameters
----------
X : array-like or sparse matrix of shape (n_samples, n_features)
Training data. Pass directly as Fortran-contiguous data to avoid
unnecessary memory duplication. If ``y`` is mono-output then ``X``
can be sparse.
y : ndarray of shape (n_samples,), or (n_samples, n_outputs)
Target values
eps : float, default=1e-3
Length of the path. ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``
n_alphas : int, default=100
Number of alphas along the regularization path
alphas : ndarray, default=None
List of alphas where to compute the models.
If ``None`` alphas are set automatically
precompute : True | False | 'auto' | array-like, default='auto'
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
Xy : array-like, default=None
Xy = np.dot(X.T, y) that can be precomputed. It is useful
only when the Gram matrix is precomputed.
copy_X : bool, default=True
If ``True``, X will be copied; else, it may be overwritten.
coef_init : array of shape (n_features, ), default=None
The initial values of the coefficients.
verbose : bool or int, default=False
Amount of verbosity.
return_n_iter : bool, default=False
whether to return the number of iterations or not.
positive : bool, default=False
If set to True, forces coefficients to be positive.
(Only allowed when ``y.ndim == 1``).
**params : kwargs
keyword arguments passed to the coordinate descent solver.
Returns
-------
alphas : array of shape (n_alphas,)
The alphas along the path where models are computed.
coefs : array of shape (n_features, n_alphas) or \
(n_outputs, n_features, n_alphas)
Coefficients along the path.
dual_gaps : array of shape (n_alphas,)
The dual gaps at the end of the optimization for each alpha.
n_iters : array-like of shape (n_alphas,)
The number of iterations taken by the coordinate descent optimizer to
reach the specified tolerance for each alpha.
Notes
-----
For an example, see
:ref:`examples/linear_model/plot_lasso_coordinate_descent_path.py
<sphx_glr_auto_examples_linear_model_plot_lasso_coordinate_descent_path.py>`.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
Note that in certain cases, the Lars solver may be significantly
faster to implement this functionality. In particular, linear
interpolation can be used to retrieve model coefficients between the
values output by lars_path
Examples
--------
Comparing lasso_path and lars_path with interpolation:
>>> X = np.array([[1, 2, 3.1], [2.3, 5.4, 4.3]]).T
>>> y = np.array([1, 2, 3.1])
>>> # Use lasso_path to compute a coefficient path
>>> _, coef_path, _ = lasso_path(X, y, alphas=[5., 1., .5])
>>> print(coef_path)
[[0. 0. 0.46874778]
[0.2159048 0.4425765 0.23689075]]
>>> # Now use lars_path and 1D linear interpolation to compute the
>>> # same path
>>> from sklearn.linear_model import lars_path
>>> alphas, active, coef_path_lars = lars_path(X, y, method='lasso')
>>> from scipy import interpolate
>>> coef_path_continuous = interpolate.interp1d(alphas[::-1],
... coef_path_lars[:, ::-1])
>>> print(coef_path_continuous([5., 1., .5]))
[[0. 0. 0.46915237]
[0.2159048 0.4425765 0.23668876]]
See also
--------
lars_path
Lasso
LassoLars
LassoCV
LassoLarsCV
sklearn.decomposition.sparse_encode
"""
return enet_path(X, y, l1_ratio=1., eps=eps, n_alphas=n_alphas,
alphas=alphas, precompute=precompute, Xy=Xy,
copy_X=copy_X, coef_init=coef_init, verbose=verbose,
positive=positive, return_n_iter=return_n_iter, **params)
|
def lasso_path(X, y, eps=1e-3, n_alphas=100, alphas=None,
precompute='auto', Xy=None, copy_X=True, coef_init=None,
verbose=False, return_n_iter=False, positive=False, **params):
"""Compute Lasso path with coordinate descent
The Lasso optimization function varies for mono and multi-outputs.
For mono-output tasks it is::
(1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
For multi-output tasks it is::
(1 / (2 * n_samples)) * ||Y - XW||^2_Fro + alpha * ||W||_21
Where::
||W||_21 = \\sum_i \\sqrt{\\sum_j w_{ij}^2}
i.e. the sum of norm of each row.
Read more in the :ref:`User Guide <lasso>`.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training data. Pass directly as Fortran-contiguous data to avoid
unnecessary memory duplication. If ``y`` is mono-output then ``X``
can be sparse.
y : ndarray of shape (n_samples,), or (n_samples, n_outputs)
Target values
eps : float, default=1e-3
Length of the path. ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``
n_alphas : int, default=100
Number of alphas along the regularization path
alphas : ndarray, default=None
List of alphas where to compute the models.
If ``None`` alphas are set automatically
precompute : True | False | 'auto' | array-like, default='auto'
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
Xy : array-like, default=None
Xy = np.dot(X.T, y) that can be precomputed. It is useful
only when the Gram matrix is precomputed.
copy_X : bool, default=True
If ``True``, X will be copied; else, it may be overwritten.
coef_init : array of shape (n_features, ), default=None
The initial values of the coefficients.
verbose : bool or int, default=False
Amount of verbosity.
return_n_iter : bool, default=False
whether to return the number of iterations or not.
positive : bool, default=False
If set to True, forces coefficients to be positive.
(Only allowed when ``y.ndim == 1``).
**params : kwargs
keyword arguments passed to the coordinate descent solver.
Returns
-------
alphas : array of shape (n_alphas,)
The alphas along the path where models are computed.
coefs : array of shape (n_features, n_alphas) or \
(n_outputs, n_features, n_alphas)
Coefficients along the path.
dual_gaps : array of shape (n_alphas,)
The dual gaps at the end of the optimization for each alpha.
n_iters : array-like of shape (n_alphas,)
The number of iterations taken by the coordinate descent optimizer to
reach the specified tolerance for each alpha.
Notes
-----
For an example, see
:ref:`examples/linear_model/plot_lasso_coordinate_descent_path.py
<sphx_glr_auto_examples_linear_model_plot_lasso_coordinate_descent_path.py>`.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
Note that in certain cases, the Lars solver may be significantly
faster to implement this functionality. In particular, linear
interpolation can be used to retrieve model coefficients between the
values output by lars_path
Examples
--------
Comparing lasso_path and lars_path with interpolation:
>>> X = np.array([[1, 2, 3.1], [2.3, 5.4, 4.3]]).T
>>> y = np.array([1, 2, 3.1])
>>> # Use lasso_path to compute a coefficient path
>>> _, coef_path, _ = lasso_path(X, y, alphas=[5., 1., .5])
>>> print(coef_path)
[[0. 0. 0.46874778]
[0.2159048 0.4425765 0.23689075]]
>>> # Now use lars_path and 1D linear interpolation to compute the
>>> # same path
>>> from sklearn.linear_model import lars_path
>>> alphas, active, coef_path_lars = lars_path(X, y, method='lasso')
>>> from scipy import interpolate
>>> coef_path_continuous = interpolate.interp1d(alphas[::-1],
... coef_path_lars[:, ::-1])
>>> print(coef_path_continuous([5., 1., .5]))
[[0. 0. 0.46915237]
[0.2159048 0.4425765 0.23668876]]
See also
--------
lars_path
Lasso
LassoLars
LassoCV
LassoLarsCV
sklearn.decomposition.sparse_encode
"""
return enet_path(X, y, l1_ratio=1., eps=eps, n_alphas=n_alphas,
alphas=alphas, precompute=precompute, Xy=Xy,
copy_X=copy_X, coef_init=coef_init, verbose=verbose,
positive=positive, return_n_iter=return_n_iter, **params)
|
31,743 |
def main() -> None:
"""main function, parses params and runs command functions
:return:
:rtype:
"""
params = demisto.params()
api_key = params.get('credentials').get('password')
base_api = params.get('base_url')
reliability = demisto.params().get('integrationReliability')
verify_certificate = not params.get('insecure', False)
proxy = params.get('proxy', False)
demisto.debug(f'Command being called is {demisto.command()}')
try:
threshold = arg_to_number(params.get('threshold'), required=True, arg_name='threshold')
benign = arg_to_number(params.get('benign'), required=True, arg_name='benign')
if threshold < 0 or benign < 0: # type: ignore
raise DemistoException('threshold and benign should be above 0')
client = Client(
base_url=base_api,
verify=verify_certificate,
proxy=proxy,
api_key=api_key,
threshold=threshold, # type: ignore
benign=benign, # type: ignore
reliability=reliability)
if demisto.command() == 'test-module':
return_results(test_module(client))
elif demisto.command() == 'domain':
return_results(alexa_domain(client, demisto.args()))
else:
raise NotImplementedError('not implemented...')
except Exception as e:
demisto.error(traceback.format_exc()) # print the traceback
return_error(f'Failed to execute {demisto.command()} command.\nError:\n{str(e)}')
|
def main() -> None:
"""main function, parses params and runs command functions
:return:
:rtype:
"""
params = demisto.params()
api_key = params.get('credentials').get('password')
base_api = params.get('base_url')
reliability = params.get('integrationReliability')
verify_certificate = not params.get('insecure', False)
proxy = params.get('proxy', False)
demisto.debug(f'Command being called is {demisto.command()}')
try:
threshold = arg_to_number(params.get('threshold'), required=True, arg_name='threshold')
benign = arg_to_number(params.get('benign'), required=True, arg_name='benign')
if threshold < 0 or benign < 0: # type: ignore
raise DemistoException('threshold and benign should be above 0')
client = Client(
base_url=base_api,
verify=verify_certificate,
proxy=proxy,
api_key=api_key,
threshold=threshold, # type: ignore
benign=benign, # type: ignore
reliability=reliability)
if demisto.command() == 'test-module':
return_results(test_module(client))
elif demisto.command() == 'domain':
return_results(alexa_domain(client, demisto.args()))
else:
raise NotImplementedError('not implemented...')
except Exception as e:
demisto.error(traceback.format_exc()) # print the traceback
return_error(f'Failed to execute {demisto.command()} command.\nError:\n{str(e)}')
|
47,180 |
def main():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
else:
model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
handlers=[logging.StreamHandler(sys.stdout)],
)
log_level = training_args.get_process_log_level()
logger.setLevel(log_level)
datasets.utils.logging.set_verbosity(log_level)
transformers.utils.logging.set_verbosity(log_level)
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
+ f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
)
# Set the verbosity to info of the Transformers logger (on main process only):
logger.info(f"Training/evaluation parameters {training_args}")
# Detecting last checkpoint.
last_checkpoint = None
if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
last_checkpoint = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. "
"Use --overwrite_output_dir to overcome."
)
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
)
# Set seed before initializing model.
set_seed(training_args.seed)
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub
#
# For CSV/JSON files, this script will use the column called 'text' or the first column. You can easily tweak this
# behavior (see below)
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
raw_datasets = load_dataset(
data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir
)
if "validation" not in raw_datasets.keys():
raw_datasets["validation"] = load_dataset(
data_args.dataset_name,
data_args.dataset_config_name,
split=f"train[:{data_args.validation_split_percentage}%]",
cache_dir=model_args.cache_dir,
)
raw_datasets["train"] = load_dataset(
data_args.dataset_name,
data_args.dataset_config_name,
split=f"train[{data_args.validation_split_percentage}%:]",
cache_dir=model_args.cache_dir,
)
else:
data_files = {}
if data_args.train_file is not None:
data_files["train"] = data_args.train_file
if data_args.validation_file is not None:
data_files["validation"] = data_args.validation_file
extension = data_args.train_file.split(".")[-1]
if extension == "txt":
extension = "text"
raw_datasets = load_dataset(extension, data_files=data_files, cache_dir=model_args.cache_dir)
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
config_kwargs = {
"cache_dir": model_args.cache_dir,
"revision": model_args.model_revision,
"use_auth_token": True if model_args.use_auth_token else None,
}
if model_args.config_name:
config = AutoConfig.from_pretrained(model_args.config_name, **config_kwargs)
elif model_args.model_name_or_path:
config = AutoConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs)
else:
config = CONFIG_MAPPING[model_args.model_type]()
logger.warning("You are instantiating a new config instance from scratch.")
if model_args.config_overrides is not None:
logger.info(f"Overriding config: {model_args.config_overrides}")
config.update_from_string(model_args.config_overrides)
tokenizer_kwargs = {
"cache_dir": model_args.cache_dir,
"use_fast": model_args.use_fast_tokenizer,
"revision": model_args.model_revision,
"use_auth_token": True if model_args.use_auth_token else None,
}
if model_args.tokenizer_name:
tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name, **tokenizer_kwargs)
elif model_args.model_name_or_path:
tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path, **tokenizer_kwargs)
else:
raise ValueError(
"You are instantiating a new tokenizer from scratch. This is not supported by this script."
"You can do it from another script, save it, and load it from here, using --tokenizer_name."
)
if model_args.model_name_or_path:
model = AutoModelForMaskedLM.from_pretrained(
model_args.model_name_or_path,
from_tf=bool(".ckpt" in model_args.model_name_or_path),
config=config,
cache_dir=model_args.cache_dir,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
)
else:
logger.info("Training new model from scratch")
model = AutoModelForMaskedLM.from_config(config)
model.resize_token_embeddings(len(tokenizer))
# Preprocessing the datasets.
# First we tokenize all the texts.
if training_args.do_train:
column_names = raw_datasets["train"].column_names
else:
column_names = raw_datasets["validation"].column_names
text_column_name = "text" if "text" in column_names else column_names[0]
if data_args.max_seq_length is None:
max_seq_length = tokenizer.model_max_length
if max_seq_length > 1024:
logger.warning(
f"The tokenizer picked seems to have a very large `model_max_length` ({tokenizer.model_max_length}). "
"Picking 1024 instead. You can change that default value by passing --max_seq_length xxx."
)
max_seq_length = 1024
else:
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"
f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."
)
max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length)
if data_args.line_by_line:
# When using line_by_line, we just tokenize each nonempty line.
padding = "max_length" if data_args.pad_to_max_length else False
def tokenize_function(examples):
# Remove empty lines
examples[text_column_name] = [
line for line in examples[text_column_name] if len(line) > 0 and not line.isspace()
]
return tokenizer(
examples[text_column_name],
padding=padding,
truncation=True,
max_length=max_seq_length,
# We use this option because DataCollatorForLanguageModeling (see below) is more efficient when it
# receives the `special_tokens_mask`.
return_special_tokens_mask=True,
)
with training_args.main_process_first(desc="dataset map tokenization"):
tokenized_datasets = raw_datasets.map(
tokenize_function,
batched=True,
num_proc=data_args.preprocessing_num_workers,
remove_columns=[text_column_name],
load_from_cache_file=not data_args.overwrite_cache,
desc="Running tokenizer on dataset line_by_line",
)
else:
# Otherwise, we tokenize every text, then concatenate them together before splitting them in smaller parts.
# We use `return_special_tokens_mask=True` because DataCollatorForLanguageModeling (see below) is more
# efficient when it receives the `special_tokens_mask`.
def tokenize_function(examples):
return tokenizer(examples[text_column_name], return_special_tokens_mask=True)
with training_args.main_process_first(desc="dataset map tokenization"):
tokenized_datasets = raw_datasets.map(
tokenize_function,
batched=True,
num_proc=data_args.preprocessing_num_workers,
remove_columns=column_names,
load_from_cache_file=not data_args.overwrite_cache,
desc="Running tokenizer on every text in dataset",
)
# Main data processing function that will concatenate all texts from our dataset and generate chunks of
# max_seq_length.
def group_texts(examples):
# Concatenate all texts.
concatenated_examples = {k: sum(examples[k], []) for k in examples.keys()}
total_length = len(concatenated_examples[list(examples.keys())[0]])
# We drop the small remainder, we could add padding if the model supported it instead of this drop, you can
# customize this part to your needs.
total_length = (total_length // max_seq_length) * max_seq_length
# Split by chunks of max_len.
result = {
k: [t[i : i + max_seq_length] for i in range(0, total_length, max_seq_length)]
for k, t in concatenated_examples.items()
}
return result
# Note that with `batched=True`, this map processes 1,000 texts together, so group_texts throws away a
# remainder for each of those groups of 1,000 texts. You can adjust that batch_size here but a higher value
# might be slower to preprocess.
#
# To speed up this part, we use multiprocessing. See the documentation of the map method for more information:
# https://huggingface.co/docs/datasets/package_reference/main_classes.html#datasets.Dataset.map
with training_args.main_process_first(desc="dataset map pre-processing"):
tokenized_datasets = tokenized_datasets.map(
group_texts,
batched=True,
num_proc=data_args.preprocessing_num_workers,
load_from_cache_file=not data_args.overwrite_cache,
desc=f"Grouping texts in chunks of {max_seq_length}",
)
if training_args.do_train:
if "train" not in tokenized_datasets:
raise ValueError("--do_train requires a train dataset")
train_dataset = tokenized_datasets["train"]
if data_args.max_train_samples is not None:
train_dataset = train_dataset.select(range(data_args.max_train_samples))
if training_args.do_eval:
if "validation" not in tokenized_datasets:
raise ValueError("--do_eval requires a validation dataset")
eval_dataset = tokenized_datasets["validation"]
if data_args.max_eval_samples is not None:
eval_dataset = eval_dataset.select(range(data_args.max_eval_samples))
# Data collator
# This one will take care of randomly masking the tokens.
pad_to_multiple_of_8 = data_args.line_by_line and training_args.fp16 and not data_args.pad_to_max_length
data_collator = DataCollatorForLanguageModeling(
tokenizer=tokenizer,
mlm_probability=data_args.mlm_probability,
pad_to_multiple_of=8 if pad_to_multiple_of_8 else None,
)
# Initialize our Trainer
trainer = Trainer(
model=model,
args=training_args,
train_dataset=train_dataset if training_args.do_train else None,
eval_dataset=eval_dataset if training_args.do_eval else None,
tokenizer=tokenizer,
data_collator=data_collator,
)
# Training
if training_args.do_train:
checkpoint = None
if training_args.resume_from_checkpoint is not None:
checkpoint = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
checkpoint = last_checkpoint
train_result = trainer.train(resume_from_checkpoint=checkpoint)
trainer.save_model() # Saves the tokenizer too for easy upload
metrics = train_result.metrics
max_train_samples = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
)
metrics["train_samples"] = min(max_train_samples, len(train_dataset))
trainer.log_metrics("train", metrics)
trainer.save_metrics("train", metrics)
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info("*** Evaluate ***")
metrics = trainer.evaluate()
max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset))
try:
perplexity = math.exp(metrics["eval_loss"])
except OverflowError:
perplexity = float("inf")
metrics["perplexity"] = perplexity
trainer.log_metrics("eval", metrics)
trainer.save_metrics("eval", metrics)
if training_args.push_to_hub:
kwargs = {"finetuned_from": model_args.model_name_or_path, "tasks": "fill-mask"}
if data_args.dataset_name is not None:
kwargs["dataset_tags"] = data_args.dataset_name
if data_args.dataset_config_name is not None:
kwargs["dataset_args"] = data_args.dataset_config_name
kwargs["dataset"] = f"{data_args.dataset_name} {data_args.dataset_config_name}"
else:
kwargs["dataset"] = data_args.dataset_name
trainer.push_to_hub(**kwargs)
|
def main():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
else:
model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
handlers=[logging.StreamHandler(sys.stdout)],
)
log_level = training_args.get_process_log_level()
logger.setLevel(log_level)
datasets.utils.logging.set_verbosity(log_level)
transformers.utils.logging.set_verbosity(log_level)
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
+ f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
)
# Set the verbosity to info of the Transformers logger (on main process only):
logger.info(f"Training/evaluation parameters {training_args}")
# Detecting last checkpoint.
last_checkpoint = None
if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
last_checkpoint = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. "
"Use --overwrite_output_dir to overcome."
)
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
)
# Set seed before initializing model.
set_seed(training_args.seed)
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub
#
# For CSV/JSON files, this script will use the column called 'text' or the first column. You can easily tweak this
# behavior (see below)
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
raw_datasets = load_dataset(
data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir
)
if "validation" not in raw_datasets.keys():
raw_datasets["validation"] = load_dataset(
data_args.dataset_name,
data_args.dataset_config_name,
split=f"train[:{data_args.validation_split_percentage}%]",
cache_dir=model_args.cache_dir,
)
raw_datasets["train"] = load_dataset(
data_args.dataset_name,
data_args.dataset_config_name,
split=f"train[{data_args.validation_split_percentage}%:]",
cache_dir=model_args.cache_dir,
)
else:
data_files = {}
if data_args.train_file is not None:
data_files["train"] = data_args.train_file
if data_args.validation_file is not None:
data_files["validation"] = data_args.validation_file
extension = data_args.train_file.split(".")[-1]
if extension == "txt":
extension = "text"
raw_datasets = load_dataset(extension, data_files=data_files, cache_dir=model_args.cache_dir)
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
config_kwargs = {
"cache_dir": model_args.cache_dir,
"revision": model_args.model_revision,
"use_auth_token": True if model_args.use_auth_token else None,
}
if model_args.config_name:
config = AutoConfig.from_pretrained(model_args.config_name, **config_kwargs)
elif model_args.model_name_or_path:
config = AutoConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs)
else:
config = CONFIG_MAPPING[model_args.model_type]()
logger.warning("You are instantiating a new config instance from scratch.")
if model_args.config_overrides is not None:
logger.info(f"Overriding config: {model_args.config_overrides}")
config.update_from_string(model_args.config_overrides)
tokenizer_kwargs = {
"cache_dir": model_args.cache_dir,
"use_fast": model_args.use_fast_tokenizer,
"revision": model_args.model_revision,
"use_auth_token": True if model_args.use_auth_token else None,
}
if model_args.tokenizer_name:
tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name, **tokenizer_kwargs)
elif model_args.model_name_or_path:
tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path, **tokenizer_kwargs)
else:
raise ValueError(
"You are instantiating a new tokenizer from scratch. This is not supported by this script."
"You can do it from another script, save it, and load it from here, using --tokenizer_name."
)
if model_args.model_name_or_path:
model = AutoModelForMaskedLM.from_pretrained(
model_args.model_name_or_path,
from_tf=bool(".ckpt" in model_args.model_name_or_path),
config=config,
cache_dir=model_args.cache_dir,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
)
else:
logger.info("Training new model from scratch")
model = AutoModelForMaskedLM.from_config(config)
model.resize_token_embeddings(len(tokenizer))
# Preprocessing the datasets.
# First we tokenize all the texts.
if training_args.do_train:
column_names = raw_datasets["train"].column_names
else:
column_names = raw_datasets["validation"].column_names
text_column_name = "text" if "text" in column_names else column_names[0]
if data_args.max_seq_length is None:
max_seq_length = tokenizer.model_max_length
if max_seq_length > 1024:
logger.warning(
f"The tokenizer picked seems to have a very large `model_max_length` ({tokenizer.model_max_length}). "
"Picking 1024 instead. You can change that default value by passing --max_seq_length xxx."
)
max_seq_length = 1024
else:
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"
f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."
)
max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length)
if data_args.line_by_line:
# When using line_by_line, we just tokenize each nonempty line.
padding = "max_length" if data_args.pad_to_max_length else False
def tokenize_function(examples):
# Remove empty lines
examples[text_column_name] = [
line for line in examples[text_column_name] if len(line) > 0 and not line.isspace()
]
return tokenizer(
examples[text_column_name],
padding=padding,
truncation=True,
max_length=max_seq_length,
# We use this option because DataCollatorForLanguageModeling (see below) is more efficient when it
# receives the `special_tokens_mask`.
return_special_tokens_mask=True,
)
with training_args.main_process_first(desc="dataset map tokenization"):
tokenized_datasets = raw_datasets.map(
tokenize_function,
batched=True,
num_proc=data_args.preprocessing_num_workers,
remove_columns=[text_column_name],
load_from_cache_file=not data_args.overwrite_cache,
desc="Running tokenizer on dataset line_by_line",
)
else:
# Otherwise, we tokenize every text, then concatenate them together before splitting them in smaller parts.
# We use `return_special_tokens_mask=True` because DataCollatorForLanguageModeling (see below) is more
# efficient when it receives the `special_tokens_mask`.
def tokenize_function(examples):
return tokenizer(examples[text_column_name], return_special_tokens_mask=True)
with training_args.main_process_first(desc="dataset map tokenization"):
tokenized_datasets = raw_datasets.map(
tokenize_function,
batched=True,
num_proc=data_args.preprocessing_num_workers,
remove_columns=column_names,
load_from_cache_file=not data_args.overwrite_cache,
desc="Running tokenizer on every text in dataset",
)
# Main data processing function that will concatenate all texts from our dataset and generate chunks of
# max_seq_length.
def group_texts(examples):
# Concatenate all texts.
concatenated_examples = {k: sum(examples[k], []) for k in examples.keys()}
total_length = len(concatenated_examples[list(examples.keys())[0]])
# We drop the small remainder, we could add padding if the model supported it instead of this drop, you can
# customize this part to your needs.
total_length = (total_length // max_seq_length) * max_seq_length
# Split by chunks of max_len.
result = {
k: [t[i : i + max_seq_length] for i in range(0, total_length, max_seq_length)]
for k, t in concatenated_examples.items()
}
return result
# Note that with `batched=True`, this map processes 1,000 texts together, so group_texts throws away a
# remainder for each of those groups of 1,000 texts. You can adjust that batch_size here but a higher value
# might be slower to preprocess.
#
# To speed up this part, we use multiprocessing. See the documentation of the map method for more information:
# https://huggingface.co/docs/datasets/package_reference/main_classes.html#datasets.Dataset.map
with training_args.main_process_first(desc="grouping text together"):
tokenized_datasets = tokenized_datasets.map(
group_texts,
batched=True,
num_proc=data_args.preprocessing_num_workers,
load_from_cache_file=not data_args.overwrite_cache,
desc=f"Grouping texts in chunks of {max_seq_length}",
)
if training_args.do_train:
if "train" not in tokenized_datasets:
raise ValueError("--do_train requires a train dataset")
train_dataset = tokenized_datasets["train"]
if data_args.max_train_samples is not None:
train_dataset = train_dataset.select(range(data_args.max_train_samples))
if training_args.do_eval:
if "validation" not in tokenized_datasets:
raise ValueError("--do_eval requires a validation dataset")
eval_dataset = tokenized_datasets["validation"]
if data_args.max_eval_samples is not None:
eval_dataset = eval_dataset.select(range(data_args.max_eval_samples))
# Data collator
# This one will take care of randomly masking the tokens.
pad_to_multiple_of_8 = data_args.line_by_line and training_args.fp16 and not data_args.pad_to_max_length
data_collator = DataCollatorForLanguageModeling(
tokenizer=tokenizer,
mlm_probability=data_args.mlm_probability,
pad_to_multiple_of=8 if pad_to_multiple_of_8 else None,
)
# Initialize our Trainer
trainer = Trainer(
model=model,
args=training_args,
train_dataset=train_dataset if training_args.do_train else None,
eval_dataset=eval_dataset if training_args.do_eval else None,
tokenizer=tokenizer,
data_collator=data_collator,
)
# Training
if training_args.do_train:
checkpoint = None
if training_args.resume_from_checkpoint is not None:
checkpoint = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
checkpoint = last_checkpoint
train_result = trainer.train(resume_from_checkpoint=checkpoint)
trainer.save_model() # Saves the tokenizer too for easy upload
metrics = train_result.metrics
max_train_samples = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
)
metrics["train_samples"] = min(max_train_samples, len(train_dataset))
trainer.log_metrics("train", metrics)
trainer.save_metrics("train", metrics)
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info("*** Evaluate ***")
metrics = trainer.evaluate()
max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset))
try:
perplexity = math.exp(metrics["eval_loss"])
except OverflowError:
perplexity = float("inf")
metrics["perplexity"] = perplexity
trainer.log_metrics("eval", metrics)
trainer.save_metrics("eval", metrics)
if training_args.push_to_hub:
kwargs = {"finetuned_from": model_args.model_name_or_path, "tasks": "fill-mask"}
if data_args.dataset_name is not None:
kwargs["dataset_tags"] = data_args.dataset_name
if data_args.dataset_config_name is not None:
kwargs["dataset_args"] = data_args.dataset_config_name
kwargs["dataset"] = f"{data_args.dataset_name} {data_args.dataset_config_name}"
else:
kwargs["dataset"] = data_args.dataset_name
trainer.push_to_hub(**kwargs)
|
23,090 |
def sort_values(
df,
value,
npartitions=None,
divisions=None,
ascending=True,
upsample=1.0,
partition_size=128e6,
**kwargs,
):
""" See _Frame.sort_values for docstring """
if not isinstance(value, str):
# support ["a"] as input
if isinstance(value, list) and len(value) == 1 and isinstance(value[0], str):
value = value[0]
else:
raise NotImplementedError(
"Dataframe only supports sorting by a single column which must "
"be passed as a string or a list of a sinngle string.\n"
"You passed %s" % str(value)
)
if npartitions == "auto":
repartition = True
npartitions = max(100, df.npartitions)
else:
if npartitions is None:
npartitions = df.npartitions
repartition = False
sort_by_col = df[value]
if divisions is None:
divisions, mins, maxes = _calculate_divisions(
df, sort_by_col, repartition, npartitions, upsample, partition_size
)
if (
mins == sorted(mins)
and maxes == sorted(maxes)
and all(mx < mn for mx, mn in zip(maxes[:-1], mins[1:]))
and npartitions == df.npartitions
):
# divisions are in the right place
divisions = mins + [maxes[-1]]
return df.map_partitions(M.sort_values, value)
df = rearrange_by_divisions(df, value, divisions)
df.divisions = divisions
df = df.map_partitions(M.sort_values, value)
return df
|
def sort_values(
df,
value,
npartitions=None,
divisions=None,
ascending=True,
upsample=1.0,
partition_size=128e6,
**kwargs,
):
""" See _Frame.sort_values for docstring """
if not isinstance(value, str):
# support ["a"] as input
if isinstance(value, list) and len(value) == 1 and isinstance(value[0], str):
value = value[0]
else:
raise NotImplementedError(
"Dataframe only supports sorting by a single column which must "
"be passed as a string or a list of a single string.\n"
"You passed %s" % str(value)
)
if npartitions == "auto":
repartition = True
npartitions = max(100, df.npartitions)
else:
if npartitions is None:
npartitions = df.npartitions
repartition = False
sort_by_col = df[value]
if divisions is None:
divisions, mins, maxes = _calculate_divisions(
df, sort_by_col, repartition, npartitions, upsample, partition_size
)
if (
mins == sorted(mins)
and maxes == sorted(maxes)
and all(mx < mn for mx, mn in zip(maxes[:-1], mins[1:]))
and npartitions == df.npartitions
):
# divisions are in the right place
divisions = mins + [maxes[-1]]
return df.map_partitions(M.sort_values, value)
df = rearrange_by_divisions(df, value, divisions)
df.divisions = divisions
df = df.map_partitions(M.sort_values, value)
return df
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.