id (int64, 11 to 59.9k) | original (string, lengths 33 to 150k) | modified (string, lengths 37 to 150k) |
---|---|---|
30,545 |
def ips_to_ranges(ips: list, collapse_ips):
ip_ranges = []
ips_range_groups = [] # type:List
ips = sorted(ips)
for ip in ips:
appended = False
if len(ips_range_groups) == 0:
ips_range_groups.append([ip])
continue
for group in ips_range_groups:
if IPAddress(int(ip) + 1) in group or IPAddress(int(ip) - 1) in group:
group.append(ip)
sorted(group)
appended = True
if not appended:
ips_range_groups.append([ip])
for group in ips_range_groups:
# handle single ips
if len(group) == 1:
ip_ranges.append(str(group[0]))
continue
min_ip = group[0]
max_ip = group[-1]
if collapse_ips == COLLAPSE_TO_RANGES:
ip_ranges.append(str(min_ip) + "-" + str(max_ip))
elif collapse_ips == COLLAPSE_TO_CIDR:
moved_ip = False
# CIDR must begin with and even LSB
# if the first ip does not - separate it from the rest of the range
if (int(str(min_ip).split('.')[-1]) % 2) != 0:
ip_ranges.append(str(min_ip))
min_ip = group[1]
moved_ip = True
# CIDR must end with uneven LSB
# if the last ip does not - separate it from the rest of the range
if (int(str(max_ip).split('.')[-1]) % 2) == 0:
ip_ranges.append(str(max_ip))
max_ip = group[-2]
moved_ip = True
# if both min and max ips were shifted and there are only 2 ips in the range
# we added both ips by the shift and now we move to the next range
if moved_ip and len(group) == 2:
continue
else:
ip_ranges.append(str(iprange_to_cidrs(min_ip, max_ip)[0].cidr))
return ip_ranges
|
def ips_to_ranges(ips: list, collapse_ips):
ip_ranges = []
ips_range_groups = [] # type:List
ips = sorted(ips)
for ip in ips:
appended = False
if len(ips_range_groups) == 0:
ips_range_groups.append([ip])
continue
for group in ips_range_groups:
if IPAddress(int(ip) + 1) in group or IPAddress(int(ip) - 1) in group:
group.append(ip)
sorted(group)
appended = True
if not appended:
ips_range_groups.append([ip])
for group in ips_range_groups:
# handle single ips
if len(group) == 1:
ip_ranges.append(str(group[0]))
continue
min_ip = group[0]
max_ip = group[-1]
if collapse_ips == COLLAPSE_TO_RANGES:
ip_ranges.append(str(min_ip) + "-" + str(max_ip))
elif collapse_ips == COLLAPSE_TO_CIDR:
moved_ip = False
# CIDR must begin with an even LSB
# if the first ip does not - separate it from the rest of the range
if (int(str(min_ip).split('.')[-1]) % 2) != 0:
ip_ranges.append(str(min_ip))
min_ip = group[1]
moved_ip = True
# CIDR must end with uneven LSB
# if the last ip does not - separate it from the rest of the range
if (int(str(max_ip).split('.')[-1]) % 2) == 0:
ip_ranges.append(str(max_ip))
max_ip = group[-2]
moved_ip = True
# if both min and max ips were shifted and there are only 2 ips in the range
# we added both ips by the shift and now we move to the next range
if moved_ip and len(group) == 2:
continue
else:
ip_ranges.append(str(iprange_to_cidrs(min_ip, max_ip)[0].cidr))
return ip_ranges
|
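A minimal sketch of the range-collapsing idea behind ips_to_ranges, assuming the netaddr package (which provides IPAddress and iprange_to_cidrs); the COLLAPSE_* constants from the surrounding integration are not needed for the illustration:

# Collapse a contiguous IPv4 run either to a "start-end" string or to a CIDR block.
from netaddr import IPAddress, iprange_to_cidrs

ips = [IPAddress("10.0.0.%d" % i) for i in range(4)]   # 10.0.0.0 .. 10.0.0.3
first, last = min(ips), max(ips)

# "Ranges" style, as produced by the COLLAPSE_TO_RANGES branch above
print(f"{first}-{last}")                                # 10.0.0.0-10.0.0.3

# "CIDR" style, as produced by the COLLAPSE_TO_CIDR branch above
print(iprange_to_cidrs(first, last)[0].cidr)            # 10.0.0.0/30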
57,719 |
def main():
""" Main Function"""
try:
LOG('Command is %s' % (demisto.command(),))
global ACCESS_TOKEN, REFRESH_TOKEN
ACCESS_TOKEN = demisto.params().get('access_token')
REFRESH_TOKEN = demisto.params().get('refresh_token')
if demisto.command() == 'get-dlp-report':
report_id = demisto.args().get('report_id')
fetch_snippets = demisto.args().get('fetch_snippets', 'false') == 'true'
report_json, status_code = get_dlp_report(report_id, fetch_snippets)
parse_dlp_report(report_json)
if demisto.command() == "test-module":
test()
except Exception as e:
demisto.debug('Unknown Command')
error_message = str(e)
return_error(error_message)
finally:
LOG.print_log()
|
def main():
""" Main Function"""
try:
LOG('Command is %s' % (demisto.command(),))
global ACCESS_TOKEN, REFRESH_TOKEN
params = demisto.params()
ACCESS_TOKEN = params.get('access_token')
REFRESH_TOKEN = demisto.params().get('refresh_token')
if demisto.command() == 'get-dlp-report':
report_id = demisto.args().get('report_id')
fetch_snippets = demisto.args().get('fetch_snippets', 'false') == 'true'
report_json, status_code = get_dlp_report(report_id, fetch_snippets)
parse_dlp_report(report_json)
if demisto.command() == "test-module":
test()
except Exception as e:
demisto.debug('Unknown Command')
error_message = str(e)
return_error(error_message)
finally:
LOG.print_log()
|
52,299 |
def pytest_report_header(config):
return f'backend: {config.backend}'
|
def pytest_report_header(config):
if backend == 'webkit':
version = version.qWebKitVersion
elif backend == 'webengine':
version = version.PYQT_WEBENGINE_VERSION_STR
else:
raise NotImplementedError(f"unsupported backend: {backend}")
return f'backend: {config.backend} ({version})'
|
25,721 |
def _assert_shapes(
func: C,
print_specs: Sequence[ParsedArgumentSpec],
check_specs: Sequence[ParsedArgumentSpec],
arg_map: Mapping[str, Any],
context: Dict[str, Union[int, List[Optional[int]]]],
) -> None:
def _assert(condition: bool) -> None:
if not condition:
raise ShapeMismatchError(func, print_specs, arg_map)
for arg_spec in check_specs:
actual_shape = arg_spec.argument_ref.get(func, arg_map).shape
if isinstance(actual_shape, tf.TensorShape) and actual_shape.rank is None:
continue
actual = list(actual_shape)
actual_len = len(actual)
actual_i = 0
expected = arg_spec.shape.dims
expected_len = len(expected)
n_variable_rank = sum(dim_spec.variable_rank for dim_spec in expected)
assert n_variable_rank <= 1
if n_variable_rank == 0:
_assert(expected_len == actual_len)
else:
_assert(expected_len - n_variable_rank <= actual_len)
for dim_spec in expected:
if dim_spec.variable_rank:
assert dim_spec.variable_name is not None
expected_name = dim_spec.variable_name
variable_rank_len = actual_len - (expected_len - n_variable_rank)
actual_dims = actual[actual_i : actual_i + variable_rank_len]
actual_i += variable_rank_len
expected_dims = context.get(expected_name)
if expected_dims is None:
expected_dims = cast(List[Optional[int]], variable_rank_len * [None])
context[expected_name] = expected_dims
assert isinstance(expected_dims, list)
_assert(len(expected_dims) == len(actual_dims))
for i, actual_dim in enumerate(actual_dims):
if actual_dim is None:
continue
if expected_dims[i] is None:
expected_dims[i] = actual_dim
else:
_assert(expected_dims[i] == actual_dim)
else:
actual_dim = actual[actual_i]
if actual_dim is not None:
if dim_spec.constant is not None:
_assert(dim_spec.constant == actual_dim)
else:
assert dim_spec.variable_name is not None
expected_dim = context.setdefault(dim_spec.variable_name, actual_dim)
_assert(expected_dim == actual_dim)
actual_i += 1
|
def _assert_shapes(
func: C,
print_specs: Sequence[ParsedArgumentSpec],
check_specs: Sequence[ParsedArgumentSpec],
arg_map: Mapping[str, Any],
context: Dict[str, Union[int, List[Optional[int]]]],
) -> None:
def _assert(condition: bool) -> None:
if not condition:
raise ShapeMismatchError(func, print_specs, arg_map)
for arg_spec in check_specs:
actual_shape = arg_spec.argument_ref.get(func, arg_map).shape
if isinstance(actual_shape, tf.TensorShape) and actual_shape.rank is None:
continue
actual = list(actual_shape)
actual_len = len(actual)
actual_i = 0
expected = arg_spec.shape.dims
expected_len = len(expected)
n_variable_rank = sum(dim_spec.variable_rank for dim_spec in expected)
assert n_variable_rank <= 1, "??"
if n_variable_rank == 0:
_assert(expected_len == actual_len)
else:
_assert(expected_len - n_variable_rank <= actual_len)
for dim_spec in expected:
if dim_spec.variable_rank:
assert dim_spec.variable_name is not None
expected_name = dim_spec.variable_name
variable_rank_len = actual_len - (expected_len - n_variable_rank)
actual_dims = actual[actual_i : actual_i + variable_rank_len]
actual_i += variable_rank_len
expected_dims = context.get(expected_name)
if expected_dims is None:
expected_dims = cast(List[Optional[int]], variable_rank_len * [None])
context[expected_name] = expected_dims
assert isinstance(expected_dims, list)
_assert(len(expected_dims) == len(actual_dims))
for i, actual_dim in enumerate(actual_dims):
if actual_dim is None:
continue
if expected_dims[i] is None:
expected_dims[i] = actual_dim
else:
_assert(expected_dims[i] == actual_dim)
else:
actual_dim = actual[actual_i]
if actual_dim is not None:
if dim_spec.constant is not None:
_assert(dim_spec.constant == actual_dim)
else:
assert dim_spec.variable_name is not None
expected_dim = context.setdefault(dim_spec.variable_name, actual_dim)
_assert(expected_dim == actual_dim)
actual_i += 1
|
32,295 |
def get_conforming_vulnerability_profiles(
topology: Topology,
device_filter_string: str = None,
minimum_block_severities: str = "critical,high",
minimum_alert_severities: str = "medium,low"
) -> List[PanosObjectReference]:
"""
Returns all Vulnerability profiles that conform to best practices.
:param topology: `Topology` instance !no-auto-argument
:param device_filter_string: String to filter to only check given device
:param minimum_block_severities: csv list of severities that must be in drop/reset/block-ip mode.
:param minimum_alert_severities: csv list of severities that must be in alert/default or higher mode.
"""
return HygieneLookups.get_all_conforming_vulnerability_profiles(
topology,
device_filter_str=device_filter_string,
minimum_block_severities=minimum_block_severities.split(","),
minimum_alert_severities=minimum_alert_severities.split(",")
)
|
def get_conforming_vulnerability_profiles(
topology: Topology,
device_filter_string: Optional[str] = None,
minimum_block_severities: str = "critical,high",
minimum_alert_severities: str = "medium,low"
) -> List[PanosObjectReference]:
"""
Returns all Vulnerability profiles that conform to best practices.
:param topology: `Topology` instance !no-auto-argument
:param device_filter_string: String to filter to only check given device
:param minimum_block_severities: csv list of severities that must be in drop/reset/block-ip mode.
:param minimum_alert_severities: csv list of severities that must be in alert/default or higher mode.
"""
return HygieneLookups.get_all_conforming_vulnerability_profiles(
topology,
device_filter_str=device_filter_string,
minimum_block_severities=minimum_block_severities.split(","),
minimum_alert_severities=minimum_alert_severities.split(",")
)
|
8,233 |
def get_observatory_groups():
"""
Get a list of observatory IDs for each observatory in CDAWeb.
An observatory group is typically a single mission, which can contain
multiple observatories, e.g. for the STEREO observatory group there are two
observatories, STEREO-A and STEREO-B.
Returns
-------
`astropy.table.Table`
Examples
--------
>>> from sunpy.net.cdaweb import get_observatory_groups
>>>
>>> groups = get_observatory_groups() #doctest: +REMOTE_DATA
>>> groups['Group'] #doctest: +REMOTE_DATA
<Column name='Group' dtype='str55' length=70>
ACE
AMPTE
...
Voyager
Wind
>>> groups.loc['STEREO'] #doctest: +REMOTE_DATA
<Row index=58>
Group Observatories
str55 str531
------ -----------------------------------------------------------------------------
STEREO 'Ahead', 'Behind', 'STA', 'STB', 'STEREO', 'STEREOA', 'STEREOB', 'sta', 'stb'
"""
# Get a list of files for a given dataset between start and end times
url = '/'.join([
_CDAS_BASEURL,
'dataviews', _DATAVIEW,
'observatoryGroups'
])
response = requests.get(url, headers=_CDAS_HEADERS)
obs_groups = response.json()
names = [obs['Name'] for obs in obs_groups['ObservatoryGroupDescription']]
obs_ids = [obs['ObservatoryId'] for obs in obs_groups['ObservatoryGroupDescription']]
# Join all IDs into a single string
obs_ids = ["'" + "', '".join(id) + "'" for id in obs_ids]
t = Table([names, obs_ids], names=['Group', 'Observatories'])
t.add_index('Group')
return t
|
def get_observatory_groups():
"""
Get a list of observatory IDs for each observatory in CDAWeb.
An observatory group is typically a single mission, which can contain
multiple observatories, e.g. for the STEREO observatory group there are two
observatories, STEREO-A and STEREO-B.
Returns
-------
`astropy.table.Table`
Examples
--------
>>> from sunpy.net.cdaweb import get_observatory_groups
>>>
>>> groups = get_observatory_groups() #doctest: +REMOTE_DATA
>>> groups['Group'] #doctest: +REMOTE_DATA
<Column name='Group' dtype='str55' length=...>
ACE
AMPTE
...
Voyager
Wind
>>> groups.loc['STEREO'] #doctest: +REMOTE_DATA
<Row index=58>
Group Observatories
str55 str531
------ -----------------------------------------------------------------------------
STEREO 'Ahead', 'Behind', 'STA', 'STB', 'STEREO', 'STEREOA', 'STEREOB', 'sta', 'stb'
"""
# Get a list of files for a given dataset between start and end times
url = '/'.join([
_CDAS_BASEURL,
'dataviews', _DATAVIEW,
'observatoryGroups'
])
response = requests.get(url, headers=_CDAS_HEADERS)
obs_groups = response.json()
names = [obs['Name'] for obs in obs_groups['ObservatoryGroupDescription']]
obs_ids = [obs['ObservatoryId'] for obs in obs_groups['ObservatoryGroupDescription']]
# Join all IDs into a single string
obs_ids = ["'" + "', '".join(id) + "'" for id in obs_ids]
t = Table([names, obs_ids], names=['Group', 'Observatories'])
t.add_index('Group')
return t
|
36,188 |
def add_category(post):
categories = BlogPageCategory.objects.all()
for category in categories.order_by('?')[:randint(0, len(categories))]:
post.category.add(category)
post.save()
|
def add_category(post):
categories = BlogPageCategory.objects.all()
for category in categories.order_by('?')[:randint(0, len(categories))]:
post.category.add(choice(categories))
post.save()
|
41,936 |
def test_create_study_with_multi_objectives() -> None:
study = optuna.create_study(directions=["maximize"])
assert study.direction == StudyDirection.MAXIMIZE
assert study._is_multi_objective() is False
study = optuna.create_study(directions=["maximize", "minimize"])
assert study.directions == [StudyDirection.MAXIMIZE, StudyDirection.MINIMIZE]
assert study._is_multi_objective() is True
with pytest.raises(ValueError):
# Empty `direction` isn't allowed.
_ = optuna.create_study(directions=[])
with pytest.raises(ValueError):
_ = optuna.create_study(direction="minimize", directions=["maximize"])
with pytest.raises(ValueError):
_ = optuna.create_study(direction="minimize", directions=[])
|
def test_create_study_with_multi_objectives() -> None:
study = optuna.create_study(directions=["maximize"])
assert study.direction == StudyDirection.MAXIMIZE
assert study._is_multi_objective() is False
study = optuna.create_study(directions=["maximize", "minimize"])
assert study.directions == [StudyDirection.MAXIMIZE, StudyDirection.MINIMIZE]
assert study._is_multi_objective()
with pytest.raises(ValueError):
# Empty `direction` isn't allowed.
_ = optuna.create_study(directions=[])
with pytest.raises(ValueError):
_ = optuna.create_study(direction="minimize", directions=["maximize"])
with pytest.raises(ValueError):
_ = optuna.create_study(direction="minimize", directions=[])
|
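For context, a small runnable sketch of the multi-objective API this test exercises (standard Optuna usage; the objective and trial count are arbitrary):

import optuna

# Two objectives: maximize x, minimize y.
def objective(trial):
    x = trial.suggest_float("x", 0.0, 1.0)
    y = trial.suggest_float("y", 0.0, 1.0)
    return x, y

study = optuna.create_study(directions=["maximize", "minimize"])
study.optimize(objective, n_trials=5)

print(study.directions)        # [StudyDirection.MAXIMIZE, StudyDirection.MINIMIZE]
print(len(study.best_trials))  # Pareto-optimal trials (at least 1)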
31,314 |
def get_offset_from_context() -> int:
integration_context = demisto.getIntegrationContext()
return integration_context.get("offset")
|
def get_offset_from_context() -> int:
integration_context = get_integration_context()
return integration_context.get("offset")
|
38,970 |
def enum_process_schema(enum: EnumMeta) -> Dict[str, Any]:
"""
Take a single ``enum`` and generate its schema.
This is similar to the ``model_process_schema`` function, but applies to ``Enum`` objects.
"""
from inspect import getdoc
schema: Dict[str, Any] = {'title': enum.__name__}
# Python assigns all enums a default docstring value of 'An enumeration', so
# all enums will have a description field even if not explicitly provided.
schema['description'] = getdoc(enum)
# Add enum values and the enum field type to the schema.
schema.update({'enum': [item.value for item in cast(Iterable[Enum], enum)]})
add_field_type_to_schema(enum, schema)
return schema
|
def enum_process_schema(enum: EnumMeta) -> Dict[str, Any]:
"""
Take a single ``enum`` and generate its schema.
This is similar to the ``model_process_schema`` function, but applies to ``Enum`` objects.
"""
from inspect import getdoc
schema: Dict[str, Any] = {'title': enum.__name__}
# Python assigns all enums a default docstring value of 'An enumeration', so
# all enums will have a description field even if not explicitly provided.
schema['description'] = getdoc(enum)
# Add enum values and the enum field type to the schema.
schema['enum'] = [item.value for item in cast(Iterable[Enum], enum)]
add_field_type_to_schema(enum, schema)
return schema
|
22,993 |
def _copytobuffer(xxx: Any, inplace: bool = False) -> Tuple[Any, DataType]:
"""
Prepares data for PROJ C-API:
- Makes a copy because PROJ modifies buffer in place
- Make sure dtype is double as that is what PROJ expects
- Makes sure object supports Python Buffer API
If the data is a numpy array, it ensures the data is in C order.
Parameters
----------
xxx: Any
A scalar, list, tuple, numpy.array,
pandas.Series, xaray.DataArray, or dask.array.Array.
inplace: bool, default=False
If True, will return the array withour a copy if it
meets the requirements of the Python Buffer API & PROJ C-API.
Returns
-------
Tuple[Any, DataType]
The copy of the data prepared for the PROJ API & Python Buffer API.
"""
# handle numpy masked Arrays; note that pandas.Series also has a "mask"
# attribute, hence checking for simply the "mask" attr isn't sufficient
if hasattr(xxx, "hardmask"):
return xxx, DataType.ARRAY
# check for pandas.Series, xarray.DataArray or dask.array.Array
if hasattr(xxx, "__array__") and callable(xxx.__array__):
xxx = xxx.__array__()
# handle numpy data
if hasattr(xxx, "shape"):
if xxx.shape == ():
# convert numpy array scalar to float
# (array scalars don't support buffer API)
return _copytobuffer_return_scalar(xxx)
# Use C order when copying to handle arrays in fortran order
return xxx.astype("d", order="C", copy=not inplace), DataType.ARRAY
data_type = DataType.ARRAY
if isinstance(xxx, array):
if not inplace or xxx.typecode != "d":
xxx = array("d", xxx)
elif isinstance(xxx, list):
xxx = array("d", xxx)
data_type = DataType.LIST
elif isinstance(xxx, tuple):
xxx = array("d", xxx)
data_type = DataType.TUPLE
else:
return _copytobuffer_return_scalar(xxx)
return xxx, data_type
|
def _copytobuffer(xxx: Any, inplace: bool = False) -> Tuple[Any, DataType]:
"""
Prepares data for PROJ C-API:
- Makes a copy because PROJ modifies buffer in place
- Make sure dtype is double as that is what PROJ expects
- Makes sure object supports Python Buffer API
If the data is a numpy array, it ensures the data is in C order.
Parameters
----------
xxx: Any
A scalar, list, tuple, numpy.array,
pandas.Series, xaray.DataArray, or dask.array.Array.
inplace: bool, default=False
If True, will return the array withour a copy if it
meets the requirements of the Python Buffer API & PROJ C-API.
Returns
-------
Tuple[Any, DataType]
The copy of the data prepared for the PROJ API & Python Buffer API.
"""
# handle numpy masked Arrays; note that pandas.Series also has a "mask"
# attribute, hence checking for simply the "mask" attr isn't sufficient
if hasattr(xxx, "hardmask"):
return xxx, DataType.ARRAY
# check for pandas.Series, xarray.DataArray or dask.array.Array
if not hasattr(xxx, "hardmask") and hasattr(xxx, "__array__") and callable(xxx.__array__):
xxx = xxx.__array__()
# handle numpy data
if hasattr(xxx, "shape"):
if xxx.shape == ():
# convert numpy array scalar to float
# (array scalars don't support buffer API)
return _copytobuffer_return_scalar(xxx)
# Use C order when copying to handle arrays in fortran order
return xxx.astype("d", order="C", copy=not inplace), DataType.ARRAY
data_type = DataType.ARRAY
if isinstance(xxx, array):
if not inplace or xxx.typecode != "d":
xxx = array("d", xxx)
elif isinstance(xxx, list):
xxx = array("d", xxx)
data_type = DataType.LIST
elif isinstance(xxx, tuple):
xxx = array("d", xxx)
data_type = DataType.TUPLE
else:
return _copytobuffer_return_scalar(xxx)
return xxx, data_type
|
7,377 |
def test_skin():
"""Test that "skin" image can be loaded.
Needs internet connection.
"""
# Fetch causes the test to get skipped if it isn't available during
# CI tests
fetch('data/skin.jpg')
skin = data.skin()
assert skin.ndim == 3
|
def test_skin():
"""Test that "skin" image can be loaded.
Needs internet connection.
"""
# Fetch causes the test to get skipped if pooch isn't available during
# CI tests
fetch('data/skin.jpg')
skin = data.skin()
assert skin.ndim == 3
|
57,972 |
def get_urls_and_emails_from_pdf_html_content(cpy_file_path, output_folder):
"""
Extract the URLs and emails from the pdf html content.
"""
pdf_html_content = get_pdf_htmls_content(cpy_file_path, output_folder)
return set(re.findall(URL_EXTRACTION_REGEX, pdf_html_content)), set(re.findall(EMAIL_REGXEX, pdf_html_content))
|
def get_urls_and_emails_from_pdf_html_content(cpy_file_path: str, output_folder: str):
"""
Extract the URLs and emails from the pdf html content.
Args:
...
"""
pdf_html_content = get_pdf_htmls_content(cpy_file_path, output_folder)
return set(re.findall(URL_EXTRACTION_REGEX, pdf_html_content)), set(re.findall(EMAIL_REGXEX, pdf_html_content))
|
28,066 |
def get_compile_command(action, config, source='', output=''):
""" Generate a standardized and cleaned compile command serving as a base
for other operations. """
cmd = [config.analyzer_binary]
if not has_flag('--target', cmd) and action.target != "":
cmd.append("--target=" + action.target)
cmd.extend(prepend_all('-isystem', action.compiler_includes))
cmd.append('-c')
if not has_flag('-x', cmd):
cmd.extend(['-x', action.lang])
cmd.extend(config.analyzer_extra_arguments)
cmd.extend(action.analyzer_options)
if output:
cmd.extend(['-o', output])
if source:
cmd.append(source)
if not has_flag('-std', cmd) and not has_flag('--std', cmd):
cmd.append(action.compiler_standard)
return cmd
|
def get_compile_command(action, config, source='', output=''):
""" Generate a standardized and cleaned compile command serving as a base
for other operations. """
cmd = [config.analyzer_binary]
if not has_flag('--target', cmd) and action.target != "":
cmd.append(f"--target={action.target}")
cmd.extend(prepend_all('-isystem', action.compiler_includes))
cmd.append('-c')
if not has_flag('-x', cmd):
cmd.extend(['-x', action.lang])
cmd.extend(config.analyzer_extra_arguments)
cmd.extend(action.analyzer_options)
if output:
cmd.extend(['-o', output])
if source:
cmd.append(source)
if not has_flag('-std', cmd) and not has_flag('--std', cmd):
cmd.append(action.compiler_standard)
return cmd
|
38,991 |
def field_type_schema(
field: ModelField,
*,
by_alias: bool,
model_name_map: Dict[TypeModelOrEnum, str],
ref_template: str,
schema_overrides: bool = False,
ref_prefix: Optional[str] = None,
known_models: TypeModelSet,
) -> Tuple[Dict[str, Any], Dict[str, Any], Set[str]]:
"""
Used by ``field_schema()``, you probably should be using that function.
Take a single ``field`` and generate the schema for its type only, not including additional
information as title, etc. Also return additional schema definitions, from sub-models.
"""
definitions = {}
nested_models: Set[str] = set()
f_schema: Dict[str, Any]
if field.shape in {SHAPE_LIST, SHAPE_TUPLE_ELLIPSIS, SHAPE_SEQUENCE, SHAPE_SET, SHAPE_FROZENSET, SHAPE_ITERABLE}:
items_schema, f_definitions, f_nested_models = field_singleton_schema(
field,
by_alias=by_alias,
model_name_map=model_name_map,
ref_prefix=ref_prefix,
ref_template=ref_template,
known_models=known_models,
)
definitions.update(f_definitions)
nested_models.update(f_nested_models)
f_schema = {'type': 'array', 'items': items_schema}
if field.shape in {SHAPE_SET, SHAPE_FROZENSET}:
f_schema['uniqueItems'] = True
elif field.shape == SHAPE_MAPPING:
f_schema = {'type': 'object'}
key_field = cast(ModelField, field.key_field)
regex = getattr(key_field.type_, 'regex', None)
items_schema, f_definitions, f_nested_models = field_singleton_schema(
field,
by_alias=by_alias,
model_name_map=model_name_map,
ref_prefix=ref_prefix,
ref_template=ref_template,
known_models=known_models,
)
definitions.update(f_definitions)
nested_models.update(f_nested_models)
if regex:
# Dict keys have a regex pattern
# items_schema might be a schema or empty dict, add it either way
f_schema['patternProperties'] = {regex.pattern: items_schema}
elif items_schema:
# The dict values are not simply Any, so they need a schema
f_schema['additionalProperties'] = items_schema
elif field.shape == SHAPE_TUPLE:
sub_schema = []
sub_fields = cast(List[ModelField], field.sub_fields)
for sf in sub_fields:
sf_schema, sf_definitions, sf_nested_models = field_type_schema(
sf,
by_alias=by_alias,
model_name_map=model_name_map,
ref_prefix=ref_prefix,
ref_template=ref_template,
known_models=known_models,
)
definitions.update(sf_definitions)
nested_models.update(sf_nested_models)
sub_schema.append(sf_schema)
if len(sub_schema) == 1:
sub_schema = sub_schema[0] # type: ignore
f_schema = {'type': 'array', 'items': sub_schema}
elif field.shape in {SHAPE_SINGLETON, SHAPE_GENERIC}:
f_schema, f_definitions, f_nested_models = field_singleton_schema(
field,
by_alias=by_alias,
model_name_map=model_name_map,
schema_overrides=schema_overrides,
ref_prefix=ref_prefix,
ref_template=ref_template,
known_models=known_models,
)
definitions.update(f_definitions)
nested_models.update(f_nested_models)
else:
raise AssertionError('Unknown shape: %i' % field.shape)
# check field type to avoid repeated calls to the same __modify_schema__ method
if field.type_ != field.outer_type_:
modify_schema = getattr(field.outer_type_, '__modify_schema__', None)
if modify_schema:
modify_schema(f_schema)
return f_schema, definitions, nested_models
|
def field_type_schema(
field: ModelField,
*,
by_alias: bool,
model_name_map: Dict[TypeModelOrEnum, str],
ref_template: str,
schema_overrides: bool = False,
ref_prefix: Optional[str] = None,
known_models: TypeModelSet,
) -> Tuple[Dict[str, Any], Dict[str, Any], Set[str]]:
"""
Used by ``field_schema()``, you probably should be using that function.
Take a single ``field`` and generate the schema for its type only, not including additional
information as title, etc. Also return additional schema definitions, from sub-models.
"""
definitions = {}
nested_models: Set[str] = set()
f_schema: Dict[str, Any]
if field.shape in {SHAPE_LIST, SHAPE_TUPLE_ELLIPSIS, SHAPE_SEQUENCE, SHAPE_SET, SHAPE_FROZENSET, SHAPE_ITERABLE}:
items_schema, f_definitions, f_nested_models = field_singleton_schema(
field,
by_alias=by_alias,
model_name_map=model_name_map,
ref_prefix=ref_prefix,
ref_template=ref_template,
known_models=known_models,
)
definitions.update(f_definitions)
nested_models.update(f_nested_models)
f_schema = {'type': 'array', 'items': items_schema}
if field.shape in {SHAPE_SET, SHAPE_FROZENSET}:
f_schema['uniqueItems'] = True
elif field.shape == SHAPE_MAPPING:
f_schema = {'type': 'object'}
key_field = cast(ModelField, field.key_field)
regex = getattr(key_field.type_, 'regex', None)
items_schema, f_definitions, f_nested_models = field_singleton_schema(
field,
by_alias=by_alias,
model_name_map=model_name_map,
ref_prefix=ref_prefix,
ref_template=ref_template,
known_models=known_models,
)
definitions.update(f_definitions)
nested_models.update(f_nested_models)
if regex:
# Dict keys have a regex pattern
# items_schema might be a schema or empty dict, add it either way
f_schema['patternProperties'] = {regex.pattern: items_schema}
elif items_schema:
# The dict values are not simply Any, so they need a schema
f_schema['additionalProperties'] = items_schema
elif field.shape == SHAPE_TUPLE:
sub_schema = []
sub_fields = cast(List[ModelField], field.sub_fields)
for sf in sub_fields:
sf_schema, sf_definitions, sf_nested_models = field_type_schema(
sf,
by_alias=by_alias,
model_name_map=model_name_map,
ref_prefix=ref_prefix,
ref_template=ref_template,
known_models=known_models,
)
definitions.update(sf_definitions)
nested_models.update(sf_nested_models)
sub_schema.append(sf_schema)
if len(sub_schema) == 1:
sub_schema = sub_schema[0] # type: ignore
f_schema = {'type': 'array', 'items': sub_schema}
elif field.shape in {SHAPE_SINGLETON, SHAPE_GENERIC}:
f_schema, f_definitions, f_nested_models = field_singleton_schema(
field,
by_alias=by_alias,
model_name_map=model_name_map,
schema_overrides=schema_overrides,
ref_prefix=ref_prefix,
ref_template=ref_template,
known_models=known_models,
)
definitions.update(f_definitions)
nested_models.update(f_nested_models)
else:
raise ValueError(f'Unknown shape: {field.shape}')
# check field type to avoid repeated calls to the same __modify_schema__ method
if field.type_ != field.outer_type_:
modify_schema = getattr(field.outer_type_, '__modify_schema__', None)
if modify_schema:
modify_schema(f_schema)
return f_schema, definitions, nested_models
|
2,450 |
def check_is_fitted(estimator, attributes=None, *, msg=None, all_or_any=all):
"""Perform is_fitted validation for estimator.
Check if the estimator is fitted by verifying the presence of
fitted attributes (ending with a trailing underscore) and otherwise
raises a NotFittedError with the given message.
If an estimator does not set any attributes with a trailing underscore, it
can define a ``__sklearn_is_fitted__`` method returning a boolean to specify if the
estimator is fitted or not.
Parameters
----------
estimator : estimator instance
estimator instance for which the check is performed.
attributes : str, list or tuple of str, default=None
Attribute name(s) given as string or a list/tuple of strings
Eg.: ``["coef_", "estimator_", ...], "coef_"``
If `None`, `estimator` is considered fitted if there exist an
attribute that ends with a underscore and does not start with double
underscore.
msg : str, default=None
The default error message is, "This %(name)s instance is not fitted
yet. Call 'fit' with appropriate arguments before using this
estimator."
For custom messages if "%(name)s" is present in the message string,
it is substituted for the estimator name.
Eg. : "Estimator, %(name)s, must be fitted before sparsifying".
all_or_any : callable, {all, any}, default=all
Specify whether all or any of the given attributes must exist.
Returns
-------
None
Raises
------
NotFittedError
If the attributes are not found.
"""
if isclass(estimator):
raise TypeError("{} is a class, not an instance.".format(estimator))
if msg is None:
msg = (
"This %(name)s instance is not fitted yet. Call 'fit' with "
"appropriate arguments before using this estimator."
)
if not hasattr(estimator, "fit"):
raise TypeError("%s is not an estimator instance." % (estimator))
if attributes is not None:
if not isinstance(attributes, (list, tuple)):
attributes = [attributes]
fitted = all_or_any([hasattr(estimator, attr) for attr in attributes])
elif hasattr(estimator, "__sklearn_is_fitted__"):
fitted = estimator.__sklearn_is_fitted__()
else:
fitted = [
v for v in vars(estimator) if v.endswith("_") and not v.startswith("__")
]
if not fitted:
raise NotFittedError(msg % {"name": type(estimator).__name__})
|
def check_is_fitted(estimator, attributes=None, *, msg=None, all_or_any=all):
"""Perform `is_fitted` validation for estimator.
Check if the estimator is fitted by verifying the presence of
fitted attributes (ending with a trailing underscore) and otherwise
raises a NotFittedError with the given message.
If an estimator does not set any attributes with a trailing underscore, it
can define a ``__sklearn_is_fitted__`` method returning a boolean to specify if the
estimator is fitted or not.
Parameters
----------
estimator : estimator instance
estimator instance for which the check is performed.
attributes : str, list or tuple of str, default=None
Attribute name(s) given as string or a list/tuple of strings
Eg.: ``["coef_", "estimator_", ...], "coef_"``
If `None`, `estimator` is considered fitted if there exist an
attribute that ends with a underscore and does not start with double
underscore.
msg : str, default=None
The default error message is, "This %(name)s instance is not fitted
yet. Call 'fit' with appropriate arguments before using this
estimator."
For custom messages if "%(name)s" is present in the message string,
it is substituted for the estimator name.
Eg. : "Estimator, %(name)s, must be fitted before sparsifying".
all_or_any : callable, {all, any}, default=all
Specify whether all or any of the given attributes must exist.
Returns
-------
None
Raises
------
NotFittedError
If the attributes are not found.
"""
if isclass(estimator):
raise TypeError("{} is a class, not an instance.".format(estimator))
if msg is None:
msg = (
"This %(name)s instance is not fitted yet. Call 'fit' with "
"appropriate arguments before using this estimator."
)
if not hasattr(estimator, "fit"):
raise TypeError("%s is not an estimator instance." % (estimator))
if attributes is not None:
if not isinstance(attributes, (list, tuple)):
attributes = [attributes]
fitted = all_or_any([hasattr(estimator, attr) for attr in attributes])
elif hasattr(estimator, "__sklearn_is_fitted__"):
fitted = estimator.__sklearn_is_fitted__()
else:
fitted = [
v for v in vars(estimator) if v.endswith("_") and not v.startswith("__")
]
if not fitted:
raise NotFittedError(msg % {"name": type(estimator).__name__})
|
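A short usage sketch of this validator with a scikit-learn estimator (standard public API):

from sklearn.exceptions import NotFittedError
from sklearn.linear_model import LinearRegression
from sklearn.utils.validation import check_is_fitted

model = LinearRegression()
try:
    check_is_fitted(model)            # no coef_ yet, so this raises
except NotFittedError as err:
    print("not fitted:", err)

model.fit([[0.0], [1.0]], [0.0, 1.0])
check_is_fitted(model)                # passes silently once coef_ exists
check_is_fitted(model, attributes=["coef_", "intercept_"])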
15,130 |
def extract_entities(
hass: HomeAssistantType,
template: Optional[str],
variables: Optional[Dict[str, Any]] = None,
) -> Union[str, List[str]]:
"""Extract all entities for state_changed listener from template string."""
if template is None or _RE_JINJA_DELIMITERS.search(template) is None:
return []
if _RE_NONE_ENTITIES.search(template):
return MATCH_ALL
extraction_final = []
for result in _RE_GET_ENTITIES.finditer(template):
if (
result.group("entity_id") == "trigger.entity_id"
and variables
and "trigger" in variables
and "entity_id" in variables["trigger"]
):
extraction_final.append(variables["trigger"]["entity_id"])
elif result.group("entity_id"):
if result.group("func") == "expand":
group_entity_id = result.group("entity_id")
group_entity = _get_state(hass, group_entity_id)
# pylint: disable=import-outside-toplevel
from homeassistant.components import group
if (
split_entity_id(group_entity_id)[0] == group.DOMAIN
and group_entity is not None
):
# Expand group, but do not use `expand` template function
# here. Group entities may not have been initialized yet and
# could be thrown out.
group_entities = group_entity.attributes.get(ATTR_ENTITY_ID)
for entity_id in group_entities or []:
extraction_final.append(entity_id)
extraction_final.append(result.group("entity_id"))
if (
variables
and result.group("variable") in variables
and isinstance(variables[result.group("variable")], str)
and valid_entity_id(variables[result.group("variable")])
):
extraction_final.append(variables[result.group("variable")])
if extraction_final:
return list(set(extraction_final))
return MATCH_ALL
|
def extract_entities(
hass: HomeAssistantType,
template: Optional[str],
variables: Optional[Dict[str, Any]] = None,
) -> Union[str, List[str]]:
"""Extract all entities for state_changed listener from template string."""
if template is None or _RE_JINJA_DELIMITERS.search(template) is None:
return []
if _RE_NONE_ENTITIES.search(template):
return MATCH_ALL
extraction_final = []
for result in _RE_GET_ENTITIES.finditer(template):
if (
result.group("entity_id") == "trigger.entity_id"
and variables
and "trigger" in variables
and "entity_id" in variables["trigger"]
):
extraction_final.append(variables["trigger"]["entity_id"])
elif result.group("entity_id"):
if result.group("func") == "expand":
group_entity_id = result.group("entity_id")
group_entity = hass.states.get(group_entity_id)
# pylint: disable=import-outside-toplevel
from homeassistant.components import group
if (
split_entity_id(group_entity_id)[0] == group.DOMAIN
and group_entity is not None
):
# Expand group, but do not use `expand` template function
# here. Group entities may not have been initialized yet and
# could be thrown out.
group_entities = group_entity.attributes.get(ATTR_ENTITY_ID)
for entity_id in group_entities or []:
extraction_final.append(entity_id)
extraction_final.append(result.group("entity_id"))
if (
variables
and result.group("variable") in variables
and isinstance(variables[result.group("variable")], str)
and valid_entity_id(variables[result.group("variable")])
):
extraction_final.append(variables[result.group("variable")])
if extraction_final:
return list(set(extraction_final))
return MATCH_ALL
|
45,866 |
def undistort_image(image: torch.Tensor, K: torch.Tensor, dist: torch.Tensor) -> torch.Tensor:
r"""Compensate an image for lens distortion.
Radial :math:`(k_1, k_2, k_3, k_4, k_4, k_6)`,
tangential :math:`(p_1, p_2)`, thin prism :math:`(s_1, s_2, s_3, s_4)`, and tilt :math:`(\tau_x, \tau_y)` distortion models are considered in this function.
Args:
image: Input image with shape :math:`(*, C, H, W)`.
K: Intrinsic camera matrix with shape :math:`(*, 3, 3)`.
dist: Distortion coefficients
:math:`(k_1,k_2,p_1,p_2[,k_3[,k_4,k_5,k_6[,s_1,s_2,s_3,s_4[,\tau_x,\tau_y]]]])`. This is
a vector with 4, 5, 8, 12 or 14 elements with shape :math:`(*, n)`
Returns:
Undistorted image with shape :math:`(*, C, H, W)`.
"""
assert image.dim() >= 2
assert K.shape[-2:] == (3, 3)
assert dist.shape[-1] in [4, 5, 8, 12, 14]
B, _, rows, cols = image.shape
if image.dtype != torch.float:
image = image.float()
# Create point coordinates for each pixel of the image
x, y = torch.meshgrid(torch.arange(cols), torch.arange(rows))
pts: torch.Tensor = torch.cat([x.T.float().reshape(-1,1), y.T.reshape(-1,1)], 1) # (rows*cols)x2
# Distort points and define maps
ptsd: torch.Tensor = distort_points(pts, K, dist) # Bx(rows*cols)x2
mapx: torch.Tensor = ptsd[..., 0].reshape(B, rows, cols) # B x rows x cols, float
mapy: torch.Tensor = ptsd[..., 1].reshape(B, rows, cols) # B x rows x cols, float
# Remap image to undistort
out = remap(image, mapx, mapy, align_corners=True)
out = torch.round(torch.clamp(out, 0, 255)).to(torch.uint8)
return out
|
def undistort_image(image: torch.Tensor, K: torch.Tensor, dist: torch.Tensor) -> torch.Tensor:
r"""Compensate an image for lens distortion.
Radial :math:`(k_1, k_2, k_3, k_4, k_4, k_6)`,
tangential :math:`(p_1, p_2)`, thin prism :math:`(s_1, s_2, s_3, s_4)`, and tilt :math:`(\tau_x, \tau_y)` distortion models are considered in this function.
Args:
image: Input image with shape :math:`(*, C, H, W)`.
K: Intrinsic camera matrix with shape :math:`(*, 3, 3)`.
dist: Distortion coefficients
:math:`(k_1,k_2,p_1,p_2[,k_3[,k_4,k_5,k_6[,s_1,s_2,s_3,s_4[,\tau_x,\tau_y]]]])`. This is
a vector with 4, 5, 8, 12 or 14 elements with shape :math:`(*, n)`
Returns:
Undistorted image with shape :math:`(*, C, H, W)`.
"""
assert image.dim() >= 2
assert K.shape[-2:] == (3, 3)
assert dist.shape[-1] in [4, 5, 8, 12, 14]
B, _, rows, cols = image.shape
if image.dtype != torch.float:
image = image.float()
# Create point coordinates for each pixel of the image
x, y = torch.meshgrid(torch.arange(cols), torch.arange(rows))
pts: torch.Tensor = torch.cat([x.transpose(-2, -1).reshape(-1,1), y.transpose(-2, -1).reshape(-1,1)], 1) # (rows*cols)x2
# Distort points and define maps
ptsd: torch.Tensor = distort_points(pts, K, dist) # Bx(rows*cols)x2
mapx: torch.Tensor = ptsd[..., 0].reshape(B, rows, cols) # B x rows x cols, float
mapy: torch.Tensor = ptsd[..., 1].reshape(B, rows, cols) # B x rows x cols, float
# Remap image to undistort
out = remap(image, mapx, mapy, align_corners=True)
out = torch.round(torch.clamp(out, 0, 255)).to(torch.uint8)
return out
|
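The only change in this pair replaces Tensor.T with transpose(-2, -1); a tiny sketch, assuming a recent PyTorch (the indexing="ij" keyword), showing the two agree for the 2-D meshgrid case:

import torch

cols, rows = 4, 3
x, y = torch.meshgrid(torch.arange(cols), torch.arange(rows), indexing="ij")

# For 2-D tensors, .T and transpose(-2, -1) coincide; transpose(-2, -1)
# also remains valid (and warning-free) for batched inputs.
assert torch.equal(x.T, x.transpose(-2, -1))
assert torch.equal(y.T, y.transpose(-2, -1))
print(x.transpose(-2, -1).shape)   # torch.Size([3, 4])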
31,784 |
def main() -> None:
"""main function, parses params and runs command functions
:return:
:rtype:
"""
args = demisto.args()
params = demisto.params()
api_key = params.get('apikey')
api_key_id = params.get('apikey_id')
base_url = urljoin(params['url'], '/public_api/v1')
verify_cert = not params.get('insecure', False)
proxy = params.get('proxy', False)
command = demisto.command()
demisto.debug(f'Command being called is {command}')
try:
nonce = "".join([secrets.choice(string.ascii_letters + string.digits) for _ in range(64)])
timestamp = str(int(datetime.now(timezone.utc).timestamp()) * 1000)
auth_key = "%s%s%s" % (api_key, nonce, timestamp)
api_key_hash = hashlib.sha256(auth_key.encode("utf-8")).hexdigest()
headers = {
"x-xdr-timestamp": str(timestamp),
"x-xdr-nonce": nonce,
"x-xdr-auth-id": str(api_key_id),
"Authorization": api_key_hash
}
client = Client(
base_url=base_url,
verify=verify_cert,
headers=headers,
proxy=proxy)
generic_commands = init_generic_commands()
built_in_commands = init_built_in_commands()
if command in generic_commands:
return_results(generic_commands[command](client, args))
elif command in built_in_commands:
return_results(get_built_in_query_results_polling_command(client, args))
else:
raise NotImplementedError(f'Command {command} does not exist.')
# Log exceptions and return errors
except Exception as e:
demisto.error(traceback.format_exc()) # print the traceback
return_error(f'Failed to execute {command} command.\nError: {str(e)}')
finally:
get_integration_context().clear()
|
def main() -> None:
"""main function, parses params and runs command functions
:return:
:rtype:
"""
args = demisto.args()
params = demisto.params()
api_key = params.get('apikey')
api_key_id = params.get('apikey_id')
base_url = urljoin(params['url'], '/public_api/v1')
verify_cert = not params.get('insecure', False)
proxy = params.get('proxy', False)
command = demisto.command()
demisto.debug(f'Command being called is {command}')
try:
nonce = "".join([secrets.choice(string.ascii_letters + string.digits) for _ in range(64)])
timestamp = str(int(datetime.now(timezone.utc).timestamp()) * 1000)
auth_key = "%s%s%s" % (api_key, nonce, timestamp)
api_key_hash = hashlib.sha256(auth_key.encode("utf-8")).hexdigest()
headers = {
"x-xdr-timestamp": str(timestamp),
"x-xdr-nonce": nonce,
"x-xdr-auth-id": api_key_id,
"Authorization": api_key_hash
}
client = Client(
base_url=base_url,
verify=verify_cert,
headers=headers,
proxy=proxy)
generic_commands = init_generic_commands()
built_in_commands = init_built_in_commands()
if command in generic_commands:
return_results(generic_commands[command](client, args))
elif command in built_in_commands:
return_results(get_built_in_query_results_polling_command(client, args))
else:
raise NotImplementedError(f'Command {command} does not exist.')
# Log exceptions and return errors
except Exception as e:
demisto.error(traceback.format_exc()) # print the traceback
return_error(f'Failed to execute {command} command.\nError: {str(e)}')
finally:
get_integration_context().clear()
|
13,541 |
def projection_shifts(A, E, Z, V, prev_shifts, shift_options):
"""Find further shift parameters for low-rank ADI iteration using
Galerkin projection on spaces spanned by LR-ADI iterates.
See [PK16]_, pp. 92-95.
Parameters
----------
A
The |Operator| A from the corresponding Lyapunov equation.
E
The |Operator| E from the corresponding Lyapunov equation.
Z
A |VectorArray| representing the currently computed low-rank
solution factor.
V
A |VectorArray| representing the currently computed iterate.
prev_shifts
A |NumPy array| containing the set of all previously used shift
parameters.
shift_options
The shift options to use (see :func:`lyap_lrcf_solver_options`).
Returns
-------
shifts
A |NumPy array| containing a set of stable shift parameters.
"""
if prev_shifts[prev_shifts.size - 1].imag != 0:
Q = gram_schmidt(cat_arrays([V.real, V.imag]), atol=0, rtol=0)
else:
Q = gram_schmidt(V, atol=0, rtol=0)
Ap = A.apply2(Q, Q)
Ep = E.apply2(Q, Q)
shifts = spla.eigvals(Ap, Ep)
shifts.imag[abs(shifts.imag) < np.finfo(float).eps] = 0
shifts = shifts[np.real(shifts) < 0]
if shifts.size == 0:
return prev_shifts
else:
if(shifts[np.imag(shifts) != 0].size > 0):
shifts = np.array(sorted(shifts, key=np.abs))
else:
shifts.sort()
return shifts
|
def projection_shifts(A, E, Z, V, prev_shifts, shift_options):
"""Find further shift parameters for low-rank ADI iteration using
Galerkin projection on spaces spanned by LR-ADI iterates.
See [PK16]_, pp. 92-95.
Parameters
----------
A
The |Operator| A from the corresponding Lyapunov equation.
E
The |Operator| E from the corresponding Lyapunov equation.
Z
A |VectorArray| representing the currently computed low-rank
solution factor.
V
A |VectorArray| representing the currently computed iterate.
prev_shifts
A |NumPy array| containing the set of all previously used shift
parameters.
shift_options
The shift options to use (see :func:`lyap_lrcf_solver_options`).
Returns
-------
shifts
A |NumPy array| containing a set of stable shift parameters.
"""
if prev_shifts[prev_shifts.size - 1].imag != 0:
Q = gram_schmidt(cat_arrays([V.real, V.imag]), atol=0, rtol=0)
else:
Q = gram_schmidt(V, atol=0, rtol=0)
Ap = A.apply2(Q, Q)
Ep = E.apply2(Q, Q)
shifts = spla.eigvals(Ap, Ep)
shifts.imag[abs(shifts.imag) < np.finfo(float).eps] = 0
shifts = shifts[np.real(shifts) < 0]
if shifts.size == 0:
return prev_shifts
else:
if(shifts[np.imag(shifts) != 0].size > 0):
shifts = shifts[np.abs(shifts).argsort()]
else:
shifts.sort()
return shifts
|
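The change in this pair swaps a Python-level sort for NumPy fancy indexing; a small sketch of the equivalence, assuming only numpy and shifts with distinct magnitudes:

import numpy as np

shifts = np.array([-1 + 2j, -3 + 0.5j, -0.2 - 0.1j])

# Python-level sort by magnitude (builds a list, then a new array)
a = np.array(sorted(shifts, key=np.abs))

# Pure-NumPy equivalent used in the modified version
b = shifts[np.abs(shifts).argsort()]

assert np.array_equal(a, b)
print(b)   # shifts ordered by increasing |shift|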
58,631 |
def get_model_subdirectories(
unpacked_model_path: Text
) -> Tuple[Optional[Text], Optional[Text]]:
"""Returns paths for core and nlu model directories, if they exist.
If neither directories exist, a `NoModelData` exception is raised.
Args:
unpacked_model_path: Path to unpacked Rasa model.
Returns:
Tuple (path to Core subdirectory if it exists or `None` otherwise,
path to NLU subdirectory if it exists or `None` otherwise).
"""
core_path = os.path.join(unpacked_model_path, "core")
nlu_path = os.path.join(unpacked_model_path, "nlu")
if not os.path.isdir(core_path):
core_path = None
if not os.path.isdir(nlu_path):
nlu_path = None
if not core_path and not nlu_path:
raise NoModelData(
"No NLU or Core data for unpacked model at: '{}'.".format(
unpacked_model_path
)
)
return core_path, nlu_path
|
def get_model_subdirectories(
unpacked_model_path: Text
) -> Tuple[Optional[Text], Optional[Text]]:
"""Returns paths for core and nlu model directories, if they exist.
If neither directories exist, a `NoModelData` exception is raised.
Args:
unpacked_model_path: Path to unpacked Rasa model.
Returns:
Tuple (path to Core subdirectory if it exists or `None` otherwise,
path to NLU subdirectory if it exists or `None` otherwise).
"""
core_path = os.path.join(unpacked_model_path, "core")
nlu_path = os.path.join(unpacked_model_path, "nlu")
if not os.path.isdir(core_path):
core_path = None
if not os.path.isdir(nlu_path):
nlu_path = None
if not core_path and not nlu_path:
raise NoModelData(
"No NLU or Core data for unpacked model at: '{}'.".format(
unpacked_model_path
)
)
return core_path, nlu_path
|
58,773 |
def convert_relayir_to_nnapi(func):
"""Converts a Relay IR Function to Android NNAPI C++ source code
Parameters
----------
func: tvm.relay.Function
The function to be converted to Android NNAPI
Returns
-------
code: str
The resulting Android NNAPI code
Note
----
Certain function attributes should be configured:
* func.attrs.NnapiClassName: (str) The name of the generated class wrapped around ANN model
* func.attrs.NnapiTargetVersion: (int) The targeting API level of Android
"""
assert isinstance(func, tvm.relay.Function)
options = {
"class": {
"self": {
"name": str(func.attrs.NnapiClassName),
},
},
"target": {
"api_level": int(func.attrs.NnapiTargetVersion),
},
}
converter = Converter(options)
return converter.convert(func)
|
def convert_relayir_to_nnapi(func):
"""Converts a Relay IR Function to Android NNAPI C++ source code
Parameters
----------
func: tvm.relay.Function
The function to be converted to Android NNAPI
Returns
-------
code: str
The resulting Android NNAPI code
Notes
-----
Certain function attributes should be configured:
* func.attrs.NnapiClassName: (str) The name of the generated class wrapped around ANN model
* func.attrs.NnapiTargetVersion: (int) The targeting API level of Android
"""
assert isinstance(func, tvm.relay.Function)
options = {
"class": {
"self": {
"name": str(func.attrs.NnapiClassName),
},
},
"target": {
"api_level": int(func.attrs.NnapiTargetVersion),
},
}
converter = Converter(options)
return converter.convert(func)
|
8,957 |
def find_sopel_modules_plugins():
"""List plugins from ``sopel_modules.*``.
:return: yield instance of :class:`~.handlers.PyModulePlugin`
configured for ``sopel_modules.*``
Before entry point plugins, the only way to package a plugin was to follow
the :pep:`382` by using the ``sopel_modules`` namespace. This function is
responsible to load such plugins.
"""
try:
import sopel_modules
except ImportError:
return
for plugin_dir in set(sopel_modules.__path__):
for name, _ in _list_plugin_filenames(plugin_dir):
yield handlers.PyModulePlugin(name, 'sopel_modules')
|
def find_sopel_modules_plugins():
"""List plugins from ``sopel_modules.*``.
:return: yield instances of :class:`~.handlers.PyModulePlugin`
configured for ``sopel_modules.*``
Before entry point plugins, the only way to package a plugin was to follow
the :pep:`382` by using the ``sopel_modules`` namespace. This function is
responsible to load such plugins.
"""
try:
import sopel_modules
except ImportError:
return
for plugin_dir in set(sopel_modules.__path__):
for name, _ in _list_plugin_filenames(plugin_dir):
yield handlers.PyModulePlugin(name, 'sopel_modules')
|
10,366 |
def db_dump(module, host, user, password, db_name, target, all_databases, port,
config_file, socket=None, ssl_cert=None, ssl_key=None, ssl_ca=None,
single_transaction=None, quick=None, ignore_tables=None, hex_blob=None,
encoding=None, force=False, create_new=None):
cmd = module.get_bin_path('mysqldump', True)
# If defined, mysqldump demands --defaults-extra-file be the first option
if config_file:
cmd += " --defaults-extra-file=%s" % shlex_quote(config_file)
if user is not None:
cmd += " --user=%s" % shlex_quote(user)
if password is not None:
cmd += " --password=%s" % shlex_quote(password)
if ssl_cert is not None:
cmd += " --ssl-cert=%s" % shlex_quote(ssl_cert)
if ssl_key is not None:
cmd += " --ssl-key=%s" % shlex_quote(ssl_key)
if ssl_ca is not None:
cmd += " --ssl-ca=%s" % shlex_quote(ssl_ca)
if force:
cmd += " --force"
if socket is not None:
cmd += " --socket=%s" % shlex_quote(socket)
else:
cmd += " --host=%s --port=%i" % (shlex_quote(host), port)
if all_databases:
cmd += " --all-databases"
else:
cmd += " --databases {0} --skip-lock-tables".format(' '.join(db_name))
if (encoding is not None) and (encoding != ""):
cmd += " --default-character-set=%s" % shlex_quote(encoding)
if single_transaction:
cmd += " --single-transaction=true"
if quick:
cmd += " --quick"
if create_new:
cmd += " --no-create-db"
if ignore_tables:
for an_ignored_table in ignore_tables:
cmd += " --ignore-table={0}".format(an_ignored_table)
if hex_blob:
cmd += " --hex-blob"
path = None
if os.path.splitext(target)[-1] == '.gz':
path = module.get_bin_path('gzip', True)
elif os.path.splitext(target)[-1] == '.bz2':
path = module.get_bin_path('bzip2', True)
elif os.path.splitext(target)[-1] == '.xz':
path = module.get_bin_path('xz', True)
if path:
cmd = '%s | %s > %s' % (cmd, path, shlex_quote(target))
else:
cmd += " > %s" % shlex_quote(target)
executed_commands.append(cmd)
rc, stdout, stderr = module.run_command(cmd, use_unsafe_shell=True)
return rc, stdout, stderr
|
def db_dump(module, host, user, password, db_name, target, all_databases, port,
config_file, socket=None, ssl_cert=None, ssl_key=None, ssl_ca=None,
single_transaction=None, quick=None, ignore_tables=None, hex_blob=None,
encoding=None, force=False, create_new=None):
cmd = module.get_bin_path('mysqldump', True)
# If defined, mysqldump demands --defaults-extra-file be the first option
if config_file:
cmd += " --defaults-extra-file=%s" % shlex_quote(config_file)
if user is not None:
cmd += " --user=%s" % shlex_quote(user)
if password is not None:
cmd += " --password=%s" % shlex_quote(password)
if ssl_cert is not None:
cmd += " --ssl-cert=%s" % shlex_quote(ssl_cert)
if ssl_key is not None:
cmd += " --ssl-key=%s" % shlex_quote(ssl_key)
if ssl_ca is not None:
cmd += " --ssl-ca=%s" % shlex_quote(ssl_ca)
if force:
cmd += " --force"
if socket is not None:
cmd += " --socket=%s" % shlex_quote(socket)
else:
cmd += " --host=%s --port=%i" % (shlex_quote(host), port)
if all_databases:
cmd += " --all-databases"
else:
cmd += " --databases {0} --skip-lock-tables".format(' '.join(db_name))
if (encoding is not None) and (encoding != ""):
cmd += " --default-character-set=%s" % shlex_quote(encoding)
if single_transaction:
cmd += " --single-transaction=true"
if quick:
cmd += " --quick"
if no_create_db:
cmd += " --no-create-db"
if ignore_tables:
for an_ignored_table in ignore_tables:
cmd += " --ignore-table={0}".format(an_ignored_table)
if hex_blob:
cmd += " --hex-blob"
path = None
if os.path.splitext(target)[-1] == '.gz':
path = module.get_bin_path('gzip', True)
elif os.path.splitext(target)[-1] == '.bz2':
path = module.get_bin_path('bzip2', True)
elif os.path.splitext(target)[-1] == '.xz':
path = module.get_bin_path('xz', True)
if path:
cmd = '%s | %s > %s' % (cmd, path, shlex_quote(target))
else:
cmd += " > %s" % shlex_quote(target)
executed_commands.append(cmd)
rc, stdout, stderr = module.run_command(cmd, use_unsafe_shell=True)
return rc, stdout, stderr
|
58,092 |
def search_command(mailbox=None):
"""
Searches for Gmail records of a specified Google user.
"""
args = demisto.args()
user_id = args.get('user-id') if mailbox is None else mailbox
mailbox = ADMIN_EMAIL if user_id == 'me' else user_id
subject = args.get('subject', '')
_from = args.get('from', '')
to = args.get('to', '')
before = args.get('before', '')
after = args.get('after', '')
filename = args.get('filename', '')
_in = args.get('in', '')
query = args.get('query', '')
fields = args.get('fields') # TODO
label_ids = [lbl for lbl in args.get('labels-ids', '').split(',') if lbl != '']
max_results = int(args.get('max-results', 100))
page_token = args.get('page-token')
include_spam_trash = args.get('include-spam-trash', False)
has_attachments = args.get('has-attachments')
has_attachments = None if has_attachments is None else bool(
strtobool(has_attachments))
receive_only_accounts = argToBoolean(args.get('receive_only_accounts', 'false'))
if max_results > 500:
raise ValueError(
'maxResults must be lower than 500, got %s' % (max_results,))
mails, q = search(user_id, subject, _from, to,
before, after, filename, _in, query,
fields, label_ids, max_results, page_token,
include_spam_trash, has_attachments, receive_only_accounts)
# In case the user wants only account list without content.
if receive_only_accounts:
if mails:
return {'Mailbox': mailbox, 'q': q}
return {'Mailbox': None, 'q': q}
res = emails_to_entry('Search in {}:\nquery: "{}"'.format(mailbox, q), mails, 'full', mailbox)
return res
|
def search_command(mailbox=None):
"""
Searches for Gmail records of a specified Google user.
"""
args = demisto.args()
user_id = args.get('user-id') if mailbox is None else mailbox
mailbox = ADMIN_EMAIL if user_id == 'me' else user_id
subject = args.get('subject', '')
_from = args.get('from', '')
to = args.get('to', '')
before = args.get('before', '')
after = args.get('after', '')
filename = args.get('filename', '')
_in = args.get('in', '')
query = args.get('query', '')
fields = args.get('fields') # TODO
label_ids = [lbl for lbl in args.get('labels-ids', '').split(',') if lbl != '']
max_results = int(args.get('max-results', 100))
page_token = args.get('page-token')
include_spam_trash = args.get('include-spam-trash', False)
has_attachments = args.get('has-attachments')
has_attachments = None if has_attachments is None else bool(
strtobool(has_attachments))
receive_only_accounts = argToBoolean(args.get('receive_only_accounts', 'false'))
if max_results > 500:
raise ValueError(
'maxResults must be lower than 500, got %s' % (max_results,))
mails, q = search(user_id, subject, _from, to,
before, after, filename, _in, query,
fields, label_ids, max_results, page_token,
include_spam_trash, has_attachments, receive_only_accounts,
)
# In case the user wants only account list without content.
if receive_only_accounts:
if mails:
return {'Mailbox': mailbox, 'q': q}
return {'Mailbox': None, 'q': q}
res = emails_to_entry('Search in {}:\nquery: "{}"'.format(mailbox, q), mails, 'full', mailbox)
return res
|
20,419 |
def user_list(fields=None):
from yunohost.utils.ldap import _get_ldap_interface
ldap_attrs = {
'username': 'uid',
'password': 'uid',
'fullname': 'cn',
'firstname': 'givenName',
'lastname': 'sn',
'mail': 'mail',
'mail-alias': 'mail',
'mail-forward': 'maildrop',
'mailbox-quota': 'mailuserquota',
'groups': 'memberOf',
'shell': 'loginShell',
'home-path': 'homeDirectory'
}
def display_default(values, _):
return values[0] if len(values) == 1 else values
display = {
'password': lambda values, user: '',
'mail': lambda values, user: display_default(values[:1], user),
'mail-alias': lambda values, _: values[1:],
'mail-forward': lambda values, user: [forward for forward in values if forward != user['uid'][0]],
'groups': lambda values, user: [
group[3:].split(',')[0]
for group in values
if not group.startswith('cn=all_users,') and
not group.startswith('cn=' + user['uid'][0] + ',')],
'shell': lambda values, _: len(values) > 0 and values[0].strip() == "/bin/false"
}
attrs = set(['uid'])
users = {}
if not fields:
fields = ['username', 'fullname', 'mail', 'mailbox-quota', 'shell']
for field in fields:
if field in ldap_attrs:
attrs |= set([ldap_attrs[field]])
else:
raise YunohostError('field_invalid', field)
ldap = _get_ldap_interface()
result = ldap.search(
"ou=users,dc=yunohost,dc=org",
"(&(objectclass=person)(!(uid=root))(!(uid=nobody)))",
attrs,
)
for user in result:
entry = {}
for field in fields:
values = []
if ldap_attrs[field] in user:
values = user[ldap_attrs[field]]
entry[field] = display.get(field, display_default)(values, user)
users[user['uid'][0]] = entry
return {"users": users}
|
def user_list(fields=None):
from yunohost.utils.ldap import _get_ldap_interface
ldap_attrs = {
'username': 'uid',
'password': 'uid',
'fullname': 'cn',
'firstname': 'givenName',
'lastname': 'sn',
'mail': 'mail',
'mail-alias': 'mail',
'mail-forward': 'maildrop',
'mailbox-quota': 'mailuserquota',
'groups': 'memberOf',
'shell': 'loginShell',
'home-path': 'homeDirectory'
}
def display_default(values, _):
return values[0] if len(values) == 1 else values
display = {
'password': lambda values, user: '',
'mail': lambda values, user: display_default(values[:1], user),
'mail-alias': lambda values, _: values[1:],
'mail-forward': lambda values, user: [forward for forward in values if forward != user['uid'][0]],
'groups': lambda values, user: [
group[3:].split(',')[0]
for group in values
if not group.startswith('cn=all_users,') and
not group.startswith('cn=' + user['uid'][0] + ',')],
'shell': lambda values, _: len(values) > 0 and values[0].strip() == "/bin/false"
}
attrs = set(['uid'])
users = {}
if not fields:
fields = ['username', 'fullname', 'mail', 'mailbox-quota']
for field in fields:
if field in ldap_attrs:
attrs |= set([ldap_attrs[field]])
else:
raise YunohostError('field_invalid', field)
ldap = _get_ldap_interface()
result = ldap.search(
"ou=users,dc=yunohost,dc=org",
"(&(objectclass=person)(!(uid=root))(!(uid=nobody)))",
attrs,
)
for user in result:
entry = {}
for field in fields:
values = []
if ldap_attrs[field] in user:
values = user[ldap_attrs[field]]
entry[field] = display.get(field, display_default)(values, user)
users[user['uid'][0]] = entry
return {"users": users}
|
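A small self-contained sketch of how the display lambdas in user_list above shape a raw LDAP entry: the first mail value is kept as the primary address, the remaining values become aliases, and group DNs are reduced to their cn while all_users and the user's own primary group are dropped (the sample entry is made up for illustration).

# Hypothetical raw LDAP entry, shaped like the ones returned by ldap.search().
user = {
    'uid': ['alice'],
    'mail': ['alice@example.org', 'alias@example.org'],
    'memberOf': [
        'cn=all_users,ou=groups,dc=yunohost,dc=org',
        'cn=alice,ou=groups,dc=yunohost,dc=org',
        'cn=admins,ou=groups,dc=yunohost,dc=org',
    ],
}

primary_mail = user['mail'][:1][0]   # 'alice@example.org'
aliases = user['mail'][1:]           # ['alias@example.org']
groups = [
    g[3:].split(',')[0]
    for g in user['memberOf']
    if not g.startswith('cn=all_users,')
    and not g.startswith('cn=' + user['uid'][0] + ',')
]                                    # ['admins']
print(primary_mail, aliases, groups)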
31,110 |
def fetch_incidents(last_run: dict, first_fetch_period: str):
start_timestamp = last_run.get("start_time", None) if last_run else None
# noinspection PyTypeChecker
result = advanced_case_search(
startTimestamp=start_timestamp if start_timestamp else first_fetch_period,
endTimestamp="now",
limit=demisto.params().get("max_limit", 0),
sortBy=["createdTimestamp"],
priority=build_argus_priority_from_min_severity(
demisto.params().get("min_severity", "medium")
),
subCriteria=[
{"exclude": True, "status": ["closed"]},
],
timeFieldStrategy=["createdTimestamp"],
)
incidents = []
for case in result["data"]:
incidents.append(
{
"name": f"#{case['id']}: {case['subject']}",
"occurred": case["createdTime"],
"severity": argus_priority_to_demisto_severity(case["priority"]),
"status": argus_status_to_demisto_status(case["status"]),
"details": case["description"],
"customFields": {
"argus_id": str(case["id"]),
"type": case["type"],
"category": case["category"]["name"] if case["category"] else None,
"service": case["service"]["name"],
"lastUpdatedTime": case["lastUpdatedTime"],
"createdTimestamp": case["createdTimestamp"],
"customer": case["customer"]["shortName"],
},
"rawJson": json.dumps(case),
}
)
if result["data"]:
last_run["start_time"] = result["data"][-1]["createdTimestamp"] + 1
return last_run, incidents
|
def fetch_incidents(last_run: dict, first_fetch_period: str):
start_timestamp = last_run.get("start_time", None) if last_run else None
# noinspection PyTypeChecker
result = advanced_case_search(
startTimestamp=start_timestamp if start_timestamp else first_fetch_period,
endTimestamp="now",
limit=demisto.params().get("max_fetch", 50),
sortBy=["createdTimestamp"],
priority=build_argus_priority_from_min_severity(
demisto.params().get("min_severity", "medium")
),
subCriteria=[
{"exclude": True, "status": ["closed"]},
],
timeFieldStrategy=["createdTimestamp"],
)
incidents = []
for case in result["data"]:
incidents.append(
{
"name": f"#{case['id']}: {case['subject']}",
"occurred": case["createdTime"],
"severity": argus_priority_to_demisto_severity(case["priority"]),
"status": argus_status_to_demisto_status(case["status"]),
"details": case["description"],
"customFields": {
"argus_id": str(case["id"]),
"type": case["type"],
"category": case["category"]["name"] if case["category"] else None,
"service": case["service"]["name"],
"lastUpdatedTime": case["lastUpdatedTime"],
"createdTimestamp": case["createdTimestamp"],
"customer": case["customer"]["shortName"],
},
"rawJson": json.dumps(case),
}
)
if result["data"]:
last_run["start_time"] = result["data"][-1]["createdTimestamp"] + 1
return last_run, incidents
|
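The last_run["start_time"] checkpoint above is what makes fetch_incidents incremental: the next run starts just past the createdTimestamp of the newest case already returned, so nothing is fetched twice. A minimal standalone sketch of that loop, with a fake search function standing in for the Argus API (timestamps and ids are made up).

# Hypothetical stand-in for advanced_case_search, filtering a fixed list.
CASES = [{"id": i, "createdTimestamp": ts} for i, ts in enumerate([100, 200, 300])]

def fake_search(start_timestamp):
    return {"data": [c for c in CASES if c["createdTimestamp"] >= start_timestamp]}

last_run = {}
for _ in range(2):
    result = fake_search(last_run.get("start_time", 0))
    print([c["id"] for c in result["data"]])
    if result["data"]:
        last_run["start_time"] = result["data"][-1]["createdTimestamp"] + 1
# First pass prints [0, 1, 2]; the second prints [] because the checkpoint
# moved one past the newest createdTimestamp.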
56,246 |
def build_argparser():
parser = ArgumentParser()
general = parser.add_argument_group('General')
general.add_argument('-i', '--input', required=True,
help='Required. An input to process. The input must be a single image, '
'a folder of images, video file or camera id.')
general.add_argument('--loop', default=False, action='store_true',
help='Optional. Enable reading the input in a loop.')
general.add_argument('-o', '--output',
help='Optional. Name of output to save.')
general.add_argument('-limit', '--output_limit', default=1000, type=int,
help='Optional. Number of frames to store in output. '
'If 0 is set, all frames are stored.')
general.add_argument('--output_resolution', default=None, type=resolution,
help='Optional. Specify the maximum output window resolution '
'in (width x height) format. Example: 1280x720. '
'Input frame size used by default.')
general.add_argument('--no_show', action='store_true',
help="Optional. Don't show output.")
general.add_argument('-cw', '--crop_width', default=0, type=int,
help='Optional. Crop the input stream to this width. '
'Both -cw and -ch parameters should be specified '
'to use crop.')
general.add_argument('-ch', '--crop_height', default=0, type=int,
help='Optional. Crop the input stream to this height. '
'Both -cw and -ch parameters should be specified '
'to use crop.')
general.add_argument('--match_algo', default='HUNGARIAN', choices=('HUNGARIAN', 'MIN_DIST'),
help='Optional. Algorithm for face matching. Default: HUNGARIAN.')
general.add_argument('-u', '--utilization_monitors', default='', type=str,
help='Optional. List of monitors to show initially.')
gallery = parser.add_argument_group('Faces database')
gallery.add_argument('-fg', type=Path, required=True,
help='Required. Path to the face images directory.')
gallery.add_argument('--run_detector', action='store_true',
help='Optional. Use Face Detection model to find faces '
'on the face images, otherwise use full images.')
gallery.add_argument('--allow_grow', action='store_true',
help='Optional. Allow to grow faces gallery and to dump on disk. '
'Available only if --no_show option is off.')
models = parser.add_argument_group('Models')
models.add_argument('-m_fd', type=Path, required=True,
help='Required. Path to an .xml file with Face Detection model.')
models.add_argument('-m_lm', type=Path, required=True,
help='Required. Path to an .xml file with Facial Landmarks Detection model.')
models.add_argument('-m_reid', type=Path, required=True,
help='Required. Path to an .xml file with Face Reidentification model.')
models.add_argument('-fd_iw', '--fd_input_width', default=0, type=int,
help='Optional. Specify the input width of detection model. '
'Both -fd_iw and -fd_ih parameters should be specified '
'for reshape.')
models.add_argument('-fd_ih', '--fd_input_height', default=0, type=int,
help='Optional. Specify the input height of detection model. '
'Both -fd_iw and -fd_ih parameters should be specified '
'for reshape.')
infer = parser.add_argument_group('Inference options')
infer.add_argument('-d_fd', default='CPU', choices=DEVICE_KINDS,
help='Optional. Target device for Face Detection model. '
'Default value is CPU.')
infer.add_argument('-d_lm', default='CPU', choices=DEVICE_KINDS,
help='Optional. Target device for Facial Landmarks Detection '
'model. Default value is CPU.')
infer.add_argument('-d_reid', default='CPU', choices=DEVICE_KINDS,
help='Optional. Target device for Face Reidentification '
'model. Default value is CPU.')
infer.add_argument('-l', '--cpu_lib', metavar="PATH", default='',
help='Optional. For MKLDNN (CPU)-targeted custom layers, '
'if any. Path to a shared library with custom '
'layers implementations.')
infer.add_argument('-c', '--gpu_lib', metavar="PATH", default='',
help='Optional. For clDNN (GPU)-targeted custom layers, '
'if any. Path to the XML file with descriptions '
'of the kernels.')
infer.add_argument('-v', '--verbose', action='store_true',
help='Optional. Be more verbose.')
infer.add_argument('-pc', '--perf_stats', action='store_true',
help='Optional. Output detailed per-layer performance stats.')
infer.add_argument('-t_fd', metavar='[0..1]', type=float, default=0.6,
help='Optional. Probability threshold for face detections.')
infer.add_argument('-t_id', metavar='[0..1]', type=float, default=0.3,
help='Optional. Cosine distance threshold between two vectors '
'for face identification.')
infer.add_argument('-exp_r_fd', metavar='NUMBER', type=float, default=1.15,
help='Optional. Scaling ratio for bboxes passed to face recognition.')
return parser
|
def build_argparser():
parser = ArgumentParser()
general = parser.add_argument_group('General')
general.add_argument('-i', '--input', required=True,
help='Required. An input to process. The input must be a single image, '
'a folder of images, video file or camera id.')
general.add_argument('--loop', default=False, action='store_true',
help='Optional. Enable reading the input in a loop.')
general.add_argument('-o', '--output',
help='Optional. Name of output to save.')
general.add_argument('-limit', '--output_limit', default=1000, type=int,
help='Optional. Number of frames to store in output. '
'If 0 is set, all frames are stored.')
general.add_argument('--output_resolution', default=None, type=resolution,
help='Optional. Specify the maximum output window resolution '
'in (width x height) format. Example: 1280x720. '
'Input frame size used by default.')
general.add_argument('--no_show', action='store_true',
help="Optional. Don't show output.")
general.add_argument('-cw', '--crop_width', default=0, type=int,
help='Optional. Crop the input stream to this width. '
'Both -cw and -ch parameters should be specified '
'to use crop.')
general.add_argument('-ch', '--crop_height', default=0, type=int,
help='Optional. Crop the input stream to this height. '
'Both -cw and -ch parameters should be specified '
'to use crop.')
general.add_argument('--match_algo', default='HUNGARIAN', choices=('HUNGARIAN', 'MIN_DIST'),
help='Optional. Algorithm for face matching. Default: HUNGARIAN.')
general.add_argument('-u', '--utilization_monitors', default='', type=str,
help='Optional. List of monitors to show initially.')
gallery = parser.add_argument_group('Faces database')
gallery.add_argument('-fg', type=Path, required=True,
help='Required. Path to the face images directory.')
gallery.add_argument('--run_detector', action='store_true',
help='Optional. Use Face Detection model to find faces '
'on the face images, otherwise use full images.')
gallery.add_argument('--allow_grow', action='store_true',
help='Optional. Allow to grow faces gallery and to dump on disk. '
'Available only if --no_show option is off.')
models = parser.add_argument_group('Models')
models.add_argument('-m_fd', type=Path, required=True,
help='Required. Path to an .xml file with Face Detection model.')
models.add_argument('-m_lm', type=Path, required=True,
help='Required. Path to an .xml file with Facial Landmarks Detection model.')
models.add_argument('-m_reid', type=Path, required=True,
help='Required. Path to an .xml file with Face Reidentification model.')
models.add_argument('-fd_iw', '--fd_input_width', default=0, type=int,
help='Optional. Specify the input width of detection model. '
'Both -fd_iw and -fd_ih parameters should be specified '
'for reshape.')
models.add_argument('-fd_ih', '--fd_input_height', default=0, type=int,
help='Optional. Specify the input height of detection model. '
'Both -fd_iw and -fd_ih parameters should be specified '
'for reshape.')
infer = parser.add_argument_group('Inference options')
infer.add_argument('-d_fd', default='CPU', choices=DEVICE_KINDS,
help='Optional. Target device for Face Detection model. '
'Default value is CPU.')
infer.add_argument('-d_lm', default='CPU', choices=DEVICE_KINDS,
help='Optional. Target device for Facial Landmarks Detection '
'model. Default value is CPU.')
infer.add_argument('-d_reid', default='CPU', choices=DEVICE_KINDS,
help='Optional. Target device for Face Reidentification '
'model. Default value is CPU.')
infer.add_argument('-l', '--cpu_lib', metavar="PATH", default='',
help='Optional. For MKLDNN (CPU)-targeted custom layers, '
'if any. Path to a shared library with custom '
'layers implementations.')
infer.add_argument('-c', '--gpu_lib', metavar="PATH", default='',
help='Optional. For clDNN (GPU)-targeted custom layers, '
'if any. Path to the XML file with descriptions '
'of the kernels.')
infer.add_argument('-v', '--verbose', action='store_true',
help='Optional. Be more verbose.')
infer.add_argument('-pc', '--perf_stats', action='store_true',
help='Optional. Output detailed per-layer performance stats.')
infer.add_argument('-t_fd', metavar='[0..1]', type=float, default=0.6,
help='Optional. Probability threshold for face detections.')
infer.add_argument('-t_id', metavar='[0..1]', type=float, default=0.3,
help='Optional. Cosine distance threshold between two vectors '
'for face identification.')
infer.add_argument('-exp_r_fd', metavar='NUMBER', type=float, default=1.15,
help='Optional. Scaling ratio for bboxes passed to face recognition.')
return parser
|
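The parser above leans on argparse argument groups ('General', 'Faces database', 'Models', 'Inference options'); groups only change how --help is rendered, parsing itself is unaffected. A minimal standalone sketch of that pattern with a couple of placeholder options (the real demo additionally requires -fg, -m_lm and -m_reid):

from argparse import ArgumentParser

parser = ArgumentParser()
general = parser.add_argument_group('General')
general.add_argument('-i', '--input', required=True,
                     help='Required. An input to process.')
models = parser.add_argument_group('Models')
models.add_argument('-m_fd', required=True,
                    help='Required. Path to a Face Detection model.')

args = parser.parse_args(['-i', '0', '-m_fd', 'face-detection.xml'])
print(args.input, args.m_fd)   # 0 face-detection.xml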
14,629 |
def parse_config_file(config_path, log_level=logging.INFO): # noqa: C901
"""
Parses a SKLL experiment configuration file with the given path.
Log messages with the given log level (default: INFO).
Parameters
----------
config_path : str
The path to the configuration file.
log_level : logging level, optional, default=logging.INFO
The logging level to use.
Returns
-------
experiment_name : str
A string used to identify this particular experiment configuration.
When generating result summary files, this name helps prevent
overwriting previous summaries.
task : str
        The type of experiment we're trying to run (e.g. 'cross_validate').
sampler : str
The name of a sampler to perform non-linear transformations of the input.
fixed_sampler_parameters : dict
A dictionary containing parameters you want to have fixed for the sampler.
feature_hasher : bool
If True, this enables a high-speed, low-memory vectorizer that uses
feature hashing for converting feature dictionaries into NumPy arrays
instead of using a DictVectorizer.
hasher_features : int
The number of features used by the FeatureHasher if the feature_hasher
flag is enabled.
id_col : str
The column with IDs.
label_col : str
The column with labels.
train_set_name : str
The name of the training set.
test_set_name : str
The name of the test set.
suffix : str
The file format the training/test files are in.
    featuresets : list of list of str
A list of lists of prefixes for the files containing
the features you would like to train/test on.
do_shuffle : bool
Whether to shuffle the data.
model_path : str
The path to the model file(s).
do_grid_search : bool
Whether to perform grid search.
grid_objectives : list of str
A list of scoring functions to use for tuning.
probability : bool
Whether to output probabilities for each class.
pipeline : bool
Whether to include the `pipeline` attribute in the
trained model. This will increase the size of the
model file.
results_path : str
Path to store result files in.
pos_label_str : str
The string label for the positive class in the binary
classification setting.
feature_scaling : str
How to scale features (e.g. 'with_mean').
min_feature_count : int
The minimum number of examples for which the value of a
feature must be nonzero to be included in the model.
folds_file : str
The path to the folds_file, if specified.
grid_search_jobs : int
Number of folds to run in parallel when using grid search.
grid_search_folds : int
The number of folds to use for grid search.
cv_folds : dict or int
The specified folds mapping, or the number of folds.
save_cv_folds : bool
Whether to save CV Folds to file.
save_cv_models : bool
Whether to save CV models.
use_folds_file_for_grid_search : bool
Whether to use folds file for grid search.
do_stratified_folds : bool
        Whether to use stratified folds for cross-validation (rather than random folds).
fixed_parameter_list : list of dict
List of dicts containing parameters you want to have fixed for
each classifier in learners list.
param_grid_list : list of dict
List of parameter grids to search, one dict for each learner.
featureset_names : list of str
The names of the featuresets used for each job.
learners : list of str
A list of learners to try using.
prediction_dir : str
        The directory where predictions are saved.
log_path : str
The path to the log file.
train_path : str
        The path to a file containing features to train on.
test_path : str
The path to a file containing features to test on.
ids_to_floats : bool
Whether to convert IDs to floats.
class_map : dict
A class map collapsing several labels into one.
custom_learner_path : str
Path to a .py file that defines a custom learner.
custom_metric_path : str
Path to a .py file that defines a custom metric.
learning_curve_cv_folds_list : list of int
A list of integers specifying the number of folds to use for CV.
learning_curve_train_sizes : list of float or list of int
List of floats or integers representing relative or absolute numbers
of training examples that will be used to generate the learning
curve respectively.
output_metrics : list
A list of output metrics to use.
save_votes : bool
Whether to save the individual predictions from voting learners.
Raises
------
IOError
If configuration file name is empty
ValueError
If various configuration parameters are incorrectly specified,
or cause conflicts.
"""
# check that config_path is not empty
if config_path == "":
raise IOError("The name of the configuration file is empty")
# compute the absolute path for the config file
config_path = realpath(config_path)
config_dir = dirname(config_path)
# set up a config parser with the above default values
config = _setup_config_parser(config_path)
# extract parameters from the various sections in the config file
######################
# 1. General section #
######################
if config.has_option("General", "experiment_name"):
experiment_name = config.get("General", "experiment_name")
else:
raise ValueError("Configuration file does not contain experiment_name "
"in the [General] section.")
# next, get the log path before anything else since we need to
# save all logging messages to a log file in addition to displaying
# them on the console
# if the user specified "log" instead of "logs", we need
# to show a warning and save its value in "logs" instead
log_value = config.get("Output", "log")
show_log_warning = False
if log_value:
# since we do not have a logger yet, we will show the warning later
show_log_warning = True
config.set('Output', 'logs', log_value)
config.remove_option('Output', 'log')
# now get the value from the 'logs' field
log_value = config.get("Output", "logs")
try:
log_path = locate_file(log_value, config_dir)
except IOError as e:
if e.errno == errno.ENOENT:
log_path = e.filename
os.makedirs(log_path)
# Create a top-level log file under the log path
main_log_file = join(log_path, f'{experiment_name}.log')
# Now create a SKLL logger that will log to this file as well
# as to the console. Use the log level provided - note that
# we only have to do this the first time we call `get_skll_logger()`
# with a given name.
logger = get_skll_logger('experiment',
filepath=main_log_file,
log_level=log_level)
# now show the 'log' warning, if any, since we have the logger set up
if show_log_warning:
logger.warning("The 'log' option in the [Output] section is deprecated "
"and will be removed in the next version. Use 'logs' "
"instead.")
show_log_warning = False
if config.has_option("General", "task"):
task = config.get("General", "task")
else:
raise ValueError("Configuration file does not contain task in the "
"[General] section.")
if task not in VALID_TASKS:
raise ValueError(f'An invalid task was specified: {task}. Valid '
f'tasks are: {", ".join(VALID_TASKS)}')
####################
# 2. Input section #
####################
sampler = config.get("Input", "sampler")
if sampler not in VALID_SAMPLERS:
raise ValueError(f'An invalid sampler was specified: {sampler}. Valid'
f' samplers are: {", ".join(VALID_SAMPLERS)}')
# produce warnings if feature_hasher is set but hasher_features
# is less than or equal to zero.
feature_hasher = config.getboolean("Input", "feature_hasher")
hasher_features = config.getint("Input", "hasher_features")
if feature_hasher:
if hasher_features <= 0:
raise ValueError("Configuration file must specify a non-zero value "
"for the option hasher_features when "
"feature_hasher is True.")
# produce warnings if hasher_features is set but feature_hasher
# is not set correctly
elif hasher_features > 0:
logger.warning("Ignoring hasher_features since feature_hasher is either"
" missing or set to False.")
if config.has_option("Input", "learners"):
learners_string = config.get("Input", "learners")
else:
raise ValueError("Configuration file does not contain list of learners "
"in [Input] section.")
learners = yaml.safe_load(fix_json(learners_string))
if len(learners) == 0:
raise ValueError("Configuration file contains an empty list of learners"
" in the [Input] section.")
elif len(set(learners)) < len(learners):
raise ValueError('Configuration file contains the same learner multiple'
' times, which is not currently supported. Please use'
' param_grids with tuning to find the optimal settings'
' for the learner.')
custom_learner_path = locate_file(config.get("Input", "custom_learner_path"),
config_dir)
# get the custom metric path, if specified, and locate it
custom_metric_path = locate_file(config.get("Input", "custom_metric_path"),
config_dir)
# get the featuresets
featuresets_string = config.get("Input", "featuresets")
featuresets = yaml.safe_load(fix_json(featuresets_string))
# ensure that featuresets is either a list of features or a list of lists
# of features
if not isinstance(featuresets, list) or not all(isinstance(fs, list) for fs
in featuresets):
raise ValueError("The featuresets parameter should be a list of "
"features or a list of lists of features. You "
f"specified: {featuresets}")
featureset_names = yaml.safe_load(fix_json(config.get("Input",
"featureset_names")))
# ensure that featureset_names is a list of strings, if specified
if featureset_names:
if (not isinstance(featureset_names, list) or
not all([isinstance(fs, str) for fs in
featureset_names])):
raise ValueError("The featureset_names parameter should be a list "
f"of strings. You specified: {featureset_names}")
# get the value for learning_curve_cv_folds and ensure
# that it's a list of the same length as the value of
# learners. If it's not specified, then we just assume
# that we are using 10 folds for each learner.
learning_curve_cv_folds_list_string = config.get("Input",
"learning_curve_cv_folds_list")
learning_curve_cv_folds_list = yaml.safe_load(fix_json(learning_curve_cv_folds_list_string))
if len(learning_curve_cv_folds_list) == 0:
learning_curve_cv_folds_list = [10] * len(learners)
else:
if (not isinstance(learning_curve_cv_folds_list, list) or
not all([isinstance(fold, int) for fold in learning_curve_cv_folds_list]) or
not len(learning_curve_cv_folds_list) == len(learners)):
raise ValueError("The learning_curve_cv_folds parameter should "
"be a list of integers of the same length as "
"the number of learners. You specified: "
f"{learning_curve_cv_folds_list}")
# get the value for learning_curve_train_sizes and ensure
# that it's a list of either integers (sizes) or
# floats (proportions). If it's not specified, then we just
# assume that we are using np.linspace(0.1, 1.0, 5).
learning_curve_train_sizes_string = config.get("Input", "learning_curve_train_sizes")
learning_curve_train_sizes = yaml.safe_load(fix_json(learning_curve_train_sizes_string))
if len(learning_curve_train_sizes) == 0:
learning_curve_train_sizes = np.linspace(0.1, 1.0, 5).tolist()
else:
if (not isinstance(learning_curve_train_sizes, list) or
not all([isinstance(size, int) or isinstance(size, float) for size in
learning_curve_train_sizes])):
raise ValueError("The learning_curve_train_sizes parameter should "
"be a list of integers or floats. You specified: "
f"{learning_curve_train_sizes}")
# do we need to shuffle the training data
do_shuffle = config.getboolean("Input", "shuffle")
fixed_parameter_list = yaml.safe_load(fix_json(config.get("Input",
"fixed_parameters")))
fixed_sampler_parameters = fix_json(config.get("Input",
"sampler_parameters"))
fixed_sampler_parameters = yaml.safe_load(fixed_sampler_parameters)
param_grid_list = yaml.safe_load(fix_json(config.get("Tuning", "param_grids")))
# read and normalize the value of `pos_label_str`
pos_label_str = safe_float(config.get("Tuning", "pos_label_str"))
if pos_label_str == '':
pos_label_str = None
# ensure that feature_scaling is specified only as one of the
# four available choices
feature_scaling = config.get("Input", "feature_scaling")
if feature_scaling not in VALID_FEATURE_SCALING_OPTIONS:
raise ValueError("Invalid value for feature_scaling parameter: "
f"{feature_scaling}")
suffix = config.get("Input", "suffix")
label_col = config.get("Input", "label_col")
id_col = config.get("Input", "id_col")
ids_to_floats = config.getboolean("Input", "ids_to_floats")
# if an external folds file is specified, then read it into a dictionary
folds_file = locate_file(config.get("Input", "folds_file"), config_dir)
num_cv_folds = config.getint("Input", "num_cv_folds")
specified_folds_mapping = None
specified_num_folds = None
if folds_file:
specified_folds_mapping = load_cv_folds(folds_file, ids_to_floats=ids_to_floats)
else:
# if no file is specified, then set the number of folds for cross-validation
specified_num_folds = num_cv_folds if num_cv_folds else 10
# whether or not to save the cv fold ids/models
save_cv_folds = config.getboolean("Output", "save_cv_folds")
save_cv_models = config.getboolean("Output", "save_cv_models")
# whether or not to do stratified cross validation
random_folds = config.getboolean("Input", "random_folds")
if random_folds:
if folds_file:
logger.warning('Specifying "folds_file" overrides "random_folds".')
do_stratified_folds = False
else:
do_stratified_folds = True
# get all the input paths and directories (without trailing slashes)
train_path = config.get("Input", "train_directory").rstrip(os.sep)
test_path = config.get("Input", "test_directory").rstrip(os.sep)
train_file = config.get("Input", "train_file")
test_file = config.get("Input", "test_file")
# make sure that featuresets is not an empty list unless
# train_file and test_file are specified
if not train_file and not test_file and (isinstance(featuresets, list) and
len(featuresets) == 0):
raise ValueError(
"The 'featuresets' parameters cannot be an empty list.")
    # The user must specify at least one of train_file or train_path.
if not train_file and not train_path:
raise ValueError('Invalid [Input] parameters: either "train_file" or '
'"train_directory" must be specified in the '
'configuration file.')
    # The user cannot specify both train_file and train_path.
if train_file and train_path:
raise ValueError('Invalid [Input] parameters: only either "train_file"'
' or "train_directory" can be specified in the '
'configuration file, not both.')
# Cannot specify both test_file and test_path
if test_file and test_path:
raise ValueError('Invalid [Input] parameters: only either "test_file" '
'or "test_directory" can be specified in the '
'configuration file, not both.')
# if train_file is specified, then assign its value to train_path
# this is a workaround to make this simple use case (a single train and
# test file) compatible with the existing architecture using
# featuresets
if train_file:
train_path = train_file
featuresets = [[f'train_{basename(train_file)}']]
suffix = ''
# if test_file is specified, then assign its value to test_path to
# enable compatibility with the pre-existing featuresets architecture
if test_file:
test_path = test_file
featuresets[0][0] += f'_test_{basename(test_file)}'
# make sure all the specified paths/files exist
train_path = locate_file(train_path, config_dir)
test_path = locate_file(test_path, config_dir)
# Get class mapping dictionary if specified
class_map_string = config.get("Input", "class_map")
original_class_map = yaml.safe_load(fix_json(class_map_string))
if original_class_map:
# Change class_map to map from originals to replacements instead of
# from replacement to list of originals
class_map = {}
for replacement, original_list in original_class_map.items():
for original in original_list:
class_map[original] = replacement
del original_class_map
else:
class_map = None
#####################
# 3. Output section #
#####################
probability = config.getboolean("Output", "probability")
pipeline = config.getboolean("Output", "pipeline")
# do we want to keep the predictions?
# make sure the predictions path exists and if not create it
try:
prediction_dir = locate_file(config.get("Output", "predictions"),
config_dir)
except IOError as e:
if e.errno == errno.ENOENT:
prediction_dir = e.filename
os.makedirs(prediction_dir)
# make sure model path exists and if not, create it
try:
model_path = locate_file(config.get("Output", "models"), config_dir)
except IOError as e:
if e.errno == errno.ENOENT:
model_path = e.filename
os.makedirs(model_path)
# make sure results path exists
try:
results_path = locate_file(config.get("Output", "results"), config_dir)
except IOError as e:
if e.errno == errno.ENOENT:
results_path = e.filename
os.makedirs(results_path)
# what are the output metrics?
output_metrics = config.get("Output", "metrics")
output_metrics = _parse_and_validate_metrics(output_metrics,
'metrics',
logger=logger)
# do we want to save the individual predictions from voting
# learner estimators?
save_votes = config.getboolean("Output", "save_votes")
#####################
# 4. Tuning section #
#####################
# do we need to run a grid search for the hyperparameters or are we just
# using the defaults?
do_grid_search = config.getboolean("Tuning", "grid_search")
# parse any provided grid objective functions
grid_objectives = config.get("Tuning", "objectives")
grid_objectives = _parse_and_validate_metrics(grid_objectives,
'objectives',
logger=logger)
    # if we are doing learning curves, we don't care about
# grid search
if task == 'learning_curve' and do_grid_search:
do_grid_search = False
logger.warning("Grid search is not supported during "
"learning curve generation. Disabling.")
# Check if `param_grids` is specified, but `do_grid_search` is False
if param_grid_list and not do_grid_search:
logger.warning('Since "grid_search" is set to False, the specified'
' "param_grids" will be ignored.')
# Warn user about potential conflicts between parameter values
# specified in `fixed_parameter_list` and values specified in
# `param_grid_list` (or values passed in by default) if
# `do_grid_search` is True
if do_grid_search and fixed_parameter_list:
logger.warning('Note that "grid_search" is set to True and '
'"fixed_parameters" is also specified. If there '
'is a conflict between the grid search parameter'
' space and the fixed parameter values, the '
'fixed parameter values will take precedence.')
# minimum number of examples a feature must be nonzero in to be included
min_feature_count = config.getint("Tuning", "min_feature_count")
# if an external folds file was specified do we use the same folds file
# for the inner grid-search in cross-validate as well?
use_folds_file_for_grid_search = config.getboolean("Tuning",
"use_folds_file_for_grid_search")
# how many jobs should we run in parallel for grid search
grid_search_jobs = config.getint("Tuning", "grid_search_jobs")
if not grid_search_jobs:
grid_search_jobs = None
# how many folds should we run in parallel for grid search
grid_search_folds = config.getint("Tuning", "grid_search_folds")
# check whether the right things are set for the given task
if (task == 'evaluate' or task == 'predict') and not test_path:
raise ValueError('The test set must be set when task is evaluate or '
'predict.')
if task in ['cross_validate', 'evaluate', 'train']:
if do_grid_search and len(grid_objectives) == 0:
raise ValueError('Grid search is on. Either specify a list of tuning '
'objectives or set `grid_search` to `false` in the '
'Tuning section.')
if not do_grid_search and len(grid_objectives) > 0:
logger.warning('Since "grid_search" is set to False, any specified'
' "objectives" will be ignored.')
grid_objectives = []
if task in ['cross_validate', 'train', 'learning_curve'] and test_path:
raise ValueError('The test set should not be set when task is '
f'{task}.')
if task in ['train', 'predict'] and results_path and not do_grid_search:
raise ValueError('The results path should not be set when task is '
f'{task} and "grid_search" is set to False.')
if task == 'train' and not model_path:
raise ValueError('The model path should be set when task is train.')
if task in ['learning_curve', 'train'] and prediction_dir:
raise ValueError('The predictions path should not be set when task is '
f'{task}.')
if task == 'learning_curve' and model_path:
raise ValueError('The models path should not be set when task is '
'learning_curve.')
if task == 'learning_curve':
if len(grid_objectives) > 0:
raise ValueError("The \"objectives\" option is no longer supported"
" for the \"learning_curve\" task. Please use the"
" \"metrics\" option in the [Output] section "
"instead.")
if len(output_metrics) == 0:
raise ValueError('The "metrics" option must be set when '
'the task is "learning_curve".')
# if any of the objectives or metrics require probabilities to be output,
# probability must be specified as true
specified_probabilistic_metrics = PROBABILISTIC_METRICS.intersection(grid_objectives + output_metrics)
if specified_probabilistic_metrics and not probability:
raise ValueError("The 'probability' option must be 'true' "
" to compute the following: "
f"{list(specified_probabilistic_metrics)}.")
# set the folds appropriately based on the task:
# (a) if the task is `train`/`evaluate`/`predict` and if an external
# fold mapping is specified then use that mapping for grid search
# instead of the value contained in `grid_search_folds`.
# (b) if the task is `cross_validate` and an external fold mapping is specified
# then use that mapping for the outer CV loop and for the inner grid-search
# loop. However, if `use_folds_file_for_grid_search` is `False`, do not
# use the fold mapping for the inner loop.
cv_folds = None
if task in ['train', 'evaluate', 'predict'] and specified_folds_mapping:
grid_search_folds = specified_folds_mapping
# only print out the warning if the user actually wants to do grid search
if do_grid_search:
logger.warning("Specifying \"folds_file\" overrides both "
"explicit and default \"grid_search_folds\".")
if task == 'cross_validate':
cv_folds = specified_folds_mapping if specified_folds_mapping else specified_num_folds
if specified_folds_mapping:
logger.warning("Specifying \"folds_file\" overrides both "
"explicit and default \"num_cv_folds\".")
if use_folds_file_for_grid_search:
grid_search_folds = cv_folds
else:
# only print out the warning if the user wants to do grid search
if do_grid_search:
logger.warning("The specified \"folds_file\" will "
"not be used for inner grid search.")
if save_cv_models is True and not model_path:
raise ValueError("Output directory for models must be set if "
"\"save_cv_models\" is set to true.")
# Create feature set names if unspecified
if not featureset_names:
featureset_names = [_munge_featureset_name(x) for x in featuresets]
if len(featureset_names) != len(featuresets):
raise ValueError('Number of feature set names '
f'({len(featureset_names)}) does not match number of'
f' feature sets ({len(featuresets)}).')
# store training/test set names for later use
train_set_name = basename(train_path)
test_set_name = basename(test_path) if test_path else "cv"
return (experiment_name, task, sampler, fixed_sampler_parameters,
feature_hasher, hasher_features, id_col, label_col, train_set_name,
test_set_name, suffix, featuresets, do_shuffle, model_path,
do_grid_search, grid_objectives, probability, pipeline, results_path,
pos_label_str, feature_scaling, min_feature_count, folds_file,
grid_search_jobs, grid_search_folds, cv_folds, save_cv_folds,
save_cv_models, use_folds_file_for_grid_search, do_stratified_folds,
fixed_parameter_list, param_grid_list, featureset_names, learners,
prediction_dir, log_path, train_path, test_path, ids_to_floats,
class_map, custom_learner_path, custom_metric_path,
learning_curve_cv_folds_list, learning_curve_train_sizes,
output_metrics, save_votes)
|
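The single-file workaround near the end of the [Input] handling above (folding train_file/test_file into the featuresets structure) is easy to trace in isolation; the sketch below reproduces just that renaming step with made-up file names.

from os.path import basename

train_file = '/data/train.jsonlines'   # placeholder paths
test_file = '/data/test.jsonlines'

# Mirrors the train_file/test_file handling in parse_config_file above.
featuresets = [[f'train_{basename(train_file)}']]
suffix = ''   # the suffix is cleared, since the file name already carries it
if test_file:
    featuresets[0][0] += f'_test_{basename(test_file)}'

print(featuresets)   # [['train_train.jsonlines_test_test.jsonlines']]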
def parse_config_file(config_path, log_level=logging.INFO): # noqa: C901
"""
Parses a SKLL experiment configuration file with the given path.
Log messages with the given log level (default: INFO).
Parameters
----------
config_path : str
The path to the configuration file.
log_level : logging level, default=logging.INFO
The logging level to use.
Returns
-------
experiment_name : str
A string used to identify this particular experiment configuration.
When generating result summary files, this name helps prevent
overwriting previous summaries.
task : str
        The type of experiment we're trying to run (e.g. 'cross_validate').
sampler : str
The name of a sampler to perform non-linear transformations of the input.
fixed_sampler_parameters : dict
A dictionary containing parameters you want to have fixed for the sampler.
feature_hasher : bool
If True, this enables a high-speed, low-memory vectorizer that uses
feature hashing for converting feature dictionaries into NumPy arrays
instead of using a DictVectorizer.
hasher_features : int
The number of features used by the FeatureHasher if the feature_hasher
flag is enabled.
id_col : str
The column with IDs.
label_col : str
The column with labels.
train_set_name : str
The name of the training set.
test_set_name : str
The name of the test set.
suffix : str
The file format the training/test files are in.
    featuresets : list of list of str
A list of lists of prefixes for the files containing
the features you would like to train/test on.
do_shuffle : bool
Whether to shuffle the data.
model_path : str
The path to the model file(s).
do_grid_search : bool
Whether to perform grid search.
grid_objectives : list of str
A list of scoring functions to use for tuning.
probability : bool
Whether to output probabilities for each class.
pipeline : bool
Whether to include the `pipeline` attribute in the
trained model. This will increase the size of the
model file.
results_path : str
Path to store result files in.
pos_label_str : str
The string label for the positive class in the binary
classification setting.
feature_scaling : str
How to scale features (e.g. 'with_mean').
min_feature_count : int
The minimum number of examples for which the value of a
feature must be nonzero to be included in the model.
folds_file : str
The path to the folds_file, if specified.
grid_search_jobs : int
Number of folds to run in parallel when using grid search.
grid_search_folds : int
The number of folds to use for grid search.
cv_folds : dict or int
The specified folds mapping, or the number of folds.
save_cv_folds : bool
Whether to save CV Folds to file.
save_cv_models : bool
Whether to save CV models.
use_folds_file_for_grid_search : bool
Whether to use folds file for grid search.
do_stratified_folds : bool
        Whether to use stratified folds for cross-validation (rather than random folds).
fixed_parameter_list : list of dict
List of dicts containing parameters you want to have fixed for
each classifier in learners list.
param_grid_list : list of dict
List of parameter grids to search, one dict for each learner.
featureset_names : list of str
The names of the featuresets used for each job.
learners : list of str
A list of learners to try using.
prediction_dir : str
        The directory where predictions are saved.
log_path : str
The path to the log file.
train_path : str
        The path to a file containing features to train on.
test_path : str
The path to a file containing features to test on.
ids_to_floats : bool
Whether to convert IDs to floats.
class_map : dict
A class map collapsing several labels into one.
custom_learner_path : str
Path to a .py file that defines a custom learner.
custom_metric_path : str
Path to a .py file that defines a custom metric.
learning_curve_cv_folds_list : list of int
A list of integers specifying the number of folds to use for CV.
learning_curve_train_sizes : list of float or list of int
List of floats or integers representing relative or absolute numbers
of training examples that will be used to generate the learning
curve respectively.
output_metrics : list
A list of output metrics to use.
save_votes : bool
Whether to save the individual predictions from voting learners.
Raises
------
IOError
If configuration file name is empty
ValueError
If various configuration parameters are incorrectly specified,
or cause conflicts.
"""
# check that config_path is not empty
if config_path == "":
raise IOError("The name of the configuration file is empty")
# compute the absolute path for the config file
config_path = realpath(config_path)
config_dir = dirname(config_path)
# set up a config parser with the above default values
config = _setup_config_parser(config_path)
# extract parameters from the various sections in the config file
######################
# 1. General section #
######################
if config.has_option("General", "experiment_name"):
experiment_name = config.get("General", "experiment_name")
else:
raise ValueError("Configuration file does not contain experiment_name "
"in the [General] section.")
# next, get the log path before anything else since we need to
# save all logging messages to a log file in addition to displaying
# them on the console
# if the user specified "log" instead of "logs", we need
# to show a warning and save its value in "logs" instead
log_value = config.get("Output", "log")
show_log_warning = False
if log_value:
# since we do not have a logger yet, we will show the warning later
show_log_warning = True
config.set('Output', 'logs', log_value)
config.remove_option('Output', 'log')
# now get the value from the 'logs' field
log_value = config.get("Output", "logs")
try:
log_path = locate_file(log_value, config_dir)
except IOError as e:
if e.errno == errno.ENOENT:
log_path = e.filename
os.makedirs(log_path)
# Create a top-level log file under the log path
main_log_file = join(log_path, f'{experiment_name}.log')
# Now create a SKLL logger that will log to this file as well
# as to the console. Use the log level provided - note that
# we only have to do this the first time we call `get_skll_logger()`
# with a given name.
logger = get_skll_logger('experiment',
filepath=main_log_file,
log_level=log_level)
# now show the 'log' warning, if any, since we have the logger set up
if show_log_warning:
logger.warning("The 'log' option in the [Output] section is deprecated "
"and will be removed in the next version. Use 'logs' "
"instead.")
show_log_warning = False
if config.has_option("General", "task"):
task = config.get("General", "task")
else:
raise ValueError("Configuration file does not contain task in the "
"[General] section.")
if task not in VALID_TASKS:
raise ValueError(f'An invalid task was specified: {task}. Valid '
f'tasks are: {", ".join(VALID_TASKS)}')
####################
# 2. Input section #
####################
sampler = config.get("Input", "sampler")
if sampler not in VALID_SAMPLERS:
raise ValueError(f'An invalid sampler was specified: {sampler}. Valid'
f' samplers are: {", ".join(VALID_SAMPLERS)}')
# produce warnings if feature_hasher is set but hasher_features
# is less than or equal to zero.
feature_hasher = config.getboolean("Input", "feature_hasher")
hasher_features = config.getint("Input", "hasher_features")
if feature_hasher:
if hasher_features <= 0:
raise ValueError("Configuration file must specify a non-zero value "
"for the option hasher_features when "
"feature_hasher is True.")
# produce warnings if hasher_features is set but feature_hasher
# is not set correctly
elif hasher_features > 0:
logger.warning("Ignoring hasher_features since feature_hasher is either"
" missing or set to False.")
if config.has_option("Input", "learners"):
learners_string = config.get("Input", "learners")
else:
raise ValueError("Configuration file does not contain list of learners "
"in [Input] section.")
learners = yaml.safe_load(fix_json(learners_string))
if len(learners) == 0:
raise ValueError("Configuration file contains an empty list of learners"
" in the [Input] section.")
elif len(set(learners)) < len(learners):
raise ValueError('Configuration file contains the same learner multiple'
' times, which is not currently supported. Please use'
' param_grids with tuning to find the optimal settings'
' for the learner.')
custom_learner_path = locate_file(config.get("Input", "custom_learner_path"),
config_dir)
# get the custom metric path, if specified, and locate it
custom_metric_path = locate_file(config.get("Input", "custom_metric_path"),
config_dir)
# get the featuresets
featuresets_string = config.get("Input", "featuresets")
featuresets = yaml.safe_load(fix_json(featuresets_string))
# ensure that featuresets is either a list of features or a list of lists
# of features
if not isinstance(featuresets, list) or not all(isinstance(fs, list) for fs
in featuresets):
raise ValueError("The featuresets parameter should be a list of "
"features or a list of lists of features. You "
f"specified: {featuresets}")
featureset_names = yaml.safe_load(fix_json(config.get("Input",
"featureset_names")))
# ensure that featureset_names is a list of strings, if specified
if featureset_names:
if (not isinstance(featureset_names, list) or
not all([isinstance(fs, str) for fs in
featureset_names])):
raise ValueError("The featureset_names parameter should be a list "
f"of strings. You specified: {featureset_names}")
# get the value for learning_curve_cv_folds and ensure
# that it's a list of the same length as the value of
# learners. If it's not specified, then we just assume
# that we are using 10 folds for each learner.
learning_curve_cv_folds_list_string = config.get("Input",
"learning_curve_cv_folds_list")
learning_curve_cv_folds_list = yaml.safe_load(fix_json(learning_curve_cv_folds_list_string))
if len(learning_curve_cv_folds_list) == 0:
learning_curve_cv_folds_list = [10] * len(learners)
else:
if (not isinstance(learning_curve_cv_folds_list, list) or
not all([isinstance(fold, int) for fold in learning_curve_cv_folds_list]) or
not len(learning_curve_cv_folds_list) == len(learners)):
raise ValueError("The learning_curve_cv_folds parameter should "
"be a list of integers of the same length as "
"the number of learners. You specified: "
f"{learning_curve_cv_folds_list}")
# get the value for learning_curve_train_sizes and ensure
# that it's a list of either integers (sizes) or
# floats (proportions). If it's not specified, then we just
# assume that we are using np.linspace(0.1, 1.0, 5).
learning_curve_train_sizes_string = config.get("Input", "learning_curve_train_sizes")
learning_curve_train_sizes = yaml.safe_load(fix_json(learning_curve_train_sizes_string))
if len(learning_curve_train_sizes) == 0:
learning_curve_train_sizes = np.linspace(0.1, 1.0, 5).tolist()
else:
if (not isinstance(learning_curve_train_sizes, list) or
not all([isinstance(size, int) or isinstance(size, float) for size in
learning_curve_train_sizes])):
raise ValueError("The learning_curve_train_sizes parameter should "
"be a list of integers or floats. You specified: "
f"{learning_curve_train_sizes}")
# do we need to shuffle the training data
do_shuffle = config.getboolean("Input", "shuffle")
fixed_parameter_list = yaml.safe_load(fix_json(config.get("Input",
"fixed_parameters")))
fixed_sampler_parameters = fix_json(config.get("Input",
"sampler_parameters"))
fixed_sampler_parameters = yaml.safe_load(fixed_sampler_parameters)
param_grid_list = yaml.safe_load(fix_json(config.get("Tuning", "param_grids")))
# read and normalize the value of `pos_label_str`
pos_label_str = safe_float(config.get("Tuning", "pos_label_str"))
if pos_label_str == '':
pos_label_str = None
# ensure that feature_scaling is specified only as one of the
# four available choices
feature_scaling = config.get("Input", "feature_scaling")
if feature_scaling not in VALID_FEATURE_SCALING_OPTIONS:
raise ValueError("Invalid value for feature_scaling parameter: "
f"{feature_scaling}")
suffix = config.get("Input", "suffix")
label_col = config.get("Input", "label_col")
id_col = config.get("Input", "id_col")
ids_to_floats = config.getboolean("Input", "ids_to_floats")
# if an external folds file is specified, then read it into a dictionary
folds_file = locate_file(config.get("Input", "folds_file"), config_dir)
num_cv_folds = config.getint("Input", "num_cv_folds")
specified_folds_mapping = None
specified_num_folds = None
if folds_file:
specified_folds_mapping = load_cv_folds(folds_file, ids_to_floats=ids_to_floats)
else:
# if no file is specified, then set the number of folds for cross-validation
specified_num_folds = num_cv_folds if num_cv_folds else 10
# whether or not to save the cv fold ids/models
save_cv_folds = config.getboolean("Output", "save_cv_folds")
save_cv_models = config.getboolean("Output", "save_cv_models")
# whether or not to do stratified cross validation
random_folds = config.getboolean("Input", "random_folds")
if random_folds:
if folds_file:
logger.warning('Specifying "folds_file" overrides "random_folds".')
do_stratified_folds = False
else:
do_stratified_folds = True
# get all the input paths and directories (without trailing slashes)
train_path = config.get("Input", "train_directory").rstrip(os.sep)
test_path = config.get("Input", "test_directory").rstrip(os.sep)
train_file = config.get("Input", "train_file")
test_file = config.get("Input", "test_file")
# make sure that featuresets is not an empty list unless
# train_file and test_file are specified
if not train_file and not test_file and (isinstance(featuresets, list) and
len(featuresets) == 0):
raise ValueError(
"The 'featuresets' parameters cannot be an empty list.")
    # The user must specify at least one of train_file or train_path.
if not train_file and not train_path:
raise ValueError('Invalid [Input] parameters: either "train_file" or '
'"train_directory" must be specified in the '
'configuration file.')
    # The user cannot specify both train_file and train_path.
if train_file and train_path:
raise ValueError('Invalid [Input] parameters: only either "train_file"'
' or "train_directory" can be specified in the '
'configuration file, not both.')
# Cannot specify both test_file and test_path
if test_file and test_path:
raise ValueError('Invalid [Input] parameters: only either "test_file" '
'or "test_directory" can be specified in the '
'configuration file, not both.')
# if train_file is specified, then assign its value to train_path
# this is a workaround to make this simple use case (a single train and
# test file) compatible with the existing architecture using
# featuresets
if train_file:
train_path = train_file
featuresets = [[f'train_{basename(train_file)}']]
suffix = ''
# if test_file is specified, then assign its value to test_path to
# enable compatibility with the pre-existing featuresets architecture
if test_file:
test_path = test_file
featuresets[0][0] += f'_test_{basename(test_file)}'
# make sure all the specified paths/files exist
train_path = locate_file(train_path, config_dir)
test_path = locate_file(test_path, config_dir)
# Get class mapping dictionary if specified
class_map_string = config.get("Input", "class_map")
original_class_map = yaml.safe_load(fix_json(class_map_string))
if original_class_map:
# Change class_map to map from originals to replacements instead of
# from replacement to list of originals
class_map = {}
for replacement, original_list in original_class_map.items():
for original in original_list:
class_map[original] = replacement
del original_class_map
else:
class_map = None
#####################
# 3. Output section #
#####################
probability = config.getboolean("Output", "probability")
pipeline = config.getboolean("Output", "pipeline")
# do we want to keep the predictions?
# make sure the predictions path exists and if not create it
try:
prediction_dir = locate_file(config.get("Output", "predictions"),
config_dir)
except IOError as e:
if e.errno == errno.ENOENT:
prediction_dir = e.filename
os.makedirs(prediction_dir)
# make sure model path exists and if not, create it
try:
model_path = locate_file(config.get("Output", "models"), config_dir)
except IOError as e:
if e.errno == errno.ENOENT:
model_path = e.filename
os.makedirs(model_path)
# make sure results path exists
try:
results_path = locate_file(config.get("Output", "results"), config_dir)
except IOError as e:
if e.errno == errno.ENOENT:
results_path = e.filename
os.makedirs(results_path)
# what are the output metrics?
output_metrics = config.get("Output", "metrics")
output_metrics = _parse_and_validate_metrics(output_metrics,
'metrics',
logger=logger)
# do we want to save the individual predictions from voting
# learner estimators?
save_votes = config.getboolean("Output", "save_votes")
#####################
# 4. Tuning section #
#####################
# do we need to run a grid search for the hyperparameters or are we just
# using the defaults?
do_grid_search = config.getboolean("Tuning", "grid_search")
# parse any provided grid objective functions
grid_objectives = config.get("Tuning", "objectives")
grid_objectives = _parse_and_validate_metrics(grid_objectives,
'objectives',
logger=logger)
    # if we are doing learning curves, we don't care about
# grid search
if task == 'learning_curve' and do_grid_search:
do_grid_search = False
logger.warning("Grid search is not supported during "
"learning curve generation. Disabling.")
# Check if `param_grids` is specified, but `do_grid_search` is False
if param_grid_list and not do_grid_search:
logger.warning('Since "grid_search" is set to False, the specified'
' "param_grids" will be ignored.')
# Warn user about potential conflicts between parameter values
# specified in `fixed_parameter_list` and values specified in
# `param_grid_list` (or values passed in by default) if
# `do_grid_search` is True
if do_grid_search and fixed_parameter_list:
logger.warning('Note that "grid_search" is set to True and '
'"fixed_parameters" is also specified. If there '
'is a conflict between the grid search parameter'
' space and the fixed parameter values, the '
'fixed parameter values will take precedence.')
# minimum number of examples a feature must be nonzero in to be included
min_feature_count = config.getint("Tuning", "min_feature_count")
# if an external folds file was specified do we use the same folds file
# for the inner grid-search in cross-validate as well?
use_folds_file_for_grid_search = config.getboolean("Tuning",
"use_folds_file_for_grid_search")
# how many jobs should we run in parallel for grid search
grid_search_jobs = config.getint("Tuning", "grid_search_jobs")
if not grid_search_jobs:
grid_search_jobs = None
# how many folds should we run in parallel for grid search
grid_search_folds = config.getint("Tuning", "grid_search_folds")
# check whether the right things are set for the given task
if (task == 'evaluate' or task == 'predict') and not test_path:
raise ValueError('The test set must be set when task is evaluate or '
'predict.')
if task in ['cross_validate', 'evaluate', 'train']:
if do_grid_search and len(grid_objectives) == 0:
raise ValueError('Grid search is on. Either specify a list of tuning '
'objectives or set `grid_search` to `false` in the '
'Tuning section.')
if not do_grid_search and len(grid_objectives) > 0:
logger.warning('Since "grid_search" is set to False, any specified'
' "objectives" will be ignored.')
grid_objectives = []
if task in ['cross_validate', 'train', 'learning_curve'] and test_path:
raise ValueError('The test set should not be set when task is '
f'{task}.')
if task in ['train', 'predict'] and results_path and not do_grid_search:
raise ValueError('The results path should not be set when task is '
f'{task} and "grid_search" is set to False.')
if task == 'train' and not model_path:
raise ValueError('The model path should be set when task is train.')
if task in ['learning_curve', 'train'] and prediction_dir:
raise ValueError('The predictions path should not be set when task is '
f'{task}.')
if task == 'learning_curve' and model_path:
raise ValueError('The models path should not be set when task is '
'learning_curve.')
if task == 'learning_curve':
if len(grid_objectives) > 0:
raise ValueError("The \"objectives\" option is no longer supported"
" for the \"learning_curve\" task. Please use the"
" \"metrics\" option in the [Output] section "
"instead.")
if len(output_metrics) == 0:
raise ValueError('The "metrics" option must be set when '
'the task is "learning_curve".')
# if any of the objectives or metrics require probabilities to be output,
# probability must be specified as true
specified_probabilistic_metrics = PROBABILISTIC_METRICS.intersection(grid_objectives + output_metrics)
if specified_probabilistic_metrics and not probability:
raise ValueError("The 'probability' option must be 'true' "
" to compute the following: "
f"{list(specified_probabilistic_metrics)}.")
# set the folds appropriately based on the task:
# (a) if the task is `train`/`evaluate`/`predict` and if an external
# fold mapping is specified then use that mapping for grid search
# instead of the value contained in `grid_search_folds`.
# (b) if the task is `cross_validate` and an external fold mapping is specified
# then use that mapping for the outer CV loop and for the inner grid-search
# loop. However, if `use_folds_file_for_grid_search` is `False`, do not
# use the fold mapping for the inner loop.
cv_folds = None
if task in ['train', 'evaluate', 'predict'] and specified_folds_mapping:
grid_search_folds = specified_folds_mapping
# only print out the warning if the user actually wants to do grid search
if do_grid_search:
logger.warning("Specifying \"folds_file\" overrides both "
"explicit and default \"grid_search_folds\".")
if task == 'cross_validate':
cv_folds = specified_folds_mapping if specified_folds_mapping else specified_num_folds
if specified_folds_mapping:
logger.warning("Specifying \"folds_file\" overrides both "
"explicit and default \"num_cv_folds\".")
if use_folds_file_for_grid_search:
grid_search_folds = cv_folds
else:
# only print out the warning if the user wants to do grid search
if do_grid_search:
logger.warning("The specified \"folds_file\" will "
"not be used for inner grid search.")
if save_cv_models is True and not model_path:
raise ValueError("Output directory for models must be set if "
"\"save_cv_models\" is set to true.")
# Create feature set names if unspecified
if not featureset_names:
featureset_names = [_munge_featureset_name(x) for x in featuresets]
if len(featureset_names) != len(featuresets):
raise ValueError('Number of feature set names '
f'({len(featureset_names)}) does not match number of'
f' feature sets ({len(featuresets)}).')
# store training/test set names for later use
train_set_name = basename(train_path)
test_set_name = basename(test_path) if test_path else "cv"
return (experiment_name, task, sampler, fixed_sampler_parameters,
feature_hasher, hasher_features, id_col, label_col, train_set_name,
test_set_name, suffix, featuresets, do_shuffle, model_path,
do_grid_search, grid_objectives, probability, pipeline, results_path,
pos_label_str, feature_scaling, min_feature_count, folds_file,
grid_search_jobs, grid_search_folds, cv_folds, save_cv_folds,
save_cv_models, use_folds_file_for_grid_search, do_stratified_folds,
fixed_parameter_list, param_grid_list, featureset_names, learners,
prediction_dir, log_path, train_path, test_path, ids_to_floats,
class_map, custom_learner_path, custom_metric_path,
learning_curve_cv_folds_list, learning_curve_train_sizes,
output_metrics, save_votes)
|
30,158 |
def fetch_consumption(zone_key="CA-QC", session=None, logger=None):
data = _fetch_quebec_consumption()
for elem in reversed(data["details"]):
if "demandeTotal" in elem["valeurs"]:
return {
"zoneKey": zone_key,
"datetime": elem["date"],
"consumption": elem["valeurs"]["demandeTotal"],
"source": "hydroquebec.com",
}
|
def fetch_consumption(zone_key="CA-QC", session=None, logger=None):
data = _fetch_quebec_consumption()
for elem in reversed(data["details"]):
if "demandeTotal" in elem["valeurs"]:
return {
"zoneKey": zone_key,
"datetime": arrow.get(elem["date"]).datetime,
"consumption": elem["valeurs"]["demandeTotal"],
"source": "hydroquebec.com",
}
|
20,749 |
def createGroupOperationForArrange(nodes_to_arrange: List["SceneNode"],
build_volume: "BuildVolume",
fixed_nodes: Optional[List["SceneNode"]] = None,
factor = 10000,
add_new_nodes_in_scene: bool = False):
scene_root = Application.getInstance().getController().getScene().getRoot()
found_solution_for_all, node_items = findNodePlacement(nodes_to_arrange, build_volume, fixed_nodes, factor)
not_fit_count = 0
grouped_operation = GroupedOperation()
for node, node_item in zip(nodes_to_arrange, node_items):
if add_new_nodes_in_scene:
grouped_operation.addOperation(AddSceneNodeOperation(node, scene_root))
if node_item.binId() == 0:
# We found a spot for it
rotation_matrix = Matrix()
rotation_matrix.setByRotationAxis(node_item.rotation(), Vector(0, -1, 0))
grouped_operation.addOperation(RotateOperation(node, Quaternion.fromMatrix(rotation_matrix)))
grouped_operation.addOperation(TranslateOperation(node, Vector(node_item.translation().x() / factor, 0,
node_item.translation().y() / factor)))
else:
# We didn't find a spot
grouped_operation.addOperation(
TranslateOperation(node, Vector(200, node.getWorldPosition().y, -not_fit_count * 20),
set_position=True))
not_fit_count += 1
return grouped_operation, not_fit_count
|
def createGroupOperationForArrange(nodes_to_arrange: List["SceneNode"],
build_volume: "BuildVolume",
fixed_nodes: Optional[List["SceneNode"]] = None,
factor = 10000,
add_new_nodes_in_scene: bool = False):
scene_root = Application.getInstance().getController().getScene().getRoot()
found_solution_for_all, node_items = findNodePlacement(nodes_to_arrange, build_volume, fixed_nodes, factor)
not_fit_count = 0
grouped_operation = GroupedOperation()
for node, node_item in zip(nodes_to_arrange, node_items):
if add_new_nodes_in_scene:
grouped_operation.addOperation(AddSceneNodeOperation(node, scene_root))
if node_item.binId() == 0:
# We found a spot for it
rotation_matrix = Matrix()
rotation_matrix.setByRotationAxis(node_item.rotation(), Vector(0, -1, 0))
grouped_operation.addOperation(RotateOperation(node, Quaternion.fromMatrix(rotation_matrix)))
grouped_operation.addOperation(TranslateOperation(node, Vector(node_item.translation().x() / factor, 0,
node_item.translation().y() / factor)))
else:
# We didn't find a spot
grouped_operation.addOperation(
TranslateOperation(node, Vector(200, node.getWorldPosition().y, -not_fit_count * 20), set_position = True))
not_fit_count += 1
return grouped_operation, not_fit_count
|
17,390 |
def is_duck_dask_array(x):
return is_array_like(x) and isinstance(x, dask_array_type)
|
def is_duck_dask_array(x):
return is_array_like(x) and is_dask_collection(x)
|
39,357 |
def actor_to_mesh(actor, focal_point):
"""Convert a VTK actor to a threejs mesh or meshes."""
mapper = actor.GetMapper()
if mapper is None:
return
dataset = mapper.GetInputAsDataSet()
has_faces = True
if hasattr(dataset, 'faces'):
has_faces = np.any(dataset.faces)
prop = actor.GetProperty()
rep_type = prop.GetRepresentationAsString()
meshes = []
if rep_type == 'Surface' and has_faces:
surf = extract_surface_mesh(dataset)
add_attr = {}
if prop.GetEdgeVisibility():
# must offset polygons to have mesh render property with lines
add_attr = {'polygonOffset': True,
'polygonOffsetFactor': 1,
'polygonOffsetUnits': 1}
meshes.append(to_edge_mesh(surf, mapper, prop, use_edge_coloring=True))
meshes.append(to_surf_mesh(actor, surf, mapper, prop, add_attr))
elif rep_type == 'Points':
meshes.append(to_tjs_points(dataset, mapper, prop))
else: # wireframe
if has_faces:
surf = extract_surface_mesh(dataset)
mesh = to_edge_mesh(surf, mapper, prop, use_edge_coloring=False)
meshes.append(mesh)
elif np.any(dataset.lines):
mesh = to_edge_mesh(dataset, mapper, prop, use_edge_coloring=False,
use_lines=True)
meshes.append(mesh)
else: # pragma: no cover
warnings.warn(f'Empty or unsupported dataset attached to actor')
# the camera in three.js has no concept of a "focal point". In
# three.js, the scene is always centered at the origin, which
# serves as the focal point of the camera. Therefore, we need to
# shift the entire scene by the focal point of the pyvista camera
for mesh in meshes:
mesh.position = -focal_point[0], -focal_point[1], -focal_point[2]
return meshes
|
def actor_to_mesh(actor, focal_point):
"""Convert a VTK actor to a threejs mesh or meshes."""
mapper = actor.GetMapper()
if mapper is None:
return
dataset = mapper.GetInputAsDataSet()
has_faces = True
if hasattr(dataset, 'faces'):
has_faces = np.any(dataset.faces)
prop = actor.GetProperty()
rep_type = prop.GetRepresentationAsString()
meshes = []
if rep_type == 'Surface' and has_faces:
surf = extract_surface_mesh(dataset)
add_attr = {}
if prop.GetEdgeVisibility():
# must offset polygons to have mesh render property with lines
add_attr = {'polygonOffset': True,
'polygonOffsetFactor': 1,
'polygonOffsetUnits': 1}
meshes.append(to_edge_mesh(surf, mapper, prop, use_edge_coloring=True))
meshes.append(to_surf_mesh(actor, surf, mapper, prop, add_attr))
elif rep_type == 'Points':
meshes.append(to_tjs_points(dataset, mapper, prop))
else: # wireframe
if has_faces:
surf = extract_surface_mesh(dataset)
mesh = to_edge_mesh(surf, mapper, prop, use_edge_coloring=False)
meshes.append(mesh)
elif np.any(dataset.lines):
mesh = to_edge_mesh(dataset, mapper, prop, use_edge_coloring=False,
use_lines=True)
meshes.append(mesh)
else: # pragma: no cover
warnings.warn('Empty or unsupported dataset attached to actor')
# the camera in three.js has no concept of a "focal point". In
# three.js, the scene is always centered at the origin, which
# serves as the focal point of the camera. Therefore, we need to
# shift the entire scene by the focal point of the pyvista camera
for mesh in meshes:
mesh.position = -focal_point[0], -focal_point[1], -focal_point[2]
return meshes
|
38,412 |
def test_field_cut_off_axis_octree():
ds = fake_octree_ds()
cut = ds.all_data().cut_region('obj[("gas", "density")]>0.5')
p1 = OffAxisProjectionPlot(ds, [1, 0, 0], ("gas", "density"))
p2 = OffAxisProjectionPlot(ds, [1, 0, 0], ("gas", "density"), data_source=cut)
assert_equal(p2.frb[("gas", "density")].min() == 0.0, True) # Lots of zeros
assert_equal(
(p1.frb[("gas", "density")] == p2.frb[("gas", "density")]).all(), False
)
p3 = OffAxisSlicePlot(ds, [1, 0, 0], ("gas", "density"))
p4 = OffAxisSlicePlot(ds, [1, 0, 0], ("gas", "density"), data_source=cut)
assert_equal(
(p3.frb[("gas", "density")] == p4.frb[("gas", "density")]).all(), False
)
p4rho = p4.frb[("gas", "density")]
assert_equal(np.nanmin(p4rho[p4rho > 0.0]) >= 0.5, True)
|
def test_field_cut_off_axis_octree():
ds = fake_octree_ds()
cut = ds.all_data().cut_region('obj[("gas", "density")]>0.5')
p1 = OffAxisProjectionPlot(ds, [1, 0, 0], ("gas", "density"))
p2 = OffAxisProjectionPlot(ds, [1, 0, 0], ("gas", "density"), data_source=cut)
assert_equal(p2.frb[("gas", "density")].min() == 0.0, True) # Lots of zeros
assert_equal(
(p1.frb[("gas", "density")] == p2.frb[("gas", "density")]).all(), False
)
p3 = OffAxisSlicePlot(ds, [1, 0, 0], ("gas", "density"))
p4 = OffAxisSlicePlot(ds, [1, 0, 0], ("gas", "density"), data_source=cut)
assert_equal(
(p3.frb[("gas", "density")] == p4.frb[("gas", "density")]).all(), False
)
p4rho = p4.frb[("gas", "density")]
assert np.nanmin(p4rho[p4rho > 0.0]) >= 0.5
|
34,842 |
def migrate_domain_format(
domain_path: Union[Text, Path], out_path: Optional[Union[Text, Path]],
) -> None:
"""Converts 2.0 domain to 3.0 format."""
domain_path = Path(domain_path)
out_path = Path(out_path) if out_path else None
domain_parent_dir = domain_path.parent
migrate_file_only = domain_path.is_file()
# Ensure the backup location does not exist yet
# Note: We demand that file as well as folder with this name gets deleted before
# the command is run to avoid confusion afterwards.
suffix = "original_domain"
suffix = f"{suffix}.yml" if migrate_file_only else suffix
backup_location = domain_parent_dir / suffix
if backup_location.exists():
backup_location_str = "directory" if backup_location.is_dir() else "file"
raise RasaException(
f"The domain from '{domain_path}' could not be migrated since a "
f"{backup_location_str} already exists at {backup_location}. "
f"Please make sure there is no file or folder at {backup_location}."
)
# Choose a default output location if nothing was specified
if out_path is None:
suffix = DEFAULT_DOMAIN_PATH if migrate_file_only else "new_domain"
out_path = domain_parent_dir / suffix
# Ensure the output location is not already in-use
if not migrate_file_only:
if out_path.is_dir() and any(out_path.iterdir()):
raise RasaException(
f"The domain from '{domain_path}' could not be migrated to "
f"{out_path} because that folder is not empty."
"Please remove the folder and try again."
)
else:
if out_path.is_file():
raise RasaException(
f"The domain from '{domain_path}' could not be migrated to "
f"{out_path} because a file already exists."
"Please remove the file and try again."
)
# Sanity Check: Assert the files to be migrated aren't in 3.0 format already
# Note: we do not enforce that the version tag is 2.0 everywhere + validate that
# migrate-able domain files are among these files later
original_files = (
[file for file in domain_path.iterdir() if Domain.is_domain_file(file)]
if domain_path.is_dir()
else [domain_path]
)
migrated_files = [
file
for file in original_files
if rasa.shared.utils.io.read_yaml_file(file).get("version") == "3.0"
]
if migrated_files:
raise RasaException(
f"Some of the given files ({[file for file in migrated_files]}) "
f"have already been migrated to Rasa 3.0 format. Please remove these "
f"migrated files (or replace them with files in 2.0 format) and try again."
)
# Validate given domain file(s) and migrate them
try:
created_out_dir = False
if not migrate_file_only:
if not out_path.is_dir():
rasa.shared.utils.io.raise_warning(
f"The out path provided did not exist yet. Created directory "
f"{out_path}."
)
out_path.mkdir(parents=True)
created_out_dir = True
backup_location.mkdir()
original_domain = _migrate_domain_files(
domain_path, backup_location, out_path
)
else:
if not Domain.is_domain_file(domain_path):
raise RasaException(
f"The file '{domain_path}' could not be validated as a "
f"domain file. Only domain yaml files can be migrated. "
)
original_domain = _create_back_up(domain_path, backup_location)
new_forms, updated_slots = _migrate_form_slots(original_domain)
new_slots = _migrate_auto_fill_and_custom_slots(original_domain, updated_slots)
_write_final_domain(domain_path, new_forms, new_slots, out_path)
rasa.shared.utils.cli.print_success(
f"Your domain file '{str(domain_path)}' was successfully migrated! "
f"The migrated version is now '{str(out_path)}'. "
f"The original domain file is backed-up at '{str(backup_location)}'."
)
except Exception as e:
# Remove the backups if migration couldn't be completed
if backup_location.is_dir():
shutil.rmtree(backup_location)
if out_path.is_dir():
if created_out_dir:
shutil.rmtree(out_path)
else: # just remove contained files so we do not mess with access rights
for f in out_path.glob("*"):
f.unlink()
if backup_location.is_file():
backup_location.unlink()
raise e
|
def migrate_domain_format(
domain_path: Union[Text, Path], out_path: Optional[Union[Text, Path]],
) -> None:
"""Converts 2.0 domain to 3.0 format."""
domain_path = Path(domain_path)
out_path = Path(out_path) if out_path else None
domain_parent_dir = domain_path.parent
migrate_file_only = domain_path.is_file()
# Ensure the backup location does not exist yet
# Note: We demand that file as well as folder with this name gets deleted before
# the command is run to avoid confusion afterwards.
suffix = "original_domain"
suffix = f"{suffix}.yml" if migrate_file_only else suffix
backup_location = domain_parent_dir / suffix
if backup_location.exists():
backup_location_str = "directory" if backup_location.is_dir() else "file"
raise RasaException(
f"The domain from '{domain_path}' could not be migrated since a "
f"{backup_location_str} already exists at {backup_location}. "
f"Please make sure there is no file or folder at {backup_location}."
)
# Choose a default output location if nothing was specified
if out_path is None:
suffix = "new_domain.yml" if migrate_file_only else "new_domain"
out_path = domain_parent_dir / suffix
# Ensure the output location is not already in-use
if not migrate_file_only:
if out_path.is_dir() and any(out_path.iterdir()):
raise RasaException(
f"The domain from '{domain_path}' could not be migrated to "
f"{out_path} because that folder is not empty."
"Please remove the folder and try again."
)
else:
if out_path.is_file():
raise RasaException(
f"The domain from '{domain_path}' could not be migrated to "
f"{out_path} because a file already exists."
"Please remove the file and try again."
)
# Sanity Check: Assert the files to be migrated aren't in 3.0 format already
# Note: we do not enforce that the version tag is 2.0 everywhere + validate that
# migrate-able domain files are among these files later
original_files = (
[file for file in domain_path.iterdir() if Domain.is_domain_file(file)]
if domain_path.is_dir()
else [domain_path]
)
migrated_files = [
file
for file in original_files
if rasa.shared.utils.io.read_yaml_file(file).get("version") == "3.0"
]
if migrated_files:
raise RasaException(
f"Some of the given files ({[file for file in migrated_files]}) "
f"have already been migrated to Rasa 3.0 format. Please remove these "
f"migrated files (or replace them with files in 2.0 format) and try again."
)
# Validate given domain file(s) and migrate them
try:
created_out_dir = False
if not migrate_file_only:
if not out_path.is_dir():
rasa.shared.utils.io.raise_warning(
f"The out path provided did not exist yet. Created directory "
f"{out_path}."
)
out_path.mkdir(parents=True)
created_out_dir = True
backup_location.mkdir()
original_domain = _migrate_domain_files(
domain_path, backup_location, out_path
)
else:
if not Domain.is_domain_file(domain_path):
raise RasaException(
f"The file '{domain_path}' could not be validated as a "
f"domain file. Only domain yaml files can be migrated. "
)
original_domain = _create_back_up(domain_path, backup_location)
new_forms, updated_slots = _migrate_form_slots(original_domain)
new_slots = _migrate_auto_fill_and_custom_slots(original_domain, updated_slots)
_write_final_domain(domain_path, new_forms, new_slots, out_path)
rasa.shared.utils.cli.print_success(
f"Your domain file '{str(domain_path)}' was successfully migrated! "
f"The migrated version is now '{str(out_path)}'. "
f"The original domain file is backed-up at '{str(backup_location)}'."
)
except Exception as e:
# Remove the backups if migration couldn't be completed
if backup_location.is_dir():
shutil.rmtree(backup_location)
if out_path.is_dir():
if created_out_dir:
shutil.rmtree(out_path)
else: # just remove contained files so we do not mess with access rights
for f in out_path.glob("*"):
f.unlink()
if backup_location.is_file():
backup_location.unlink()
raise e
|
31,015 |
def install_nightly_packs(client, host, prints_manager, thread_index, packs_to_install, request_timeout=999999):
"""
Install content packs on nightly build.
Args:
client(demisto_client): The configured client to use.
host (str): The server URL.
prints_manager (ParallelPrintsManager): Print manager object.
thread_index (int): the thread index.
packs_to_install (list): A list of the packs to install.
request_timeout (int): Timeout settings for the installation request.
Returns:
"""
packs_to_install_str = ', '.join([pack['id'] for pack in packs_to_install])
message = 'Installing the following packs in server {}:\n{}'.format(host, packs_to_install_str)
prints_manager.add_print_job(message, print_color, thread_index, LOG_COLORS.GREEN, include_timestamp=True)
prints_manager.execute_thread_prints(thread_index)
# make the pack installation request
global PACK_INSTALL
PACK_INSTALL = False
request_data = {
'packs': packs_to_install,
'ignoreWarnings': True
}
while PACK_INSTALL is not True:
try:
response_data, status_code, _ = demisto_client.generic_request_func(client,
path='/contentpacks/marketplace/install',
method='POST',
body=request_data,
accept='application/json',
_request_timeout=request_timeout)
if 200 <= status_code < 300:
message = 'Packs were successfully installed!\n'
prints_manager.add_print_job(message, print_color, thread_index, LOG_COLORS.GREEN,
include_timestamp=True)
else:
result_object = ast.literal_eval(response_data)
message = result_object.get('message', '')
err_msg = f'Failed to install packs - with status code {status_code}\n{message}\n'
prints_manager.add_print_job(err_msg, print_error, thread_index, include_timestamp=True)
raise Exception(err_msg)
PACK_INSTALL = True
break
except Exception as e:
err_msg = f'The request to install packs has failed. Reason:\n{str(e)}\n'
prints_manager.add_print_job(err_msg, print_error, thread_index, include_timestamp=True)
PACK_INSTALL = False
pack_id = ''
message = str(e).split('\n')
# Get the pack ID of the failed pack.
for line in message:
if line.startswith('HTTP response body: '):
error_message = json.loads(line.split(': ', 1)[1])
error = error_message.get('error')
pack_id = error.split()[-2]
# Removed the bad pack from the list
packs = [pack for pack in packs_to_install if not (pack['id'] == pack_id)]
request_data = {
'packs': packs,
'ignoreWarnings': True
}
finally:
prints_manager.execute_thread_prints(thread_index)
|
def install_nightly_packs(client, host, prints_manager, thread_index, packs_to_install, request_timeout=999999):
"""
Install content packs on nightly build.
Args:
client(demisto_client): The configured client to use.
host (str): The server URL.
prints_manager (ParallelPrintsManager): Print manager object.
thread_index (int): the thread index.
packs_to_install (list): A list of the packs to install.
request_timeout (int): Timeout settings for the installation request.
Returns:
"""
packs_to_install_str = ', '.join([pack['id'] for pack in packs_to_install])
message = 'Installing the following packs in server {}:\n{}'.format(host, packs_to_install_str)
prints_manager.add_print_job(message, print_color, thread_index, LOG_COLORS.GREEN, include_timestamp=True)
prints_manager.execute_thread_prints(thread_index)
# make the pack installation request
global PACK_INSTALL
PACK_INSTALL = False
request_data = {
'packs': packs_to_install,
'ignoreWarnings': True
}
while not PACK_INSTALL:
try:
response_data, status_code, _ = demisto_client.generic_request_func(client,
path='/contentpacks/marketplace/install',
method='POST',
body=request_data,
accept='application/json',
_request_timeout=request_timeout)
if 200 <= status_code < 300:
message = 'Packs were successfully installed!\n'
prints_manager.add_print_job(message, print_color, thread_index, LOG_COLORS.GREEN,
include_timestamp=True)
else:
result_object = ast.literal_eval(response_data)
message = result_object.get('message', '')
err_msg = f'Failed to install packs - with status code {status_code}\n{message}\n'
prints_manager.add_print_job(err_msg, print_error, thread_index, include_timestamp=True)
raise Exception(err_msg)
PACK_INSTALL = True
break
except Exception as e:
err_msg = f'The request to install packs has failed. Reason:\n{str(e)}\n'
prints_manager.add_print_job(err_msg, print_error, thread_index, include_timestamp=True)
PACK_INSTALL = False
pack_id = ''
message = str(e).split('\n')
# Get the pack ID of the failed pack.
for line in message:
if line.startswith('HTTP response body: '):
error_message = json.loads(line.split(': ', 1)[1])
error = error_message.get('error')
pack_id = error.split()[-2]
# Removed the bad pack from the list
packs = [pack for pack in packs_to_install if not (pack['id'] == pack_id)]
request_data = {
'packs': packs,
'ignoreWarnings': True
}
finally:
prints_manager.execute_thread_prints(thread_index)
|
55,685 |
def build_from_cfg(cfg: Dict,
registry: Any,
default_args: Optional[Dict] = None) -> nn.Module:
"""Build a module from config dict when it is a class configuration, or
call a function from config dict when it is a function configuration.
Example:
>>> MODELS = Registry('models')
>>> @MODELS.register_module()
>>> class ResNet:
>>> pass
>>> resnet = build_from_cfg(dict(type='Resnet'), MODELS)
>>> # Returns an instantiated object
>>> @MODELS.register_module()
>>> def resnet50():
>>> pass
>>> resnet = build_from_cfg(dict(type='resnet50'), MODELS)
>>> # Return a result of the calling function
Args:
cfg (dict): Config dict. It should at least contain the key "type".
registry (:obj:`Registry`): The registry to search the type from.
default_args (dict, optional): Default initialization arguments.
Returns:
object: The constructed object.
"""
if not isinstance(cfg, dict):
raise TypeError(f'cfg must be a dict, but got {type(cfg)}')
if 'type' not in cfg:
if default_args is None or 'type' not in default_args:
raise KeyError(
'`cfg` or `default_args` must contain the key "type", '
f'but got {cfg}\n{default_args}')
if not isinstance(registry, Registry):
raise TypeError('registry must be an mmcv.Registry object, '
f'but got {type(registry)}')
if not (isinstance(default_args, dict) or default_args is None):
raise TypeError('default_args must be a dict or None, '
f'but got {type(default_args)}')
args = cfg.copy()
if default_args is not None:
for name, value in default_args.items():
args.setdefault(name, value)
obj_type = args.pop('type')
if isinstance(obj_type, str):
obj_cls = registry.get(obj_type)
if obj_cls is None:
raise KeyError(
f'{obj_type} is not in the {registry.name} registry')
elif inspect.isclass(obj_type) or inspect.isfunction(obj_type):
obj_cls = obj_type
else:
raise TypeError(
f'type must be a str or valid type, but got {type(obj_type)}')
try:
return obj_cls(**args)
except Exception as e:
# Normal TypeError does not print class name.
raise type(e)(f'{obj_cls.__name__}: {e}')
|
def build_from_cfg(cfg: Dict,
registry: 'Registry',
default_args: Optional[Dict] = None) -> Any:
"""Build a module from config dict when it is a class configuration, or
call a function from config dict when it is a function configuration.
Example:
>>> MODELS = Registry('models')
>>> @MODELS.register_module()
>>> class ResNet:
>>> pass
>>> resnet = build_from_cfg(dict(type='Resnet'), MODELS)
>>> # Returns an instantiated object
>>> @MODELS.register_module()
>>> def resnet50():
>>> pass
>>> resnet = build_from_cfg(dict(type='resnet50'), MODELS)
>>> # Return a result of the calling function
Args:
cfg (dict): Config dict. It should at least contain the key "type".
registry (:obj:`Registry`): The registry to search the type from.
default_args (dict, optional): Default initialization arguments.
Returns:
object: The constructed object.
"""
if not isinstance(cfg, dict):
raise TypeError(f'cfg must be a dict, but got {type(cfg)}')
if 'type' not in cfg:
if default_args is None or 'type' not in default_args:
raise KeyError(
'`cfg` or `default_args` must contain the key "type", '
f'but got {cfg}\n{default_args}')
if not isinstance(registry, Registry):
raise TypeError('registry must be an mmcv.Registry object, '
f'but got {type(registry)}')
if not (isinstance(default_args, dict) or default_args is None):
raise TypeError('default_args must be a dict or None, '
f'but got {type(default_args)}')
args = cfg.copy()
if default_args is not None:
for name, value in default_args.items():
args.setdefault(name, value)
obj_type = args.pop('type')
if isinstance(obj_type, str):
obj_cls = registry.get(obj_type)
if obj_cls is None:
raise KeyError(
f'{obj_type} is not in the {registry.name} registry')
elif inspect.isclass(obj_type) or inspect.isfunction(obj_type):
obj_cls = obj_type
else:
raise TypeError(
f'type must be a str or valid type, but got {type(obj_type)}')
try:
return obj_cls(**args)
except Exception as e:
# Normal TypeError does not print class name.
raise type(e)(f'{obj_cls.__name__}: {e}')
|
20,536 |
def func_median(data, mask=None, map_clusters=None):
"""
Compute weighted median.
Code inspired from: https://gist.github.com/tinybike/d9ff1dad515b66cc0d87
:param data: nd-array: input data
:param mask: (n+1)d-array: input mask
:param map_clusters: not used
:return:
"""
# Check if mask has an additional dimension (in case it is a label). If so, select the first label
if mask.ndim == data.ndim + 1:
mask = mask[..., 0]
data, mask = data.reshape(-1), mask.reshape(-1)
s_data, s_mask = map(np.array, zip(*sorted(zip(data, mask))))
midpoint = 0.5 * sum(s_mask)
if any(mask > midpoint):
w_median = (data[mask == np.max(mask)])[0]
else:
cs_mask = np.cumsum(s_mask)
idx = np.where(cs_mask <= midpoint)[0][-1]
if cs_mask[idx] == midpoint:
w_median = np.mean(s_data[idx:idx + 2])
else:
w_median = s_data[idx + 1]
return w_median, None
|
def func_median(data, mask, map_clusters=None):
"""
Compute weighted median.
Code inspired from: https://gist.github.com/tinybike/d9ff1dad515b66cc0d87
:param data: nd-array: input data
:param mask: (n+1)d-array: input mask
:param map_clusters: not used
:return:
"""
# Check if mask has an additional dimension (in case it is a label). If so, select the first label
if mask.ndim == data.ndim + 1:
mask = mask[..., 0]
data, mask = data.reshape(-1), mask.reshape(-1)
s_data, s_mask = map(np.array, zip(*sorted(zip(data, mask))))
midpoint = 0.5 * sum(s_mask)
if any(mask > midpoint):
w_median = (data[mask == np.max(mask)])[0]
else:
cs_mask = np.cumsum(s_mask)
idx = np.where(cs_mask <= midpoint)[0][-1]
if cs_mask[idx] == midpoint:
w_median = np.mean(s_data[idx:idx + 2])
else:
w_median = s_data[idx + 1]
return w_median, None
|
16,496 |
def _get_config_schema(input_dict: dict[str, Any] = None) -> vol.Schema:
"""
Return schema defaults for init step based on user input/config dict.
Retain info already provided for future form views by setting them
as defaults in schema.
"""
if input_dict is None:
input_dict = {}
return vol.Schema(
{
vol.Required(
CONF_NAME, default=input_dict.get(CONF_NAME, DEFAULT_NAME)
): str,
vol.Required(CONF_HOST, default=input_dict.get(CONF_HOST)): str,
vol.Required(
CONF_DEVICE_CLASS,
default=input_dict.get(CONF_DEVICE_CLASS, DEFAULT_DEVICE_CLASS),
): vol.All(
str,
vol.Lower,
vol.In([MediaPlayerDeviceClass.TV, MediaPlayerDeviceClass.SPEAKER]),
),
vol.Optional(
CONF_ACCESS_TOKEN, default=input_dict.get(CONF_ACCESS_TOKEN, "")
): str,
},
extra=vol.REMOVE_EXTRA,
)
|
def _get_config_schema(input_dict: dict[str, Any] = None) -> vol.Schema:
"""
Return schema defaults for init step based on user input/config dict.
Retain info already provided for future form views by setting them
as defaults in schema.
"""
if input_dict is None:
input_dict = {}
return vol.Schema(
{
vol.Required(
CONF_NAME, default=input_dict.get(CONF_NAME, DEFAULT_NAME)
): str,
vol.Required(CONF_HOST, default=input_dict.get(CONF_HOST)): str,
vol.Required(
CONF_DEVICE_CLASS,
default=input_dict.get(CONF_DEVICE_CLASS, DEFAULT_DEVICE_CLASS),
): vol.All(
str,
vol.Lower,
vol.Coerce(MediaPlayerDeviceClass),
),
vol.Optional(
CONF_ACCESS_TOKEN, default=input_dict.get(CONF_ACCESS_TOKEN, "")
): str,
},
extra=vol.REMOVE_EXTRA,
)
|
35,135 |
def assert_partitioned_function(orig_mod, cmsisnn_mod, is_num_calls_same=True):
"""
If the KCompiler attribute is missing, this function raises an assertion.
Parameters
----------
orig_mod : IRModule
Pre-partitioning module
cmsisnn_mod : IRModule
Post-partitioning module
is_num_calls_same: bool
Are number of CallNode(s) before and after partitioning expected to be the same
"""
attrs = [
cmsisnn_mod[var.name_hint].attrs
for var in cmsisnn_mod.get_global_vars()
if cmsisnn_mod[var.name_hint].attrs
]
assert any(attrs), "At least one function with external attributes was expected."
compilers = [
key == "Compiler" and value == "cmsis-nn" for attr in attrs for key, value in attr.items()
]
assert any(compilers), "Module does not contain function for cmsisnn target."
if is_num_calls_same:
assert count_num_calls(orig_mod) == count_num_calls(
cmsisnn_mod
), "Number of calls changed during partitioning"
|
def assert_partitioned_function(orig_mod, cmsisnn_mod, num_call_nodes_expected_unchanged=True):
"""
If the KCompiler attribute is missing, this function raises an assertion.
Parameters
----------
orig_mod : IRModule
Pre-partitioning module
cmsisnn_mod : IRModule
Post-partitioning module
is_num_calls_same: bool
Are number of CallNode(s) before and after partitioning expected to be the same
"""
attrs = [
cmsisnn_mod[var.name_hint].attrs
for var in cmsisnn_mod.get_global_vars()
if cmsisnn_mod[var.name_hint].attrs
]
assert any(attrs), "At least one function with external attributes was expected."
compilers = [
key == "Compiler" and value == "cmsis-nn" for attr in attrs for key, value in attr.items()
]
assert any(compilers), "Module does not contain function for cmsisnn target."
if is_num_calls_same:
assert count_num_calls(orig_mod) == count_num_calls(
cmsisnn_mod
), "Number of calls changed during partitioning"
|
30,637 |
def list_watchlists_command(client: Client) -> CommandResults:
contents = []
headers = ['ID', 'Name', 'Description', 'create_timestamp', 'Alerts_enabled', 'Tags_enabled', 'Report_ids',
'Last_update_timestamp', 'Classifier']
result = client.list_watchlists_request()
watchlists = result.get('results')
if not watchlists:
return 'No watchlists were found.'
for watchlist in watchlists:
contents.append({
'Name': watchlist.get('name'),
'ID': watchlist.get('id'),
'Description': watchlist.get('description'),
'Tags_enabled': watchlist.get('tags_enabled'),
'Alerts_enabled': watchlist.get('alerts_enabled'),
'create_timestamp': convert_unix_to_timestamp(watchlist.get('create_timestamp')),
'Last_update_timestamp': convert_unix_to_timestamp(watchlist.get('last_update_timestamp')),
'Report_ids': watchlist.get('report_ids'),
'Classifier': watchlist.get('classifier')
})
readable_output = tableToMarkdown('Watchlists list ', contents, headers, removeNull=True)
results = CommandResults(
outputs_prefix='CarbonBlackEEDR.Watchlist',
outputs_key_field='id',
outputs=contents,
readable_output=readable_output,
raw_response=result
)
return results
|
def list_watchlists_command(client: Client) -> CommandResults:
contents = []
headers = ['ID', 'Name', 'Description', 'create_timestamp', 'Alerts_enabled', 'Tags_enabled', 'Report_ids',
'Last_update_timestamp', 'Classifier']
result = client.list_watchlists_request()
watchlists = result.get('results', [])
if not watchlists:
return 'No watchlists were found.'
for watchlist in watchlists:
contents.append({
'Name': watchlist.get('name'),
'ID': watchlist.get('id'),
'Description': watchlist.get('description'),
'Tags_enabled': watchlist.get('tags_enabled'),
'Alerts_enabled': watchlist.get('alerts_enabled'),
'create_timestamp': convert_unix_to_timestamp(watchlist.get('create_timestamp')),
'Last_update_timestamp': convert_unix_to_timestamp(watchlist.get('last_update_timestamp')),
'Report_ids': watchlist.get('report_ids'),
'Classifier': watchlist.get('classifier')
})
readable_output = tableToMarkdown('Watchlists list ', contents, headers, removeNull=True)
results = CommandResults(
outputs_prefix='CarbonBlackEEDR.Watchlist',
outputs_key_field='id',
outputs=contents,
readable_output=readable_output,
raw_response=result
)
return results
|
17,547 |
def _get_form_field_from_name_or_xpath(xform, name=None, xpath=None):
"""
Given a name or xpath, get xform field
"""
if name:
return get_field_from_field_name(name, xform)
elif xpath:
return get_field_from_field_xpath(xpath, xform)
|
def _get_form_field_from_name_or_xpath(xform, name=None, xpath=None):
"""
Given a name or xpath, get xform field
"""
return get_field_from_field_xpath(name or xpath, xform)
|
37,212 |
def active_backend():
"""Get the backend of the currently active context.
Returns:
BaseBackend: The active backend in the currently active builder context.
Raises:
exceptions.PulseError: If the builder does not have a backend set.
"""
builder = _active_builder().backend
if builder is None:
raise BackendNotSet(
'This function requires the active builder to '
'have a "backend" set.')
return builder
|
def active_backend():
"""Get the backend of the currently active context.
Returns:
BaseBackend: The active backend in the currently active builder context.
Raises:
exceptions.PulseError: If the builder does not have a backend set.
"""
builder = _active_builder().backend
if builder is None:
raise BackendNotSet(
'This function requires the active builder to '
'have a "backend" set.')
return builder
|
4,551 |
def _check_param_grid(estimator, X, y, param_grid=None):
"""Check param_grid and return sensible default if param_grid is None.
Parameters
-----------
estimator: str, optional
The estimator to choose among: 'svc', 'svc_l2', 'svc_l1', 'logistic',
'logistic_l1', 'logistic_l2', 'ridge', 'ridge_classifier',
'ridge_regressor', and 'svr'. Note that the 'svc' and 'svc_l2';
'logistic' and 'logistic_l2'; 'ridge' and 'ridge_regressor'
correspond to the same estimator. Default 'svc'.
X: list of Niimg-like objects
See http://nilearn.github.io/manipulating_images/input_output.html
Data on which model is to be fitted. If this is a list,
the affine is considered the same for all.
y: array or list of shape (n_samples)
The dependent variable (age, sex, IQ, yes/no, etc.).
Target variable to predict. Must have exactly as many elements as
3D images in niimg.
param_grid: dict of str to sequence, or sequence of such. Default None
The parameter grid to explore, as a dictionary mapping estimator
parameters to sequences of allowed values.
An empty dict signifies default parameters.
A sequence of dicts signifies a sequence of grids to search, and is
useful to avoid exploring parameter combinations that make no sense
or have no effect. See scikit-learn documentation for more information.
For Dummy estimators, parameter grid defaults to empty as these
estimators are unreal there exists no interesting parameters to grid
search.
Returns
-------
param_grid: dict of str to sequence, or sequence of such. Sensible default
dict has size 1 for linear models.
"""
if param_grid is None:
param_grid = {}
# define loss function
if isinstance(estimator, LogisticRegression):
loss = 'log'
elif isinstance(estimator,
(LinearSVC, RidgeCV, RidgeClassifierCV, SVR)):
loss = 'squared_hinge'
elif isinstance(estimator,
(DummyClassifier, DummyRegressor)):
pass
else:
raise ValueError(
"Invalid estimator. The supported estimators are: {}".format(
list(SUPPORTED_ESTIMATORS.keys()))
)
# define sensible default for different types of estimators
if hasattr(estimator, 'penalty') and (estimator.penalty == 'l1'):
min_c = l1_min_c(X, y, loss=loss)
else:
min_c = 0.5
if not isinstance(estimator, (RidgeCV, RidgeClassifierCV,
DummyClassifier, DummyRegressor)):
param_grid['C'] = np.array([2, 20, 200]) * min_c
else:
param_grid = {}
return param_grid
|
def _check_param_grid(estimator, X, y, param_grid=None):
"""Check param_grid and return sensible default if param_grid is None.
Parameters
-----------
estimator: str, optional
The estimator to choose among: 'svc', 'svc_l2', 'svc_l1', 'logistic',
'logistic_l1', 'logistic_l2', 'ridge', 'ridge_classifier',
'ridge_regressor', and 'svr'. Note that the 'svc' and 'svc_l2';
'logistic' and 'logistic_l2'; 'ridge' and 'ridge_regressor'
correspond to the same estimator. Default 'svc'.
X: list of Niimg-like objects
See http://nilearn.github.io/manipulating_images/input_output.html
Data on which model is to be fitted. If this is a list,
the affine is considered the same for all.
y: array or list of shape (n_samples)
The dependent variable (age, sex, IQ, yes/no, etc.).
Target variable to predict. Must have exactly as many elements as
3D images in niimg.
param_grid: dict of str to sequence, or sequence of such. Default None
The parameter grid to explore, as a dictionary mapping estimator
parameters to sequences of allowed values.
An empty dict signifies default parameters.
A sequence of dicts signifies a sequence of grids to search, and is
useful to avoid exploring parameter combinations that make no sense
or have no effect. See scikit-learn documentation for more information.
For Dummy estimators, parameter grid defaults to empty as these
estimators do not have hyperparameters to grid
search.
Returns
-------
param_grid: dict of str to sequence, or sequence of such. Sensible default
dict has size 1 for linear models.
"""
if param_grid is None:
param_grid = {}
# define loss function
if isinstance(estimator, LogisticRegression):
loss = 'log'
elif isinstance(estimator,
(LinearSVC, RidgeCV, RidgeClassifierCV, SVR)):
loss = 'squared_hinge'
elif isinstance(estimator,
(DummyClassifier, DummyRegressor)):
pass
else:
raise ValueError(
"Invalid estimator. The supported estimators are: {}".format(
list(SUPPORTED_ESTIMATORS.keys()))
)
# define sensible default for different types of estimators
if hasattr(estimator, 'penalty') and (estimator.penalty == 'l1'):
min_c = l1_min_c(X, y, loss=loss)
else:
min_c = 0.5
if not isinstance(estimator, (RidgeCV, RidgeClassifierCV,
DummyClassifier, DummyRegressor)):
param_grid['C'] = np.array([2, 20, 200]) * min_c
else:
param_grid = {}
return param_grid
|
57,518 |
def is_typeddict_special(type_: Type[Any]) -> bool:
"""
Check if type is a TypedDict special form (Required or NotRequired).
"""
return type_ is TypedDictRequired or type_ is TypedDictNotRequired
|
def is_typeddict_special(type_: Any) -> bool:
"""
Check if type is a TypedDict special form (Required or NotRequired).
"""
return type_ is not None and (type_ is TypedDictRequired or type_ is TypedDictNotRequired)
|
26,306 |
def invalid_ignored_if_false(dist, attr, value):
if not value:
warnings.warn("{attr} is ignored")
return
raise DistutilsSetupError(f"{attr} is invalid if it is set to a true value.")
|
def invalid_ignored_if_false(dist, attr, value):
if not value:
warnings.warn("{attr} is ignored", DistDeprecationWarning)
return
raise DistutilsSetupError(f"{attr} is invalid if it is set to a true value.")
|
31,750 |
def test_custom_indicator(client: Client) -> CommandResults:
# Command using a custom indicator example
result = client.baseintegration_dummy("test")
score = Common.DBotScore.GOOD
indicator_value = 'custom_value'
# Create a DBotScore object
# Give it an indicator_type of DBotScoreType.CUSTOM
dbot_score = Common.DBotScore(
indicator=indicator_value,
indicator_type=DBotScoreType.CUSTOM,
integration_name='DummyIntegration',
score=score
)
# Create a data dictionary, which is the data of the indicator
data = {
'param1': 'value1',
'param2': 'value2',
}
# Create the CustomIndicator
custom_indicator = Common.CustomIndicator(
indicator_type='MyCustomIndicator',
dbot_score=dbot_score,
value=indicator_value,
data=data,
context_prefix='custom',
)
# Return a CommandResults object containing the CustomIndicator object created
return CommandResults(
readable_output='custom_value',
outputs=result,
outputs_prefix='Demo.Result',
outputs_key_field='test_key_field',
indicator=custom_indicator
)
|
def test_custom_indicator(client: Client) -> CommandResults:
# Command using a custom indicator example
result = client.baseintegration_dummy("test")
score = Common.DBotScore.GOOD
indicator_value = 'custom_value'
# Create a DBotScore object
# Give it an indicator_type of DBotScoreType.CUSTOM
dbot_score = Common.DBotScore(
indicator=indicator_value,
indicator_type=DBotScoreType.CUSTOM,
integration_name='DummyIntegration',
score=score
)
# Create a data dictionary, which is the data of the indicator
data = {
'param1': 'value1',
'param2': 'value2',
}
# Create the CustomIndicator
custom_indicator = Common.CustomIndicator(
indicator_type='MyCustomIndicator',
dbot_score=dbot_score,
value=indicator_value,
data=data,
context_prefix='custom',
)
# Return a CommandResults object containing the CustomIndicator object created
return CommandResults(
readable_output='custom_value',
outputs=result,
outputs_prefix='Demo.Result',
outputs_key_field='test_key_field',
indicator=custom_indicator,
)
|
39,137 |
def create_fb_matrix(
n_freqs: int,
f_min: float,
f_max: float,
n_mels: int,
sample_rate: int,
norm: Optional[str] = None
) -> Tensor:
r"""Create a frequency bin conversion matrix.
Args:
n_freqs (int): Number of frequencies to highlight/apply
f_min (float): Minimum frequency (Hz)
f_max (float): Maximum frequency (Hz)
n_mels (int): Number of mel filterbanks
sample_rate (int): Sample rate of the audio waveform
norm (Optional[str]): If 'slaney', divide the triangular mel weights by the width of the mel band
(area normalization). (Default: ``None``)
Returns:
Tensor: Triangular filter banks (fb matrix) of size (``n_freqs``, ``n_mels``)
meaning number of frequencies to highlight/apply to x the number of filterbanks.
Each column is a filterbank so that assuming there is a matrix A of
size (..., ``n_freqs``), the applied result would be
``A * create_fb_matrix(A.size(-1), ...)``.
"""
if norm is not None and norm != "slaney":
raise ValueError("norm must be one of None or 'slaney'")
# freq bins
# Equivalent filterbank construction by Librosa
all_freqs = torch.linspace(0, sample_rate // 2, n_freqs)
# calculate mel freq bins
# hertz to mel(f) is 2595. * math.log10(1. + (f / 700.))
m_min = 2595.0 * math.log10(1.0 + (f_min / 700.0))
m_max = 2595.0 * math.log10(1.0 + (f_max / 700.0))
m_pts = torch.linspace(m_min, m_max, n_mels + 2)
# mel to hertz(mel) is 700. * (10**(mel / 2595.) - 1.)
f_pts = 700.0 * (10 ** (m_pts / 2595.0) - 1.0)
# calculate the difference between each mel point and each stft freq point in hertz
f_diff = f_pts[1:] - f_pts[:-1] # (n_mels + 1)
slopes = f_pts.unsqueeze(0) - all_freqs.unsqueeze(1) # (n_freqs, n_mels + 2)
# create overlapping triangles
zero = torch.zeros(1)
down_slopes = (-1.0 * slopes[:, :-2]) / f_diff[:-1] # (n_freqs, n_mels)
up_slopes = slopes[:, 2:] / f_diff[1:] # (n_freqs, n_mels)
fb = torch.max(zero, torch.min(down_slopes, up_slopes))
if norm is not None and norm == "slaney":
# Slaney-style mel is scaled to be approx constant energy per channel
enorm = 2.0 / (f_pts[2:n_mels + 2] - f_pts[:n_mels])
fb *= enorm.unsqueeze(0)
if (fb.max(dim=0).values == 0.).any():
warnings.warn(
"At least one mel filterbank has all zero values."
"The value for `n_mels` ({}) may be set too high."
"Or, the value for `n_freqs` ({}) may be set too low.".format(n_mels, n_freqs)
)
return fb
|
def create_fb_matrix(
n_freqs: int,
f_min: float,
f_max: float,
n_mels: int,
sample_rate: int,
norm: Optional[str] = None
) -> Tensor:
r"""Create a frequency bin conversion matrix.
Args:
n_freqs (int): Number of frequencies to highlight/apply
f_min (float): Minimum frequency (Hz)
f_max (float): Maximum frequency (Hz)
n_mels (int): Number of mel filterbanks
sample_rate (int): Sample rate of the audio waveform
norm (Optional[str]): If 'slaney', divide the triangular mel weights by the width of the mel band
(area normalization). (Default: ``None``)
Returns:
Tensor: Triangular filter banks (fb matrix) of size (``n_freqs``, ``n_mels``)
meaning number of frequencies to highlight/apply to x the number of filterbanks.
Each column is a filterbank so that assuming there is a matrix A of
size (..., ``n_freqs``), the applied result would be
``A * create_fb_matrix(A.size(-1), ...)``.
"""
if norm is not None and norm != "slaney":
raise ValueError("norm must be one of None or 'slaney'")
# freq bins
# Equivalent filterbank construction by Librosa
all_freqs = torch.linspace(0, sample_rate // 2, n_freqs)
# calculate mel freq bins
# hertz to mel(f) is 2595. * math.log10(1. + (f / 700.))
m_min = 2595.0 * math.log10(1.0 + (f_min / 700.0))
m_max = 2595.0 * math.log10(1.0 + (f_max / 700.0))
m_pts = torch.linspace(m_min, m_max, n_mels + 2)
# mel to hertz(mel) is 700. * (10**(mel / 2595.) - 1.)
f_pts = 700.0 * (10 ** (m_pts / 2595.0) - 1.0)
# calculate the difference between each mel point and each stft freq point in hertz
f_diff = f_pts[1:] - f_pts[:-1] # (n_mels + 1)
slopes = f_pts.unsqueeze(0) - all_freqs.unsqueeze(1) # (n_freqs, n_mels + 2)
# create overlapping triangles
zero = torch.zeros(1)
down_slopes = (-1.0 * slopes[:, :-2]) / f_diff[:-1] # (n_freqs, n_mels)
up_slopes = slopes[:, 2:] / f_diff[1:] # (n_freqs, n_mels)
fb = torch.max(zero, torch.min(down_slopes, up_slopes))
if norm is not None and norm == "slaney":
# Slaney-style mel is scaled to be approx constant energy per channel
enorm = 2.0 / (f_pts[2:n_mels + 2] - f_pts[:n_mels])
fb *= enorm.unsqueeze(0)
if (fb.max(dim=0).values == 0.).any():
warnings.warn(
"At least one mel filterbank has all zero values."
"The value for `n_mels` ({}) may be set too high."
f"Or, the value for `n_freqs` ({n_freqs}) may be set too low."
)
return fb
|
58,937 |
def create_mqb_hud_control(packer, bus, enabled, steering_pressed, hud_alert, left_lane_visible, right_lane_visible,
ldw_lane_warning_left, ldw_lane_warning_right, ldw_side_dlc_tlc, ldw_dlc, ldw_tlc,
standstill, left_lane_depart, right_lane_depart):
if enabled:
left_lane_hud = 3 if left_lane_visible and not standstill else 2
right_lane_hud = 3 if right_lane_visible and not standstill else 2
else:
left_lane_hud = 1
right_lane_hud = 1
left_lane_hud = 3 if left_lane_depart else left_lane_hud
right_lane_hud = 3 if right_lane_depart else right_lane_hud
values = {
"LDW_Status_LED_gelb": 1 if enabled and steering_pressed else 0,
"LDW_Status_LED_gruen": 1 if enabled and not steering_pressed else 0,
"LDW_Lernmodus_links": left_lane_hud,
"LDW_Lernmodus_rechts": right_lane_hud,
"LDW_Texte": hud_alert,
"LDW_SW_Warnung_links": ldw_lane_warning_left,
"LDW_SW_Warnung_rechts": ldw_lane_warning_right,
"LDW_Seite_DLCTLC": ldw_side_dlc_tlc,
"LDW_DLC": ldw_dlc,
"LDW_TLC": ldw_tlc
}
return packer.make_can_msg("LDW_02", bus, values)
|
def create_mqb_hud_control(packer, bus, enabled, steering_pressed, hud_alert, left_lane_visible, right_lane_visible,
ldw_lane_warning_left, ldw_lane_warning_right, ldw_side_dlc_tlc, ldw_dlc, ldw_tlc,
standstill, left_lane_depart, right_lane_depart):
if enabled:
left_lane_hud = 2 if left_lane_visible and not standstill else 1
right_lane_hud = 2 if right_lane_visible and not standstill else 1
else:
left_lane_hud = 1
right_lane_hud = 1
left_lane_hud = 3 if left_lane_depart else left_lane_hud
right_lane_hud = 3 if right_lane_depart else right_lane_hud
values = {
"LDW_Status_LED_gelb": 1 if enabled and steering_pressed else 0,
"LDW_Status_LED_gruen": 1 if enabled and not steering_pressed else 0,
"LDW_Lernmodus_links": left_lane_hud,
"LDW_Lernmodus_rechts": right_lane_hud,
"LDW_Texte": hud_alert,
"LDW_SW_Warnung_links": ldw_lane_warning_left,
"LDW_SW_Warnung_rechts": ldw_lane_warning_right,
"LDW_Seite_DLCTLC": ldw_side_dlc_tlc,
"LDW_DLC": ldw_dlc,
"LDW_TLC": ldw_tlc
}
return packer.make_can_msg("LDW_02", bus, values)
|
31,070 |
def set_user_status(username, status):
result = admin_api.get_users_by_name(username)
user_id = result[0]["user_id"]
res = admin_api.update_user(user_id, status=status)
demisto.results('User ' + username + ' was set to status ' + status)
|
def set_user_status(username, status):
user_id = get_user_id(username)
res = admin_api.update_user(user_id, status=status)
demisto.results('User ' + username + ' was set to status ' + status)
|
8,714 |
def test_plugin_load_entrypoint(tmpdir):
root = tmpdir.mkdir('loader_mods')
mod_file = root.join('file_mod.py')
mod_file.write(MOCK_MODULE_CONTENT)
# generate setuptools Distribution object
distrib = pkg_resources.Distribution(root.strpath)
sys.path.append(root.strpath)
# load the entrypoint
try:
entrypoint = pkg_resources.EntryPoint(
'test_plugin', 'file_mod', dist=distrib)
plugin = plugins.handlers.EntrypointPluginHandler(entrypoint)
plugin.load()
finally:
sys.path.remove(root.strpath)
assert plugin.name == 'test_plugin'
test_mod = plugin._module
assert hasattr(test_mod, 'first_command')
assert hasattr(test_mod, 'second_command')
assert hasattr(test_mod, 'interval5s')
assert hasattr(test_mod, 'interval10s')
assert hasattr(test_mod, 'example_url')
assert hasattr(test_mod, 'shutdown')
assert hasattr(test_mod, 'ignored')
|
def test_plugin_load_entrypoint(tmpdir):
root = tmpdir.mkdir('loader_mods')
mod_file = root.join('file_mod.py')
mod_file.write(MOCK_MODULE_CONTENT)
# generate setuptools Distribution object
distrib = pkg_resources.Distribution(root.strpath)
sys.path.append(root.strpath)
# load the entry point
try:
entrypoint = pkg_resources.EntryPoint(
'test_plugin', 'file_mod', dist=distrib)
plugin = plugins.handlers.EntrypointPluginHandler(entrypoint)
plugin.load()
finally:
sys.path.remove(root.strpath)
assert plugin.name == 'test_plugin'
test_mod = plugin._module
assert hasattr(test_mod, 'first_command')
assert hasattr(test_mod, 'second_command')
assert hasattr(test_mod, 'interval5s')
assert hasattr(test_mod, 'interval10s')
assert hasattr(test_mod, 'example_url')
assert hasattr(test_mod, 'shutdown')
assert hasattr(test_mod, 'ignored')
|
27,664 |
def xcorr_pick_family(family, stream, shift_len=0.2, min_cc=0.4,
horizontal_chans=['E', 'N', '1', '2'],
vertical_chans=['Z'], cores=1, interpolate=False,
plot=False, plotdir=None, npy=False, npydir=None):
"""
Compute cross-correlation picks for detections in a family.
:type family: `eqcorrscan.core.match_filter.family.Family`
:param family: Family to calculate correlation picks for.
:type stream: `obspy.core.stream.Stream`
:param stream:
Data stream containing data for all (or a subset of) detections in
the Family
:type shift_len: float
:param shift_len:
Shift length allowed for the pick in seconds, will be plus/minus this
amount - default=0.2
:type min_cc: float
:param min_cc:
Minimum cross-correlation value to be considered a pick, default=0.4.
:type horizontal_chans: list
:param horizontal_chans:
List of channel endings for horizontal-channels, on which S-picks will
be made.
:type vertical_chans: list
:param vertical_chans:
List of channel endings for vertical-channels, on which P-picks will
be made.
:type cores: int
:param cores:
Number of cores to use in parallel processing, defaults to one.
:type interpolate: bool
:param interpolate:
Interpolate the correlation function to achieve sub-sample precision.
:type plot: bool
:param plot:
To generate a plot for every detection or not, defaults to False
:type plotdir: str
:param plotdir:
Path to plotting folder, plots will be output here.
:type npy: bool
:param npy:
To generate a binary file in NumPy for every detection or not,
defaults to False
:type npydir: str
:param npydir:
Path to saving folder, NumPy files will be output here.
:return: Dictionary of picked events keyed by detection id.
"""
picked_dict = {}
delta = family.template.st[0].stats.delta
detect_streams_dict = _prepare_data(
family=family, detect_data=stream, shift_len=shift_len)
detection_ids = list(detect_streams_dict.keys())
detect_streams = [detect_streams_dict[detection_id]
for detection_id in detection_ids]
if len(detect_streams) == 0:
Logger.warning("No appropriate data found, check your family and "
"detections - make sure seed ids match")
return picked_dict
if len(detect_streams) != len(family):
Logger.warning("Not all detections have matching data. "
"Proceeding anyway. HINT: Make sure SEED IDs match")
# Correlation function needs a list of streams, we need to maintain order.
ccc, chans = _concatenate_and_correlate(
streams=detect_streams, template=family.template.st, cores=cores)
for i, detection_id in enumerate(detection_ids):
detection = [d for d in family.detections if d.id == detection_id][0]
correlations = ccc[i]
if npy:
os.makedirs(npydir, exist_ok=True)
np.save(os.path.join(npydir, detection_id+'.npy'), correlations)
picked_chans = chans[i]
detect_stream = detect_streams_dict[detection_id]
checksum, cccsum, used_chans = 0.0, 0.0, 0
event = Event()
for correlation, stachan in zip(correlations, picked_chans):
if not stachan.used:
continue
tr = detect_stream.select(
station=stachan.channel[0], channel=stachan.channel[1])[0]
if interpolate:
shift, cc_max = _xcorr_interp(correlation, dt=delta)
else:
cc_max = np.amax(correlation)
shift = np.argmax(correlation) * delta
if np.isnan(cc_max): # pragma: no cover
Logger.error(
'Problematic trace, no cross correlation possible')
continue
picktime = tr.stats.starttime + shift
checksum += cc_max
used_chans += 1
if cc_max < min_cc:
Logger.debug('Correlation of {0} is below threshold, not '
'using'.format(cc_max))
continue
cccsum += cc_max
phase = None
if stachan.channel[1][-1] in vertical_chans:
phase = 'P'
elif stachan.channel[1][-1] in horizontal_chans:
phase = 'S'
_waveform_id = WaveformStreamID(seed_string=tr.id)
event.picks.append(Pick(
waveform_id=_waveform_id, time=picktime,
method_id=ResourceIdentifier('EQcorrscan'), phase_hint=phase,
creation_info='eqcorrscan.core.lag_calc',
evaluation_mode='automatic',
comments=[Comment(text='cc_max={0}'.format(cc_max))]))
event.resource_id = ResourceIdentifier(detection_id)
event.comments.append(Comment(text="detect_val={0}".format(cccsum)))
# Add template-name as comment to events
event.comments.append(Comment(
text="Detected using template: {0}".format(family.template.name)))
if used_chans == detection.no_chans: # pragma: no cover
if detection.detect_val is not None and\
checksum - detection.detect_val < -(0.3 * detection.detect_val):
msg = ('lag-calc has decreased cccsum from %f to %f - '
% (detection.detect_val, checksum))
Logger.error(msg)
continue
else:
Logger.warning(
'Cannot check if cccsum is better, used {0} channels for '
'detection, but {1} are used here'.format(
detection.no_chans, used_chans))
picked_dict.update({detection_id: event})
if plot: # pragma: no cover
for i, event in enumerate(picked_dict.values()):
if len(event.picks) == 0:
continue
plot_stream = detect_streams[i].copy()
template_plot = family.template.st.copy()
pick_stachans = [(pick.waveform_id.station_code,
pick.waveform_id.channel_code)
for pick in event.picks]
for tr in plot_stream:
if (tr.stats.station, tr.stats.channel) \
not in pick_stachans:
plot_stream.remove(tr)
for tr in template_plot:
if (tr.stats.station, tr.stats.channel) \
not in pick_stachans:
template_plot.remove(tr)
if plotdir is not None:
if not os.path.isdir(plotdir):
os.makedirs(plotdir)
savefile = "{plotdir}/{rid}.png".format(
plotdir=plotdir, rid=event.resource_id.id)
plot_repicked(template=template_plot, picks=event.picks,
det_stream=plot_stream, show=False, save=True,
savefile=savefile)
else:
plot_repicked(template=template_plot, picks=event.picks,
det_stream=plot_stream, show=True)
return picked_dict
|
def xcorr_pick_family(family, stream, shift_len=0.2, min_cc=0.4,
horizontal_chans=['E', 'N', '1', '2'],
vertical_chans=['Z'], cores=1, interpolate=False,
plot=False, plotdir=None, npy=False, npydir=None):
"""
Compute cross-correlation picks for detections in a family.
:type family: `eqcorrscan.core.match_filter.family.Family`
:param family: Family to calculate correlation picks for.
:type stream: `obspy.core.stream.Stream`
:param stream:
Data stream containing data for all (or a subset of) detections in
the Family
:type shift_len: float
:param shift_len:
Shift length allowed for the pick in seconds, will be plus/minus this
amount - default=0.2
:type min_cc: float
:param min_cc:
Minimum cross-correlation value to be considered a pick, default=0.4.
:type horizontal_chans: list
:param horizontal_chans:
List of channel endings for horizontal-channels, on which S-picks will
be made.
:type vertical_chans: list
:param vertical_chans:
List of channel endings for vertical-channels, on which P-picks will
be made.
:type cores: int
:param cores:
Number of cores to use in parallel processing, defaults to one.
:type interpolate: bool
:param interpolate:
Interpolate the correlation function to achieve sub-sample precision.
:type plot: bool
:param plot:
To generate a plot for every detection or not, defaults to False
:type plotdir: str
:param plotdir:
Path to plotting folder, plots will be output here.
:type npy: bool
:param npy:
To generate a binary file in NumPy for every detection or not,
defaults to False
:type npydir: str
:param npydir:
Path to saving folder, NumPy files will be output here.
:return: Dictionary of picked events keyed by detection id.
"""
picked_dict = {}
delta = family.template.st[0].stats.delta
detect_streams_dict = _prepare_data(
family=family, detect_data=stream, shift_len=shift_len)
detection_ids = list(detect_streams_dict.keys())
detect_streams = [detect_streams_dict[detection_id]
for detection_id in detection_ids]
if len(detect_streams) == 0:
Logger.warning("No appropriate data found, check your family and "
"detections - make sure seed ids match")
return picked_dict
if len(detect_streams) != len(family):
Logger.warning("Not all detections have matching data. "
"Proceeding anyway. HINT: Make sure SEED IDs match")
# Correlation function needs a list of streams, we need to maintain order.
ccc, chans = _concatenate_and_correlate(
streams=detect_streams, template=family.template.st, cores=cores)
for i, detection_id in enumerate(detection_ids):
detection = [d for d in family.detections if d.id == detection_id][0]
correlations = ccc[i]
if npy:
os.makedirs(npydir, exist_ok=True)
np.save(os.path.join(npydir, f"{detection_id}.npy"), correlations)
picked_chans = chans[i]
detect_stream = detect_streams_dict[detection_id]
checksum, cccsum, used_chans = 0.0, 0.0, 0
event = Event()
for correlation, stachan in zip(correlations, picked_chans):
if not stachan.used:
continue
tr = detect_stream.select(
station=stachan.channel[0], channel=stachan.channel[1])[0]
if interpolate:
shift, cc_max = _xcorr_interp(correlation, dt=delta)
else:
cc_max = np.amax(correlation)
shift = np.argmax(correlation) * delta
if np.isnan(cc_max): # pragma: no cover
Logger.error(
'Problematic trace, no cross correlation possible')
continue
picktime = tr.stats.starttime + shift
checksum += cc_max
used_chans += 1
if cc_max < min_cc:
Logger.debug('Correlation of {0} is below threshold, not '
'using'.format(cc_max))
continue
cccsum += cc_max
phase = None
if stachan.channel[1][-1] in vertical_chans:
phase = 'P'
elif stachan.channel[1][-1] in horizontal_chans:
phase = 'S'
_waveform_id = WaveformStreamID(seed_string=tr.id)
event.picks.append(Pick(
waveform_id=_waveform_id, time=picktime,
method_id=ResourceIdentifier('EQcorrscan'), phase_hint=phase,
creation_info='eqcorrscan.core.lag_calc',
evaluation_mode='automatic',
comments=[Comment(text='cc_max={0}'.format(cc_max))]))
event.resource_id = ResourceIdentifier(detection_id)
event.comments.append(Comment(text="detect_val={0}".format(cccsum)))
# Add template-name as comment to events
event.comments.append(Comment(
text="Detected using template: {0}".format(family.template.name)))
if used_chans == detection.no_chans: # pragma: no cover
if detection.detect_val is not None and\
checksum - detection.detect_val < -(0.3 * detection.detect_val):
msg = ('lag-calc has decreased cccsum from %f to %f - '
% (detection.detect_val, checksum))
Logger.error(msg)
continue
else:
Logger.warning(
'Cannot check if cccsum is better, used {0} channels for '
'detection, but {1} are used here'.format(
detection.no_chans, used_chans))
picked_dict.update({detection_id: event})
if plot: # pragma: no cover
for i, event in enumerate(picked_dict.values()):
if len(event.picks) == 0:
continue
plot_stream = detect_streams[i].copy()
template_plot = family.template.st.copy()
pick_stachans = [(pick.waveform_id.station_code,
pick.waveform_id.channel_code)
for pick in event.picks]
for tr in plot_stream:
if (tr.stats.station, tr.stats.channel) \
not in pick_stachans:
plot_stream.remove(tr)
for tr in template_plot:
if (tr.stats.station, tr.stats.channel) \
not in pick_stachans:
template_plot.remove(tr)
if plotdir is not None:
if not os.path.isdir(plotdir):
os.makedirs(plotdir)
savefile = "{plotdir}/{rid}.png".format(
plotdir=plotdir, rid=event.resource_id.id)
plot_repicked(template=template_plot, picks=event.picks,
det_stream=plot_stream, show=False, save=True,
savefile=savefile)
else:
plot_repicked(template=template_plot, picks=event.picks,
det_stream=plot_stream, show=True)
return picked_dict
|
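Both versions above derive the pick from the peak of each channel's correlation function: the lag is the argmax scaled by the sample interval, and the pick is kept only if the peak clears min_cc. A minimal sketch of just that arithmetic, using plain NumPy and made-up numbers in place of the ObsPy trace:

import numpy as np

# Hypothetical correlation function sampled at 100 Hz (delta = 0.01 s)
delta = 0.01
correlation = np.array([0.1, 0.2, 0.55, 0.9, 0.4, 0.1])
min_cc = 0.4
trace_starttime = 10.0  # stand-in for tr.stats.starttime, in seconds

cc_max = np.amax(correlation)           # peak correlation value
shift = np.argmax(correlation) * delta  # lag of the peak, in seconds

if cc_max >= min_cc:
    picktime = trace_starttime + shift
    print(f"pick at t={picktime:.2f} s, cc_max={cc_max:.2f}")
else:
    print("peak below min_cc, no pick")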
1,593 |
def _incremental_weighted_mean_and_var(X, sample_weight,
last_weighted_mean,
last_weighted_variance,
last_weight_sum):
"""Calculate weighted mean and variance batch update
last_weighted_mean and last_weighted_variance are statistics
computed at the last step by the function. Both must be
initialized to 0.0. In case no scaling is required
last_weighted_variance can be None. The weighted_mean is
    always required and returned because it is necessary for the
    calculation of the weighted_variance. last_weight_sum is
    the sum of weights encountered until now.
Derived from the paper "Incremental calculation of
weighted mean and variance",
by Tony Finch.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Data to use for statistics update
sample_weight : array-like, shape (n_samples,)
last_weighted_mean : array-like, shape: (n_features,)
last_weighted_variance : array-like, shape: (n_features,)
last_weight_sum : array-like, shape (n_features,)
Returns
-------
updated_weighted_mean : array, shape (n_features,)
updated_weighted_variance : array, shape (n_features,)
If None, only weighted_mean is computed
updated_weight_sum : array, shape (n_features,)
Notes
-----
NaNs in X are ignored.
References
----------
Tony Finch
"Incremental calculation of weighted mean and variance"
University of Cambridge Computing Service, February 2009
"""
# last = stats until now
# new = the current increment
# updated = the aggregated stats
M = np.isnan(X)
sample_weight_T = np.transpose(np.reshape(sample_weight, (-1, 1)))
new_weight_sum = _safe_accumulator_op(np.dot, sample_weight_T, ~M).ravel()
total_weight_sum = _safe_accumulator_op(np.sum, sample_weight, axis=0)
X_0 = np.where(np.isnan(X), 0, X)
new_weighted_mean = \
_safe_accumulator_op(np.average, X_0, weights=sample_weight, axis=0)
new_weighted_mean *= total_weight_sum / new_weight_sum
updated_weight_sum = last_weight_sum + new_weight_sum
updated_weighted_mean = (
(last_weight_sum * last_weighted_mean +
new_weight_sum * new_weighted_mean) / updated_weight_sum)
if last_weighted_variance is None:
updated_weighted_variance = None
else:
X_0 = np.where(np.isnan(X), 0, (X-new_weighted_mean)**2)
new_weighted_variance = \
_safe_accumulator_op(
np.average, X_0, weights=sample_weight, axis=0)
new_weighted_variance *= total_weight_sum / new_weight_sum
new_element = (
new_weight_sum *
(new_weighted_variance +
(new_weighted_mean - updated_weighted_mean) ** 2))
last_element = (
last_weight_sum *
(last_weighted_variance +
(last_weighted_mean - updated_weighted_mean) ** 2))
updated_weighted_variance = (
new_element + last_element) / updated_weight_sum
return updated_weighted_mean, updated_weighted_variance, updated_weight_sum
|
def _incremental_weighted_mean_and_var(X, sample_weight,
last_weighted_mean,
last_weighted_variance,
last_weight_sum):
"""Calculate weighted mean and variance batch update
last_weighted_mean and last_weighted_variance are statistics
computed at the last step by the function. Both must be
initialized to 0.0. In case no scaling is required
last_weighted_variance can be None. The weighted_mean is
    always required and returned because it is necessary for the
    calculation of the weighted_variance. last_weight_sum is
    the sum of weights encountered until now.
Derived from the paper "Incremental calculation of
weighted mean and variance",
by Tony Finch.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Data to use for statistics update
sample_weight : array-like, shape (n_samples,)
last_weighted_mean : array-like, shape: (n_features,)
last_weighted_variance : array-like, shape: (n_features,)
last_weight_sum : array-like, shape (n_features,)
Returns
-------
updated_weighted_mean : array, shape (n_features,)
updated_weighted_variance : array, shape (n_features,)
If None, only weighted_mean is computed
updated_weight_sum : array, shape (n_features,)
Notes
-----
NaNs in X are ignored.
References
----------
Tony Finch
"Incremental calculation of weighted mean and variance"
University of Cambridge Computing Service, February 2009
"""
# last = stats until now
# new = the current increment
# updated = the aggregated stats
nan_mask = np.isnan(X)
sample_weight_T = np.transpose(np.reshape(sample_weight, (-1, 1)))
    new_weight_sum = _safe_accumulator_op(np.dot, sample_weight_T, ~nan_mask).ravel()
total_weight_sum = _safe_accumulator_op(np.sum, sample_weight, axis=0)
X_0 = np.where(np.isnan(X), 0, X)
new_weighted_mean = \
_safe_accumulator_op(np.average, X_0, weights=sample_weight, axis=0)
new_weighted_mean *= total_weight_sum / new_weight_sum
updated_weight_sum = last_weight_sum + new_weight_sum
updated_weighted_mean = (
(last_weight_sum * last_weighted_mean +
new_weight_sum * new_weighted_mean) / updated_weight_sum)
if last_weighted_variance is None:
updated_weighted_variance = None
else:
X_0 = np.where(np.isnan(X), 0, (X-new_weighted_mean)**2)
new_weighted_variance = \
_safe_accumulator_op(
np.average, X_0, weights=sample_weight, axis=0)
new_weighted_variance *= total_weight_sum / new_weight_sum
new_element = (
new_weight_sum *
(new_weighted_variance +
(new_weighted_mean - updated_weighted_mean) ** 2))
last_element = (
last_weight_sum *
(last_weighted_variance +
(last_weighted_mean - updated_weighted_mean) ** 2))
updated_weighted_variance = (
new_element + last_element) / updated_weight_sum
return updated_weighted_mean, updated_weighted_variance, updated_weight_sum
|
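The merge step above combines per-batch weighted means as (w_last * mean_last + w_new * mean_new) / (w_last + w_new). A small numeric check of that identity, assuming no NaNs and using plain NumPy in place of _safe_accumulator_op, with made-up data:

import numpy as np

# Hypothetical data split into two batches, with per-sample weights
X1, w1 = np.array([[1.0], [2.0]]), np.array([1.0, 3.0])
X2, w2 = np.array([[4.0], [6.0]]), np.array([2.0, 2.0])

# Per-batch weighted means and weight sums
mean1, sum1 = np.average(X1, weights=w1, axis=0), w1.sum()
mean2, sum2 = np.average(X2, weights=w2, axis=0), w2.sum()

# Incremental merge, as in the update formula above
updated_sum = sum1 + sum2
updated_mean = (sum1 * mean1 + sum2 * mean2) / updated_sum

# Reference: weighted mean over all samples at once
full_mean = np.average(np.vstack([X1, X2]),
                       weights=np.concatenate([w1, w2]), axis=0)
assert np.allclose(updated_mean, full_mean)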
2,239 |
def _increase_reg(reg):
"""
Increase regularization factor by factor of 10.
Parameters
----------
reg: float
Current regularization factor.
Returns
-------
reg : float
Increased regularization
"""
if reg == 0:
reg = 1e-06
else:
reg = reg * 10
return reg
|
def _increase_reg(reg):
"""
Increase regularization factor by factor of 10.
Parameters
----------
reg: float
Current regularization factor.
Returns
-------
reg : float
Increased regularization
"""
if reg == 0:
reg = 1e-06
else:
reg *= 10
return reg
|
27,739 |
def determine_setup(
inifile: Optional[str],
args: Sequence[str],
rootdir_cmd_arg: Optional[str] = None,
config: Optional["Config"] = None,
) -> Tuple[Path, Optional[Path], Dict[str, Union[str, List[str]]]]:
rootdir = None
dirs = get_dirs_from_args(args)
if inifile:
inipath_ = Path(inifile).resolve()
inipath = inipath_ # type: Optional[Path]
inicfg = load_config_dict_from_file(inipath_) or {}
if rootdir_cmd_arg is None:
rootdir = get_common_ancestor(dirs)
else:
ancestor = get_common_ancestor(dirs)
rootdir, inipath, inicfg = locate_config([ancestor])
if rootdir is None and rootdir_cmd_arg is None:
for possible_rootdir in itertools.chain(
(ancestor,), reversed(ancestor.parents)
):
if (possible_rootdir / "setup.py").exists():
rootdir = possible_rootdir
break
else:
if dirs != [ancestor]:
rootdir, inipath, inicfg = locate_config(dirs)
if rootdir is None:
if config is not None:
cwd = config.invocation_params.dir
else:
cwd = Path.cwd()
rootdir = get_common_ancestor([cwd, ancestor])
is_fs_root = os.path.splitdrive(str(rootdir))[1] == "/"
if is_fs_root:
rootdir = ancestor
if rootdir_cmd_arg:
rootdir = Path(os.path.expandvars(rootdir_cmd_arg)).resolve()
if not rootdir.is_dir():
raise UsageError(
"Directory '{}' not found. Check your '--rootdir' option.".format(
rootdir
)
)
assert rootdir is not None
return rootdir, inipath, inicfg or {}
|
def determine_setup(
inifile: Optional[str],
args: Sequence[str],
rootdir_cmd_arg: Optional[str] = None,
config: Optional["Config"] = None,
) -> Tuple[Path, Optional[Path], Dict[str, Union[str, List[str]]]]:
rootdir = None
dirs = get_dirs_from_args(args)
if inifile:
inipath_ = Path(inifile).resolve()
inipath = inipath_ # type: Optional[Path]
inicfg = load_config_dict_from_file(inipath_) or {}
if rootdir_cmd_arg is None:
rootdir = get_common_ancestor(dirs)
else:
ancestor = get_common_ancestor(dirs)
rootdir, inipath, inicfg = locate_config([ancestor])
if rootdir is None and rootdir_cmd_arg is None:
for possible_rootdir in itertools.chain(
(ancestor,), reversed(ancestor.parents)
):
if (possible_rootdir / "setup.py").is_file():
rootdir = possible_rootdir
break
else:
if dirs != [ancestor]:
rootdir, inipath, inicfg = locate_config(dirs)
if rootdir is None:
if config is not None:
cwd = config.invocation_params.dir
else:
cwd = Path.cwd()
rootdir = get_common_ancestor([cwd, ancestor])
is_fs_root = os.path.splitdrive(str(rootdir))[1] == "/"
if is_fs_root:
rootdir = ancestor
if rootdir_cmd_arg:
rootdir = Path(os.path.expandvars(rootdir_cmd_arg)).resolve()
if not rootdir.is_dir():
raise UsageError(
"Directory '{}' not found. Check your '--rootdir' option.".format(
rootdir
)
)
assert rootdir is not None
return rootdir, inipath, inicfg or {}
|
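Both versions fall back to scanning for a setup.py, checking the common ancestor first and then its other ancestors starting from the filesystem root. A stripped-down sketch of that walk using only pathlib and itertools (the starting directory here is just an example):

import itertools
from pathlib import Path
from typing import Optional

def find_setup_py_dir(start: Path) -> Optional[Path]:
    # Check `start` itself, then its ancestors from the filesystem root
    # downwards, mirroring the itertools.chain used above.
    for candidate in itertools.chain((start,), reversed(start.parents)):
        if (candidate / "setup.py").is_file():
            return candidate
    return None

# Example call on the current working directory (result depends on the machine)
print(find_setup_py_dir(Path.cwd()))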
31,782 |
def main() -> None:
"""main function, parses params and runs command functions
:return:
:rtype:
"""
args = demisto.args()
params = demisto.params()
api_key = params.get('apikey')
api_key_id = params.get('apikey_id')
base_url = urljoin(params['url'], '/public_api/v1')
verify_cert = not params.get('insecure', False)
proxy = params.get('proxy', False)
command = demisto.command()
demisto.debug(f'Command being called is {command}')
try:
nonce = "".join([secrets.choice(string.ascii_letters + string.digits) for _ in range(64)])
timestamp = str(int(datetime.now(timezone.utc).timestamp()) * 1000)
auth_key = "%s%s%s" % (api_key, nonce, timestamp)
api_key_hash = hashlib.sha256(auth_key.encode("utf-8")).hexdigest()
headers = {
"x-xdr-timestamp": str(timestamp),
"x-xdr-nonce": nonce,
"x-xdr-auth-id": str(api_key_id),
"Authorization": api_key_hash
}
client = Client(
base_url=base_url,
verify=verify_cert,
headers=headers,
proxy=proxy)
generic_commands = init_generic_commands()
built_in_commands = init_built_in_commands()
if command in generic_commands:
return_results(generic_commands[command](client, args))
elif command in built_in_commands:
return_results(get_built_in_query_results_polling_command(client, args))
else:
raise NotImplementedError(f'Command {command} does not exist.')
# Log exceptions and return errors
except Exception as e:
demisto.error(traceback.format_exc()) # print the traceback
return_error(f'Failed to execute {command} command.\nError: {str(e)}')
finally:
get_integration_context().clear()
|
def main() -> None:
"""main function, parses params and runs command functions
:return:
:rtype:
"""
args = demisto.args()
params = demisto.params()
api_key = params.get('apikey')
api_key_id = params.get('apikey_id')
base_url = urljoin(params['url'], '/public_api/v1')
verify_cert = not params.get('insecure', False)
proxy = params.get('proxy', False)
command = demisto.command()
demisto.debug(f'Command being called is {command}')
try:
nonce = "".join([secrets.choice(string.ascii_letters + string.digits) for _ in range(64)])
timestamp = str(int(datetime.now(timezone.utc).timestamp()) * 1000)
auth_key = f'{api_key}{nonce}{timestamp}'
api_key_hash = hashlib.sha256(auth_key.encode("utf-8")).hexdigest()
headers = {
"x-xdr-timestamp": str(timestamp),
"x-xdr-nonce": nonce,
"x-xdr-auth-id": str(api_key_id),
"Authorization": api_key_hash
}
client = Client(
base_url=base_url,
verify=verify_cert,
headers=headers,
proxy=proxy)
generic_commands = init_generic_commands()
built_in_commands = init_built_in_commands()
if command in generic_commands:
return_results(generic_commands[command](client, args))
elif command in built_in_commands:
return_results(get_built_in_query_results_polling_command(client, args))
else:
raise NotImplementedError(f'Command {command} does not exist.')
# Log exceptions and return errors
except Exception as e:
demisto.error(traceback.format_exc()) # print the traceback
return_error(f'Failed to execute {command} command.\nError: {str(e)}')
finally:
get_integration_context().clear()
|
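The advanced-authentication headers in both versions are built from a 64-character random nonce, a millisecond timestamp, and sha256(api_key + nonce + timestamp). A self-contained sketch of that header construction, with placeholder key values:

import hashlib
import secrets
import string
from datetime import datetime, timezone

def build_xdr_auth_headers(api_key: str, api_key_id: str) -> dict:
    # 64-character random nonce drawn from letters and digits
    nonce = "".join(secrets.choice(string.ascii_letters + string.digits) for _ in range(64))
    # Current UTC time, truncated to seconds and expressed in milliseconds
    timestamp = str(int(datetime.now(timezone.utc).timestamp()) * 1000)
    # The hash of key + nonce + timestamp is sent instead of the raw key
    api_key_hash = hashlib.sha256(f"{api_key}{nonce}{timestamp}".encode("utf-8")).hexdigest()
    return {
        "x-xdr-timestamp": timestamp,
        "x-xdr-nonce": nonce,
        "x-xdr-auth-id": str(api_key_id),
        "Authorization": api_key_hash,
    }

# Hypothetical values, for illustration only
print(build_xdr_auth_headers("my-api-key", "1"))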
57,672 |
def get_github_actions_usage():
""" List github actions workflows usage of private repositories.
"""
command_args = demisto.args()
owner_name = command_args.get('owner', '')
usage_result = []
private_repositories = list_owner_repositories(owner_name=owner_name, repo_type="private")
for repository_name in private_repositories:
repository_workflows = list_repository_workflows(owner_name=owner_name, repository_name=repository_name)
for workflow in repository_workflows:
workflow_id = workflow.get('id', '')
workflow_name = workflow.get('name', '')
workflow_usage = get_workflow_usage(owner_name=owner_name, repository_name=repository_name,
workflow_id=workflow_id)
if workflow_usage:
usage_result.append({
'workflow_name': workflow_name,
'workflow_id': workflow_id,
'repository': repository_name,
'workflow_usage': workflow_usage
})
ec = {
'GitHub.ActionsUsage': usage_result
}
human_readable = tableToMarkdown('Github Actions Usage', usage_result,
headerTransform=lambda h: h.replace('_', ' ').title())
return_outputs(readable_output=human_readable, outputs=ec, raw_response=usage_result)
|
def get_github_actions_usage():
""" List github actions workflows usage of private repositories.
"""
command_args = demisto.args()
owner_name = command_args.get('owner', '')
usage_result = []
private_repositories = list_owner_repositories(owner_name=owner_name, repo_type="private")
for repository_name in private_repositories:
repository_workflows = list_repository_workflows(owner_name=owner_name, repository_name=repository_name)
for workflow in repository_workflows:
workflow_id = workflow.get('id', '')
workflow_name = workflow.get('name', '')
workflow_usage = get_workflow_usage(owner_name=owner_name, repository_name=repository_name,
workflow_id=workflow_id)
if workflow_usage:
usage_result.append({
'workflow_name': workflow_name,
'workflow_id': workflow_id,
'repository': repository_name,
'workflow_usage': workflow_usage,
})
ec = {
'GitHub.ActionsUsage': usage_result
}
human_readable = tableToMarkdown('Github Actions Usage', usage_result,
headerTransform=lambda h: h.replace('_', ' ').title())
return_outputs(readable_output=human_readable, outputs=ec, raw_response=usage_result)
|
59,389 |
def sweep(
sweep: Union[dict, Callable], entity: str = None, project: str = None,
) -> str:
"""Initialize a hyperparameter sweep.
To generate hyperparameter suggestions from the sweep and use them
to train a model, call `wandb.agent` with the sweep_id returned by
this command. For command line functionality, see the command line
tool `wandb sweep` (https://docs.wandb.ai/ref/cli/wandb-sweep).
Args:
sweep: dict, SweepConfig, or callable. The sweep configuration
(or configuration generator). If a dict or SweepConfig,
should conform to the W&B sweep config specification
(https://docs.wandb.ai/guides/sweeps/configuration). If a
callable, should take no arguments and return a dict that
conforms to the W&B sweep config spec.
entity: str (optional). An entity is a username or team name
where you're sending runs. This entity must exist before you
can send runs there, so make sure to create your account or
team in the UI before starting to log runs. If you don't
specify an entity, the run will be sent to your default
entity, which is usually your username. Change your default
entity in [Settings](wandb.ai/settings) under "default
location to create new projects".
project: str (optional). The name of the project where you're
sending the new run. If the project is not specified, the
run is put in an "Uncategorized" project.
Returns:
sweep_id: str. A unique identifier for the sweep.
Examples:
Basic usage
<!--yeadoc-test:one-parameter-sweep-->
```python
import wandb
sweep_configuration = {
"name": "my-awesome-sweep",
"metric": {"name": "accuracy", "goal": "maximize"},
"method": "grid",
"parameters": {
"a": {
"values": [1, 2, 3, 4]
}
}
}
def my_train_func():
# read the current value of parameter "a" from wandb.config
wandb.init()
a = wandb.config.a
wandb.log({"a": a, "accuracy": a + 1})
sweep_id = wandb.sweep(sweep_configuration)
# run the sweep
wandb.agent(sweep_id, function=my_train_func)
```
"""
if callable(sweep):
sweep = sweep()
"""Sweep create for controller api and jupyter (eventually for cli)."""
if entity:
env.set_entity(entity)
if project:
env.set_project(project)
# Make sure we are logged in
wandb_login._login(_silent=True, _disable_warning=True)
api = InternalApi()
sweep_id, warnings = api.upsert_sweep(sweep)
handle_sweep_config_violations(warnings)
print("Create sweep with ID:", sweep_id)
sweep_url = _get_sweep_url(api, sweep_id)
if sweep_url:
print("Sweep URL:", sweep_url)
return sweep_id
|
def sweep(
sweep: Union[dict, Callable], entity: str = None, project: str = None,
) -> str:
"""Initialize a hyperparameter sweep.
To generate hyperparameter suggestions from the sweep and use them
to train a model, call `wandb.agent` with the sweep_id returned by
this command. For command line functionality, see the command line
tool `wandb sweep` (https://docs.wandb.ai/ref/cli/wandb-sweep).
Args:
sweep: dict, SweepConfig, or callable. The sweep configuration
(or configuration generator). If a dict or SweepConfig,
should conform to the W&B sweep config specification
(https://docs.wandb.ai/guides/sweeps/configuration). If a
callable, should take no arguments and return a dict that
conforms to the W&B sweep config spec.
entity: str (optional). An entity is a username or team name
where you're sending runs. This entity must exist before you
can send runs there, so make sure to create your account or
team in the UI before starting to log runs. If you don't
specify an entity, the run will be sent to your default
entity, which is usually your username. Change your default
entity in [Settings](wandb.ai/settings) under "default
location to create new projects".
project: str (optional). The name of the project where you're
sending the new run. If the project is not specified, the
run is put in an "Uncategorized" project.
Returns:
sweep_id: str. A unique identifier for the sweep.
Examples:
Basic usage
<!--yeadoc-test:one-parameter-sweep-->
```python
import wandb
sweep_configuration = {
"name": "my-awesome-sweep",
"metric": {"name": "accuracy", "goal": "maximize"},
"method": "grid",
"parameters": {
"a": {
"values": [1, 2, 3, 4]
}
}
}
def my_train_func():
# read the current value of parameter "a" from wandb.config
wandb.init()
a = wandb.config.a
wandb.log({"a": a, "accuracy": a + 1})
sweep_id = wandb.sweep(sweep_configuration)
# run the sweep
wandb.agent(sweep_id, function=my_train_func)
```
"""
if callable(sweep):
sweep = sweep()
"""Sweep create for controller api and jupyter (eventually for cli)."""
if entity:
env.set_entity(entity)
if project:
env.set_project(project)
# Make sure we are logged in
if wandb.run is not None:
wandb_login._login(_silent=True)
api = InternalApi()
sweep_id, warnings = api.upsert_sweep(sweep)
handle_sweep_config_violations(warnings)
print("Create sweep with ID:", sweep_id)
sweep_url = _get_sweep_url(api, sweep_id)
if sweep_url:
print("Sweep URL:", sweep_url)
return sweep_id
|
39,558 |
def get_unique_patched_packages_with_affected_packages(
affected_packages: List[AffectedPackage],
):
unique_patched_packages_with_affected_packages = {}
for package in affected_packages:
if package.patched_package not in unique_patched_packages_with_affected_packages:
unique_patched_packages_with_affected_packages[package.patched_package] = []
unique_patched_packages_with_affected_packages[package.patched_package].append(
package.vulnerable_package
)
return unique_patched_packages_with_affected_packages
|
def get_unique_patched_packages_with_affected_packages(
affected_packages: List[AffectedPackage],
):
unique_patched_packages_with_affected_packages = defaultdict(list)
for package in affected_packages:
unique_patched_packages_with_affected_packages[package.patched_package].append(
package.vulnerable_package
)
return unique_patched_packages_with_affected_packages
|
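The rewrite above switches to collections.defaultdict(list), which creates the empty list on first access and makes an explicit membership check unnecessary. A minimal grouping sketch with hypothetical (patched, vulnerable) package names:

from collections import defaultdict

# Hypothetical (patched, vulnerable) package pairs
pairs = [("openssl-1.1.1k", "openssl-1.1.1a"),
         ("openssl-1.1.1k", "openssl-1.1.1b"),
         ("zlib-1.2.12", "zlib-1.2.11")]

grouped = defaultdict(list)
for patched, vulnerable in pairs:
    # Missing keys are created automatically with an empty list
    grouped[patched].append(vulnerable)

print(dict(grouped))
# {'openssl-1.1.1k': ['openssl-1.1.1a', 'openssl-1.1.1b'], 'zlib-1.2.12': ['zlib-1.2.11']}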
41,783 |
def plot_intermediate_values(study):
# type: (Study) -> go.Figure
"""Plot intermediate values of all trials in a study.
Example:
The following code snippet shows how to plot intermediate values.
.. testcode::
import optuna
# Derivative function for x**2
def df(x):
return 2*x
def objective(trial):
next_x = 1 # We start the search at x=1
gamma = trial.suggest_loguniform('alpha', 1e-5, 1e-1) # Step size multiplier
# Stepping through gradient descent to find the minima of x**2
for step in range(100):
current_x = next_x
next_x = current_x - gamma * df(current_x)
delta = next_x - current_x
trial.report(current_x, step)
return delta
study = optuna.create_study()
study.optimize(objective, n_trials=5)
optuna.visualization.plot_intermediate_values(study)
.. raw:: html
<iframe src="../_static/plot_intermediate_values.html"
width="100%" height="500px" frameborder="0">
</iframe>
Args:
study:
A :class:`~optuna.study.Study` object whose trials are plotted for their intermediate
values.
Returns:
A :class:`plotly.graph_objs.Figure` object.
"""
_check_plotly_availability()
return _get_intermediate_plot(study)
|
def plot_intermediate_values(study):
# type: (Study) -> go.Figure
"""Plot intermediate values of all trials in a study.
Example:
The following code snippet shows how to plot intermediate values.
.. testcode::
import optuna
# Derivative function for x ** 2
def df(x):
return 2*x
def objective(trial):
next_x = 1 # We start the search at x=1
gamma = trial.suggest_loguniform('alpha', 1e-5, 1e-1) # Step size multiplier
# Stepping through gradient descent to find the minima of x**2
for step in range(100):
current_x = next_x
next_x = current_x - gamma * df(current_x)
delta = next_x - current_x
trial.report(current_x, step)
return delta
study = optuna.create_study()
study.optimize(objective, n_trials=5)
optuna.visualization.plot_intermediate_values(study)
.. raw:: html
<iframe src="../_static/plot_intermediate_values.html"
width="100%" height="500px" frameborder="0">
</iframe>
Args:
study:
A :class:`~optuna.study.Study` object whose trials are plotted for their intermediate
values.
Returns:
A :class:`plotly.graph_objs.Figure` object.
"""
_check_plotly_availability()
return _get_intermediate_plot(study)
|
1,039 |
def test_right_regist_hash():
hash_name = 'my_hash'
assert hash_name not in _HASHES
register_hash(hash_name, hashlib.sha256)
assert _HASHES[hash_name] == hashlib.sha256
|
def test_right_register_hash():
hash_name = 'my_hash'
assert hash_name not in _HASHES
register_hash(hash_name, hashlib.sha256)
assert _HASHES[hash_name] == hashlib.sha256
|
32,486 |
def convert_start_fetch_to_milliseconds(fetch_start_time: str):
"""
Convert a timestamp string to milliseconds
    Args:
fetch_start_time (str): First fetch timestamp.
Returns:
(int): time since (epoch - first_fetch) in milliseconds.
"""
date = dateparser.parse(fetch_start_time, settings={'TIMEZONE': 'UTC'})
if date is None:
# if d is None it means dateparser failed to parse it
raise ValueError(f'Invalid first_fetch format: {fetch_start_time}')
return int(date.timestamp() * 1000)
|
def convert_start_fetch_to_milliseconds(fetch_start_time: str):
"""
Convert a timestamp string to milliseconds
    Args:
fetch_start_time (str): First fetch timestamp.
Returns:
(int): time since (epoch - first_fetch) in milliseconds.
"""
date = dateparser.parse(fetch_start_time, settings={'TIMEZONE': 'UTC'})
if date is None:
# if date is None it means dateparser failed to parse it
raise ValueError(f'Invalid first_fetch format: {fetch_start_time}')
return int(date.timestamp() * 1000)
|
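The conversion itself is just seconds-since-epoch times 1000, truncated to an int. A small standard-library sketch with a fixed UTC datetime standing in for the parsed first_fetch value:

from datetime import datetime, timezone

# A fixed UTC timestamp standing in for the parsed first_fetch value
date = datetime(2023, 1, 1, 0, 0, 0, tzinfo=timezone.utc)

# Epoch milliseconds, as returned by the function above
epoch_ms = int(date.timestamp() * 1000)
print(epoch_ms)  # 1672531200000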
9,152 |
def is_celery_queue_long():
"""
Checks whether celery queue is too long.
It does trigger if it is too long for at least one hour. This way peaks are
filtered out, and no warning need be issued for big operations (e.g. site-wide autotranslate).
"""
cache_key = "celery_queue_stats"
queues_data = cache.get(cache_key, {})
# Hours since epoch
current_hour = int(time.time() / 3600)
test_hour = current_hour - 1
# Fetch current stats
stats = get_queue_stats()
# Update counters
if current_hour not in queues_data:
# Delete stale items
for key in list(queues_data.keys()):
if key < test_hour:
del queues_data[key]
# Add current one
queues_data[current_hour] = stats
# Store to cache
cache.set(cache_key, queues_data, 7200)
# Do not fire if we do not have counts for two hours ago
if test_hour not in queues_data:
return False
# Check if any queue got bigger
base = queues_data[test_hour]
thresholds = defaultdict(lambda: 50)
thresholds["translate"] = 1000
return any(
stat > thresholds[key] and base.get(key, 0) > thresholds[key]
for key, stat in stats.items()
)
|
def is_celery_queue_long():
"""
Checks whether celery queue is too long.
It does trigger if it is too long for at least one hour. This way peaks are
filtered out, and no warning need be issued for big operations (e.g. site-wide autotranslation).
"""
cache_key = "celery_queue_stats"
queues_data = cache.get(cache_key, {})
# Hours since epoch
current_hour = int(time.time() / 3600)
test_hour = current_hour - 1
# Fetch current stats
stats = get_queue_stats()
# Update counters
if current_hour not in queues_data:
# Delete stale items
for key in list(queues_data.keys()):
if key < test_hour:
del queues_data[key]
# Add current one
queues_data[current_hour] = stats
# Store to cache
cache.set(cache_key, queues_data, 7200)
# Do not fire if we do not have counts for two hours ago
if test_hour not in queues_data:
return False
# Check if any queue got bigger
base = queues_data[test_hour]
thresholds = defaultdict(lambda: 50)
thresholds["translate"] = 1000
return any(
stat > thresholds[key] and base.get(key, 0) > thresholds[key]
for key, stat in stats.items()
)
|
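The check buckets queue statistics by hours since the epoch and only fires when a queue exceeds its threshold in both the current and the previous hour. A compact sketch of that comparison with hypothetical queue lengths:

import time
from collections import defaultdict

current_hour = int(time.time() / 3600)   # hour bucket, as in the function above
test_hour = current_hour - 1

# Hypothetical cached stats per hour bucket: {queue_name: queue_length}
queues_data = {
    test_hour: {"celery": 10, "translate": 1500},
    current_hour: {"celery": 20, "translate": 1800},
}

thresholds = defaultdict(lambda: 50)
thresholds["translate"] = 1000

base = queues_data[test_hour]
stats = queues_data[current_hour]
too_long = any(
    stat > thresholds[key] and base.get(key, 0) > thresholds[key]
    for key, stat in stats.items()
)
print(too_long)  # True: "translate" exceeded 1000 in both hours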
57,758 |
def main():
"""
PARSE AND VALIDATE INTEGRATION PARAMS
"""
# get Acalvio API Server url
base_url = demisto.params()['url']
# get Acalvio API Key
apikey = demisto.params()['apikey']
# check if SSL is to be verified
verify_certificate = demisto.params().get('insecure', False)
proxy = demisto.params().get('proxy', False)
# set the headers
headers = {
'api_key': apikey,
'content-type': 'application/json'
}
demisto.log(f'Command being called is \'{demisto.command()}\'')
result = None
acalerror = AcalError()
try:
client = Client(
base_url=base_url,
verify=verify_certificate,
headers=headers,
proxy=proxy)
if demisto.command() == 'test-module':
# This is the call made when pressing the integration Test button
result, acalerror = do_test_connection(client)
elif demisto.command() == 'acalvio-is-deception-host':
result, acalerror = \
do_deception_host_command(client, demisto.args())
elif demisto.command() == 'acalvio-is-deception-file':
result, acalerror = \
do_deception_file_command(client, demisto.args())
elif demisto.command() == 'acalvio-is-deception-user':
result, acalerror = \
do_deception_user_command(client, demisto.args())
elif demisto.command() == 'acalvio-mute-deception-host':
result, acalerror = \
do_mute_deception_host_command(client, demisto.args())
elif demisto.command() == 'acalvio-unmute-deception-host':
result, acalerror = \
do_unmute_deception_host_command(client, demisto.args())
elif demisto.command() == 'acalvio-mute-deception-on-endpoint':
result, acalerror = \
do_mute_deception_ep_command(client, demisto.args())
elif demisto.command() == 'acalvio-unmute-deception-on-endpoint':
result, acalerror = \
do_unmute_deception_ep_command(client, demisto.args())
# Log exceptions
except Exception as e:
acalerror = AcalError(message=f'Failed to execute \'{demisto.command()}\' command. Error: {str(e)}')
finally:
if result is not None:
return_results(result)
else:
if acalerror is None:
acalerror = AcalError()
return_error(message=acalerror.message,
error=acalerror.error,
outputs=acalerror.outputs)
|
def main():
"""
PARSE AND VALIDATE INTEGRATION PARAMS
"""
# get Acalvio API Server url
params = demisto.params()
base_url = params['url'].rstrip('/')
# get Acalvio API Key
apikey = demisto.params()['apikey']
# check if SSL is to be verified
verify_certificate = demisto.params().get('insecure', False)
proxy = demisto.params().get('proxy', False)
# set the headers
headers = {
'api_key': apikey,
'content-type': 'application/json'
}
demisto.log(f'Command being called is \'{demisto.command()}\'')
result = None
acalerror = AcalError()
try:
client = Client(
base_url=base_url,
verify=verify_certificate,
headers=headers,
proxy=proxy)
if demisto.command() == 'test-module':
# This is the call made when pressing the integration Test button
result, acalerror = do_test_connection(client)
elif demisto.command() == 'acalvio-is-deception-host':
result, acalerror = \
do_deception_host_command(client, demisto.args())
elif demisto.command() == 'acalvio-is-deception-file':
result, acalerror = \
do_deception_file_command(client, demisto.args())
elif demisto.command() == 'acalvio-is-deception-user':
result, acalerror = \
do_deception_user_command(client, demisto.args())
elif demisto.command() == 'acalvio-mute-deception-host':
result, acalerror = \
do_mute_deception_host_command(client, demisto.args())
elif demisto.command() == 'acalvio-unmute-deception-host':
result, acalerror = \
do_unmute_deception_host_command(client, demisto.args())
elif demisto.command() == 'acalvio-mute-deception-on-endpoint':
result, acalerror = \
do_mute_deception_ep_command(client, demisto.args())
elif demisto.command() == 'acalvio-unmute-deception-on-endpoint':
result, acalerror = \
do_unmute_deception_ep_command(client, demisto.args())
# Log exceptions
except Exception as e:
acalerror = AcalError(message=f'Failed to execute \'{demisto.command()}\' command. Error: {str(e)}')
finally:
if result is not None:
return_results(result)
else:
if acalerror is None:
acalerror = AcalError()
return_error(message=acalerror.message,
error=acalerror.error,
outputs=acalerror.outputs)
|
8,143 |
def get_rectangle_coordinates(bottom_left, *, top_right = None, width: u.deg = None, height: u.deg = None):
if width is None and height is None:
if not top_right:
if bottom_left.shape[0] != 2:
raise IndexError("If top_right is not specified bottom_left must have length two or width and height must be provided.")
else:
top_right = bottom_left[1]
bottom_left = bottom_left[0]
if type(bottom_left) is not type(top_right):
raise ValueError("top_right must be of same type as bottom_left")
if isinstance(bottom_left, (SkyCoord, BaseCoordinateFrame)):
top_right = top_right.transform_to(bottom_left.frame.name)
width = (top_right.spherical.lon - bottom_left.spherical.lon).to(u.deg) # Getting the difference in Longitudes.
height = (top_right.spherical.lat - bottom_left.spherical.lat).to(u.deg) # Getting the difference in Latitudes.
if width <= 0*u.deg or height <= 0*u.deg:
raise ValueError("top_right must be to the right and above bottom_left.")
else:
raise ValueError("Invalid value passed for bottom_left")
elif top_right is None:
top_right = SkyCoord(bottom_left.data.lon + width, bottom_left.data.lat + height, frame=bottom_left.frame.name)
else:
raise ValueError("Invalid input, bottom_left and top_right must either be SkyCoord")
return (bottom_left, top_right)
|
def get_rectangle_coordinates(bottom_left, *, top_right = None, width: u.deg = None, height: u.deg = None):
if width is None and height is None:
if not top_right:
if bottom_left.shape[0] != 2:
raise IndexError("If top_right is not specified bottom_left must have length two or width and height must be provided.")
else:
top_right = bottom_left[1]
bottom_left = bottom_left[0]
if type(bottom_left) is not type(top_right):
raise ValueError("top_right must be of same type as bottom_left")
if isinstance(bottom_left, (SkyCoord, BaseCoordinateFrame)):
top_right = top_right.transform_to(bottom_left.frame.name)
width = top_right.unit_spherical.lon - bottom_left.unit_spherical.lon # Getting the difference in Longitudes.
height = (top_right.spherical.lat - bottom_left.spherical.lat).to(u.deg) # Getting the difference in Latitudes.
if width <= 0*u.deg or height <= 0*u.deg:
raise ValueError("top_right must be to the right and above bottom_left.")
else:
raise ValueError("Invalid value passed for bottom_left")
elif top_right is None:
top_right = SkyCoord(bottom_left.data.lon + width, bottom_left.data.lat + height, frame=bottom_left.frame.name)
else:
raise ValueError("Invalid input, bottom_left and top_right must either be SkyCoord")
return (bottom_left, top_right)
|
7,882 |
def dose_coefficients(particle, geometry='AP'):
"""Return effective dose conversion coefficients from ICRP-116
This function provides fluence to dose conversion coefficients for effective
dose for various types of external exposures based on values in `ICRP
Publication 116 <https://doi.org/10.1016/j.icrp.2011.10.001>`_.
Parameters
----------
particle : {'neutron', 'photon', 'electron', 'positron'}
Incident particle
geometry : {'AP', 'PA', 'LLAT', 'RLAT', 'ROT', 'ISO'}
Irradiation geometry assumed. Refer to ICRP-116 for the meaning of the
options here.
Returns
-------
energy : numpy.ndarray
Energies at which dose conversion coefficients are given
dose : numpy.ndarray
Effective dose in [pSv cm^2] at provided energies
"""
if not _DOSE_ICRP116:
_load_dose_icrp116()
# Get all data for selected particle
data = _DOSE_ICRP116.get(particle)
if data is None:
raise ValueError("{} has no effective dose data".format(particle))
# Determine index for selected geometry
if particle in ('neutron', 'photon', 'proton'):
index = ('AP', 'PA', 'LLAT', 'RLAT', 'ROT', 'ISO').index(geometry)
else:
index = ('AP', 'PA', 'ISO').index(geometry)
# Pull out energy and dose from table
energy = data[:, 0].copy()
dose = data[:, index + 1].copy()
return energy, dose
|
def dose_coefficients(particle, geometry='AP'):
"""Return effective dose conversion coefficients from ICRP-116
This function provides fluence to dose conversion coefficients for effective
dose for various types of external exposures based on values in `ICRP
Publication 116 <https://doi.org/10.1016/j.icrp.2011.10.001>`_.
Parameters
----------
particle : {'neutron', 'photon', 'electron', 'positron'}
Incident particle
geometry : {'AP', 'PA', 'LLAT', 'RLAT', 'ROT', 'ISO'}
Irradiation geometry assumed. Refer to ICRP-116 for the meaning of the
options here.
Returns
-------
energy : numpy.ndarray
Energies at which dose conversion coefficients are given
dose : numpy.ndarray
Effective dose in [pSv cm^2] at provided energies
"""
if not _DOSE_ICRP116:
_load_dose_icrp116()
# Get all data for selected particle
data = _DOSE_ICRP116.get(particle)
if data is None:
raise ValueError("{} has no effective dose data".format(particle))
# Determine index for selected geometry
if particle in ('neutron', 'photon', 'proton'):
index = ('AP', 'PA', 'LLAT', 'RLAT', 'ROT', 'ISO').index(geometry)
else:
index = ('AP', 'PA', 'ISO').index(geometry)
# Pull out energy and dose from table
energy = data[:, 0].copy()
dose_coeffs = data[:, index + 1].copy()
    return energy, dose_coeffs
|
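Both versions map the geometry string to a column index, with column 0 of the table holding energies. A small sketch of that lookup on a hypothetical coefficient table:

import numpy as np

# Hypothetical table: column 0 = energy, columns 1.. = dose per geometry
data = np.array([
    [1.0, 0.1, 0.2, 0.3],
    [2.0, 0.4, 0.5, 0.6],
])
geometries = ('AP', 'PA', 'ISO')

geometry = 'PA'
index = geometries.index(geometry)   # 1
energy = data[:, 0].copy()
dose = data[:, index + 1].copy()     # column 2 for 'PA'
print(energy, dose)                  # [1. 2.] [0.2 0.5]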
17,519 |
def _dataset_from_backend_dataset(
backend_ds,
filename_or_obj,
engine,
chunks,
cache,
overwrite_encoded_chunks,
inline_array,
**extra_tokens,
):
if not isinstance(chunks, (int, dict)) and chunks not in {None, "auto"}:
raise ValueError(
f"chunks must be an int, dict, 'auto', or None. Instead found {chunks}."
)
_protect_dataset_variables_inplace(backend_ds, cache)
if chunks is None:
ds = backend_ds
else:
ds = _chunk_ds(
backend_ds,
filename_or_obj,
engine,
chunks,
overwrite_encoded_chunks,
inline_array,
**extra_tokens,
)
ds.set_close(backend_ds._close)
# Ensure source filename always stored in dataset object
if "source" not in ds.encoding:
ds.encoding["source"] = _normalize_path(filename_or_obj)
return ds
|
def _dataset_from_backend_dataset(
backend_ds,
filename_or_obj,
engine,
chunks,
cache,
overwrite_encoded_chunks,
inline_array,
**extra_tokens,
):
if not isinstance(chunks, (int, dict)) and chunks not in {None, "auto"}:
raise ValueError(
f"chunks must be an int, dict, 'auto', or None. Instead found {chunks}."
)
_protect_dataset_variables_inplace(backend_ds, cache)
if chunks is None:
ds = backend_ds
else:
ds = _chunk_ds(
backend_ds,
filename_or_obj,
engine,
chunks,
overwrite_encoded_chunks,
inline_array,
**extra_tokens,
)
ds.set_close(backend_ds._close)
# Ensure source filename always stored in dataset object
if "source" not in ds.encoding and isinstance(filename_or_obj, (str, os.PathLike)):
ds.encoding["source"] = _normalize_path(filename_or_obj)
return ds
|
1,243 |
def type_info(np_type):
""" Return dict with min, max, nexp, nmant, width for numpy type `np_type`
Type can be integer in which case nexp and nmant are None.
Parameters
----------
np_type : numpy type specifier
Any specifier for a numpy dtype
Returns
-------
info : dict
with fields ``min`` (minimum value), ``max`` (maximum value), ``nexp``
(exponent width), ``nmant`` (significand precision not including
implicit first digit), ``minexp`` (minimum exponent), ``maxexp``
(maximum exponent), ``width`` (width in bytes). (``nexp``, ``nmant``,
``minexp``, ``maxexp``) are None for integer types. Both ``min`` and
``max`` are of type `np_type`.
Raises
------
FloatingError
for floating point types we don't recognize
Notes
-----
You might be thinking that ``np.finfo`` does this job, and it does, except
for PPC long doubles (https://github.com/numpy/numpy/issues/2669) and
float96 on Windows compiled with Mingw. This routine protects against such
errors in ``np.finfo`` by only accepting values that we know are likely to
be correct.
"""
dt = np.dtype(np_type)
np_type = dt.type
width = dt.itemsize
try: # integer type
info = np.iinfo(dt)
except ValueError:
pass
else:
return dict(min=np_type(info.min), max=np_type(info.max), minexp=None,
maxexp=None, nmant=None, nexp=None, width=width)
info = np.finfo(dt)
# Trust the standard IEEE types
nmant, nexp = info.nmant, info.nexp
ret = dict(min=np_type(info.min),
max=np_type(info.max),
nmant=nmant,
nexp=nexp,
minexp=info.minexp,
maxexp=info.maxexp,
width=width)
if np_type in (np.float16, np.float32, np.float64,
np.complex64, np.complex128):
return ret
info_64 = np.finfo(np.float64)
if dt.kind == 'c':
assert np_type is np.longcomplex
vals = (nmant, nexp, width / 2)
else:
assert np_type is np.longdouble
vals = (nmant, nexp, width)
if vals in ((112, 15, 16), # binary128
(info_64.nmant, info_64.nexp, 8), # float64
(63, 15, 12), (63, 15, 16)): # Intel extended 80
return ret # these are OK without modification
# The remaining types are longdoubles with bad finfo values. Some we
# correct, others we wait to hear of errors.
# We start with float64 as basis
ret = type_info(np.float64)
if vals in ((52, 15, 12), # windows float96
(52, 15, 16)): # windows float128?
# On windows 32 bit at least, float96 is Intel 80 storage but operating
# at float64 precision. The finfo values give nexp == 15 (as for intel
# 80) but in calculations nexp in fact appears to be 11 as for float64
ret.update(dict(width=width))
return ret
if vals == (105, 11, 16): # correctly detected double double
ret.update(dict(nmant=nmant, nexp=nexp, width=width))
return ret
# Oh dear, we don't recognize the type information. Try some known types
# and then give up. At this stage we're expecting exotic longdouble or
# their complex equivalent.
if np_type not in (np.longdouble, np.longcomplex) or width not in (16, 32):
raise FloatingError(f'We had not expected type {np_type}')
if (vals == (1, 1, 16) and on_powerpc() and
_check_maxexp(np.longdouble, 1024)):
# double pair on PPC. The _check_nmant routine does not work for this
# type, hence the powerpc platform check instead
ret.update(dict(nmant=106, width=width))
elif (_check_nmant(np.longdouble, 52) and
_check_maxexp(np.longdouble, 11)):
# Got float64 despite everything
pass
elif (_check_nmant(np.longdouble, 112) and
_check_maxexp(np.longdouble, 16384)):
# binary 128, but with some busted type information. np.longcomplex
# seems to break here too, so we need to use np.longdouble and
# complexify
two = np.longdouble(2)
# See: https://matthew-brett.github.io/pydagogue/floating_point.html
max_val = (two ** 113 - 1) / (two ** 112) * two ** 16383
if np_type is np.longcomplex:
max_val += 0j
ret = dict(min=-max_val,
max=max_val,
nmant=112,
nexp=15,
minexp=-16382,
maxexp=16384,
width=width)
else: # don't recognize the type
raise FloatingError(f'We had not expected long double '
f'type {np_type} with info {info}')
return ret
|
def type_info(np_type):
""" Return dict with min, max, nexp, nmant, width for numpy type `np_type`
Type can be integer in which case nexp and nmant are None.
Parameters
----------
np_type : numpy type specifier
Any specifier for a numpy dtype
Returns
-------
info : dict
with fields ``min`` (minimum value), ``max`` (maximum value), ``nexp``
(exponent width), ``nmant`` (significand precision not including
implicit first digit), ``minexp`` (minimum exponent), ``maxexp``
(maximum exponent), ``width`` (width in bytes). (``nexp``, ``nmant``,
``minexp``, ``maxexp``) are None for integer types. Both ``min`` and
``max`` are of type `np_type`.
Raises
------
FloatingError
for floating point types we don't recognize
Notes
-----
You might be thinking that ``np.finfo`` does this job, and it does, except
for PPC long doubles (https://github.com/numpy/numpy/issues/2669) and
float96 on Windows compiled with Mingw. This routine protects against such
errors in ``np.finfo`` by only accepting values that we know are likely to
be correct.
"""
dt = np.dtype(np_type)
np_type = dt.type
width = dt.itemsize
try: # integer type
info = np.iinfo(dt)
except ValueError:
pass
else:
return dict(min=np_type(info.min), max=np_type(info.max), minexp=None,
maxexp=None, nmant=None, nexp=None, width=width)
info = np.finfo(dt)
# Trust the standard IEEE types
nmant, nexp = info.nmant, info.nexp
ret = dict(min=np_type(info.min),
max=np_type(info.max),
nmant=nmant,
nexp=nexp,
minexp=info.minexp,
maxexp=info.maxexp,
width=width)
if np_type in (np.float16, np.float32, np.float64,
np.complex64, np.complex128):
return ret
info_64 = np.finfo(np.float64)
if dt.kind == 'c':
assert np_type is np.longcomplex
vals = (nmant, nexp, width / 2)
else:
assert np_type is np.longdouble
vals = (nmant, nexp, width)
if vals in ((112, 15, 16), # binary128
(info_64.nmant, info_64.nexp, 8), # float64
(63, 15, 12), (63, 15, 16)): # Intel extended 80
return ret # these are OK without modification
# The remaining types are longdoubles with bad finfo values. Some we
# correct, others we wait to hear of errors.
# We start with float64 as basis
ret = type_info(np.float64)
if vals in ((52, 15, 12), # windows float96
(52, 15, 16)): # windows float128?
# On windows 32 bit at least, float96 is Intel 80 storage but operating
# at float64 precision. The finfo values give nexp == 15 (as for intel
# 80) but in calculations nexp in fact appears to be 11 as for float64
ret.update(dict(width=width))
return ret
if vals == (105, 11, 16): # correctly detected double double
ret.update(dict(nmant=nmant, nexp=nexp, width=width))
return ret
# Oh dear, we don't recognize the type information. Try some known types
# and then give up. At this stage we're expecting exotic longdouble or
# their complex equivalent.
if np_type not in (np.longdouble, np.longcomplex) or width not in (16, 32):
raise FloatingError(f'We had not expected type {np_type}')
if (vals == (1, 1, 16) and on_powerpc() and
_check_maxexp(np.longdouble, 1024)):
# double pair on PPC. The _check_nmant routine does not work for this
# type, hence the powerpc platform check instead
ret.update(dict(nmant=106, width=width))
elif (_check_nmant(np.longdouble, 52) and
_check_maxexp(np.longdouble, 11)):
# Got float64 despite everything
pass
elif (_check_nmant(np.longdouble, 112) and
_check_maxexp(np.longdouble, 16384)):
# binary 128, but with some busted type information. np.longcomplex
# seems to break here too, so we need to use np.longdouble and
# complexify
two = np.longdouble(2)
# See: https://matthew-brett.github.io/pydagogue/floating_point.html
max_val = (two ** 113 - 1) / (two ** 112) * two ** 16383
if np_type is np.longcomplex:
max_val += 0j
ret = dict(min=-max_val,
max=max_val,
nmant=112,
nexp=15,
minexp=-16382,
maxexp=16384,
width=width)
else: # don't recognize the type
raise FloatingError(f'We had not expected long double type {np_type} with info {info}')
return ret
|
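The binary128 maximum above follows the usual IEEE pattern max = (2**p - 1) / 2**(p - 1) * 2**emax with p significand bits (p = nmant + 1, so p = 113 and emax = 16383 for binary128). The same formula reproduces np.finfo for float64, which is a quick way to sanity-check it:

import numpy as np

def ieee_max(p: int, emax: int) -> float:
    # Largest finite value for a format with p significand bits and max exponent emax
    return (2.0 ** p - 1) / (2.0 ** (p - 1)) * 2.0 ** emax

# float64 has p = 53 (nmant = 52) and emax = 1023
assert ieee_max(53, 1023) == np.finfo(np.float64).max
print(ieee_max(53, 1023))  # 1.7976931348623157e+308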
29,610 |
def test_nprocs_negative(loop):
with popen(["dask-scheduler", "--no-dashboard"]) as sched:
with popen(["dask-worker", "127.0.0.1:8786", "--nprocs=-1"]) as worker:
with Client("tcp://127.0.0.1:8786", loop=loop) as c:
start = time()
cpus = cpu_count()
while len(c.scheduler_info()["workers"]) != cpus:
sleep(0.2)
assert time() < start + 10
|
def test_nprocs_negative(loop):
with popen(["dask-scheduler", "--no-dashboard"]) as sched:
with popen(["dask-worker", "127.0.0.1:8786", "--nprocs=-1"]) as worker:
with Client("tcp://127.0.0.1:8786", loop=loop) as c:
c.wait_for_workers(cpu_count(), timeout="10 seconds")
|
12,758 |
def test_workflow_jobs(python_versions: list[str], *, cron: bool) -> Jobs:
linux_x86_64_helper = Helper(Platform.LINUX_X86_64)
jobs: dict[str, Any] = {
"check_labels": {
"name": "Ensure PR has a category label",
"runs-on": linux_x86_64_helper.runs_on(),
"if": IS_PANTS_OWNER,
"steps": ensure_category_label(),
},
}
jobs.update(**linux_x86_64_jobs(python_versions, cron=cron))
jobs.update(**macos11_x86_64_jobs(python_versions, cron=cron))
if not cron:
jobs.update(**macos_10_15_x86_64_jobs(python_versions))
jobs.update(**macos11_arm64_jobs())
jobs.update(
{
"lint_python": {
"name": "Lint Python and Shell",
"runs-on": linux_x86_64_helper.runs_on(),
"needs": "bootstrap_pants_linux_x86_64",
"strategy": {"matrix": {"python-version": python_versions}},
"timeout-minutes": 30,
"if": IS_PANTS_OWNER,
"steps": [
*checkout(),
*setup_primary_python(),
linux_x86_64_helper.native_binaries_download(),
setup_toolchain_auth(),
{
"name": "Lint",
"run": (
# Note: we use `**` rather than `::` because regex-lint.
"./pants lint check '**'\n"
),
},
linux_x86_64_helper.upload_log_artifacts(name="lint"),
],
},
}
)
return jobs
|
def test_workflow_jobs(python_versions: list[str], *, cron: bool) -> Jobs:
linux_x86_64_helper = Helper(Platform.LINUX_X86_64)
jobs: dict[str, Any] = {
"check_labels": {
"name": "Ensure PR has a category label",
"runs-on": linux_x86_64_helper.runs_on(),
"if": IS_PANTS_OWNER,
"steps": ensure_category_label(),
},
}
jobs.update(**linux_x86_64_jobs(python_versions, cron=cron))
jobs.update(**macos11_x86_64_jobs(python_versions, cron=cron))
if not cron:
jobs.update(**macos_10_15_x86_64_jobs(python_versions))
jobs.update(**macos11_arm64_jobs())
jobs.update(
{
"lint_python": {
"name": "Lint Python and Shell",
"runs-on": linux_x86_64_helper.runs_on(),
"needs": "bootstrap_pants_linux_x86_64",
"strategy": {"matrix": {"python-version": python_versions}},
"timeout-minutes": 30,
"if": IS_PANTS_OWNER,
"steps": [
*checkout(),
*setup_primary_python(),
linux_x86_64_helper.native_binaries_download(),
setup_toolchain_auth(),
{
"name": "Lint",
"run": (
"./pants lint check ::\n"
),
},
linux_x86_64_helper.upload_log_artifacts(name="lint"),
],
},
}
)
return jobs
|
39,135 |
def create_fb_matrix(
n_freqs: int,
f_min: float,
f_max: float,
n_mels: int,
sample_rate: int,
norm: Optional[str] = None
) -> Tensor:
r"""Create a frequency bin conversion matrix.
Args:
n_freqs (int): Number of frequencies to highlight/apply
f_min (float): Minimum frequency (Hz)
f_max (float): Maximum frequency (Hz)
n_mels (int): Number of mel filterbanks
sample_rate (int): Sample rate of the audio waveform
norm (Optional[str]): If 'slaney', divide the triangular mel weights by the width of the mel band
(area normalization). (Default: ``None``)
Returns:
Tensor: Triangular filter banks (fb matrix) of size (``n_freqs``, ``n_mels``)
meaning number of frequencies to highlight/apply to x the number of filterbanks.
Each column is a filterbank so that assuming there is a matrix A of
size (..., ``n_freqs``), the applied result would be
``A * create_fb_matrix(A.size(-1), ...)``.
"""
if norm is not None and norm != "slaney":
raise ValueError("norm must be one of None or 'slaney'")
# freq bins
# Equivalent filterbank construction by Librosa
all_freqs = torch.linspace(0, sample_rate // 2, n_freqs)
# calculate mel freq bins
# hertz to mel(f) is 2595. * math.log10(1. + (f / 700.))
m_min = 2595.0 * math.log10(1.0 + (f_min / 700.0))
m_max = 2595.0 * math.log10(1.0 + (f_max / 700.0))
m_pts = torch.linspace(m_min, m_max, n_mels + 2)
# mel to hertz(mel) is 700. * (10**(mel / 2595.) - 1.)
f_pts = 700.0 * (10 ** (m_pts / 2595.0) - 1.0)
# calculate the difference between each mel point and each stft freq point in hertz
f_diff = f_pts[1:] - f_pts[:-1] # (n_mels + 1)
slopes = f_pts.unsqueeze(0) - all_freqs.unsqueeze(1) # (n_freqs, n_mels + 2)
# create overlapping triangles
zero = torch.zeros(1)
down_slopes = (-1.0 * slopes[:, :-2]) / f_diff[:-1] # (n_freqs, n_mels)
up_slopes = slopes[:, 2:] / f_diff[1:] # (n_freqs, n_mels)
fb = torch.max(zero, torch.min(down_slopes, up_slopes))
if norm is not None and norm == "slaney":
# Slaney-style mel is scaled to be approx constant energy per channel
enorm = 2.0 / (f_pts[2:n_mels + 2] - f_pts[:n_mels])
fb *= enorm.unsqueeze(0)
if (fb.max(dim=0).values == 0.).any():
warnings.warn(
"At least one mel filterbank has all zero values."
"The value for `n_mels` ({}) may be set too high."
"Or, the value for `n_freqs` ({}) may be set too low.".format(n_mels, n_freqs)
)
return fb
|
def create_fb_matrix(
n_freqs: int,
f_min: float,
f_max: float,
n_mels: int,
sample_rate: int,
norm: Optional[str] = None
) -> Tensor:
r"""Create a frequency bin conversion matrix.
Args:
n_freqs (int): Number of frequencies to highlight/apply
f_min (float): Minimum frequency (Hz)
f_max (float): Maximum frequency (Hz)
n_mels (int): Number of mel filterbanks
sample_rate (int): Sample rate of the audio waveform
norm (Optional[str]): If 'slaney', divide the triangular mel weights by the width of the mel band
(area normalization). (Default: ``None``)
Returns:
Tensor: Triangular filter banks (fb matrix) of size (``n_freqs``, ``n_mels``)
meaning number of frequencies to highlight/apply to x the number of filterbanks.
Each column is a filterbank so that assuming there is a matrix A of
size (..., ``n_freqs``), the applied result would be
``A * create_fb_matrix(A.size(-1), ...)``.
"""
if norm is not None and norm != "slaney":
raise ValueError("norm must be one of None or 'slaney'")
# freq bins
# Equivalent filterbank construction by Librosa
all_freqs = torch.linspace(0, sample_rate // 2, n_freqs)
# calculate mel freq bins
# hertz to mel(f) is 2595. * math.log10(1. + (f / 700.))
m_min = 2595.0 * math.log10(1.0 + (f_min / 700.0))
m_max = 2595.0 * math.log10(1.0 + (f_max / 700.0))
m_pts = torch.linspace(m_min, m_max, n_mels + 2)
# mel to hertz(mel) is 700. * (10**(mel / 2595.) - 1.)
f_pts = 700.0 * (10 ** (m_pts / 2595.0) - 1.0)
# calculate the difference between each mel point and each stft freq point in hertz
f_diff = f_pts[1:] - f_pts[:-1] # (n_mels + 1)
slopes = f_pts.unsqueeze(0) - all_freqs.unsqueeze(1) # (n_freqs, n_mels + 2)
# create overlapping triangles
zero = torch.zeros(1)
down_slopes = (-1.0 * slopes[:, :-2]) / f_diff[:-1] # (n_freqs, n_mels)
up_slopes = slopes[:, 2:] / f_diff[1:] # (n_freqs, n_mels)
fb = torch.max(zero, torch.min(down_slopes, up_slopes))
if norm is not None and norm == "slaney":
# Slaney-style mel is scaled to be approx constant energy per channel
enorm = 2.0 / (f_pts[2:n_mels + 2] - f_pts[:n_mels])
fb *= enorm.unsqueeze(0)
if (fb.max(dim=0).values == 0.).any():
warnings.warn(
"At least one mel filterbank has all zero values. "
"The value for `n_mels` ({}) may be set too high."
"Or, the value for `n_freqs` ({}) may be set too low.".format(n_mels, n_freqs)
)
return fb
|
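A short usage sketch for create_fb_matrix as defined above (torch and the function itself are assumed importable): build a 40-band, Slaney-normalized filterbank for 16 kHz audio and apply it to a spectrogram with a matmul over the last dimension, exactly as the docstring's ``A * create_fb_matrix(A.size(-1), ...)`` note describes.
# Usage sketch for the create_fb_matrix defined above.
import torch

n_fft = 400
n_freqs = n_fft // 2 + 1  # 201 frequency bins for a 400-point FFT
fb = create_fb_matrix(
    n_freqs=n_freqs,
    f_min=0.0,
    f_max=8000.0,
    n_mels=40,
    sample_rate=16000,
    norm="slaney",
)
print(fb.shape)  # torch.Size([201, 40])

# Apply to a (batch, time, n_freqs) spectrogram: matmul over the last dimension.
spec = torch.rand(1, 100, n_freqs)
mel_spec = torch.matmul(spec, fb)  # -> (1, 100, 40)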
49,952 |
def _save(im, fp, filename):
try:
rawmode, prefix, photo, format, bits, extra = SAVE_INFO[im.mode]
except KeyError:
raise OSError("cannot write mode %s as TIFF" % im.mode)
ifd = ImageFileDirectory_v2(prefix=prefix)
compression = im.encoderinfo.get("compression", im.info.get("compression"))
if compression is None:
compression = "raw"
libtiff = WRITE_LIBTIFF or compression != "raw"
# required for color libtiff images
ifd[PLANAR_CONFIGURATION] = getattr(im, "_planar_configuration", 1)
ifd[IMAGEWIDTH] = im.size[0]
ifd[IMAGELENGTH] = im.size[1]
# write any arbitrary tags passed in as an ImageFileDirectory
info = im.encoderinfo.get("tiffinfo", {})
if DEBUG:
print("Tiffinfo Keys: %s" % list(info))
if isinstance(info, ImageFileDirectory_v1):
info = info.to_v2()
for key in info:
ifd[key] = info.get(key)
try:
ifd.tagtype[key] = info.tagtype[key]
except Exception:
pass # might not be an IFD. Might not have populated type
# additions written by Greg Couch, gregc@cgl.ucsf.edu
# inspired by image-sig posting from Kevin Cazabon, kcazabon@home.com
if hasattr(im, "tag_v2"):
# preserve tags from original TIFF image file
for key in (
RESOLUTION_UNIT,
X_RESOLUTION,
Y_RESOLUTION,
IPTC_NAA_CHUNK,
PHOTOSHOP_CHUNK,
XMP,
):
if key in im.tag_v2:
ifd[key] = im.tag_v2[key]
ifd.tagtype[key] = im.tag_v2.tagtype[key]
# preserve ICC profile (should also work when saving other formats
# which support profiles as TIFF) -- 2008-06-06 Florian Hoech
if "icc_profile" in im.info:
ifd[ICCPROFILE] = im.info["icc_profile"]
for key, name in [
(IMAGEDESCRIPTION, "description"),
(X_RESOLUTION, "resolution"),
(Y_RESOLUTION, "resolution"),
(X_RESOLUTION, "x_resolution"),
(Y_RESOLUTION, "y_resolution"),
(RESOLUTION_UNIT, "resolution_unit"),
(SOFTWARE, "software"),
(DATE_TIME, "date_time"),
(ARTIST, "artist"),
(COPYRIGHT, "copyright"),
]:
if name in im.encoderinfo:
ifd[key] = im.encoderinfo[name]
dpi = im.encoderinfo.get("dpi")
if dpi:
ifd[RESOLUTION_UNIT] = 2
ifd[X_RESOLUTION] = int(dpi[0] + 0.5)
ifd[Y_RESOLUTION] = int(dpi[1] + 0.5)
if bits != (1,):
ifd[BITSPERSAMPLE] = bits
if len(bits) != 1:
ifd[SAMPLESPERPIXEL] = len(bits)
if extra is not None:
ifd[EXTRASAMPLES] = extra
if format != 1:
ifd[SAMPLEFORMAT] = format
ifd[PHOTOMETRIC_INTERPRETATION] = photo
if im.mode in ["P", "PA"]:
lut = im.im.getpalette("RGB", "RGB;L")
ifd[COLORMAP] = tuple(i8(v) * 256 for v in lut)
# data orientation
stride = len(bits) * ((im.size[0] * bits[0] + 7) // 8)
ifd[ROWSPERSTRIP] = im.size[1]
stripByteCounts = stride * im.size[1]
if stripByteCounts >= 2 ** 16:
ifd.tagtype[STRIPBYTECOUNTS] = TiffTags.LONG
ifd[STRIPBYTECOUNTS] = stripByteCounts
ifd[STRIPOFFSETS] = 0 # this is adjusted by IFD writer
# no compression by default:
ifd[COMPRESSION] = COMPRESSION_INFO_REV.get(compression, 1)
if libtiff:
if "quality" in im.encoderinfo:
quality = im.encoderinfo["quality"]
if not isinstance(quality, int) or quality < 0 or quality > 100:
raise ValueError("Invalid quality setting")
if compression != "jpeg":
raise ValueError(
"quality setting only supported for 'jpeg' compression"
)
ifd[JPEGQUALITY] = quality
if DEBUG:
print("Saving using libtiff encoder")
print("Items: %s" % sorted(ifd.items()))
_fp = 0
if hasattr(fp, "fileno"):
try:
fp.seek(0)
_fp = os.dup(fp.fileno())
except io.UnsupportedOperation:
pass
# optional types for non core tags
types = {}
# SAMPLEFORMAT is determined by the image format and should not be copied
# from legacy_ifd.
# STRIPOFFSETS and STRIPBYTECOUNTS are added by the library
# based on the data in the strip.
# The other tags expect arrays with a certain length (fixed or depending on
# BITSPERSAMPLE, etc), passing arrays with a different length will result in
# segfaults. Block these tags until we add extra validation.
blocklist = [
COLORMAP,
REFERENCEBLACKWHITE,
SAMPLEFORMAT,
STRIPBYTECOUNTS,
STRIPOFFSETS,
TRANSFERFUNCTION,
]
atts = {}
# bits per sample is a single short in the tiff directory, not a list.
atts[BITSPERSAMPLE] = bits[0]
# Merge the ones that we have with (optional) more bits from
# the original file, e.g x,y resolution so that we can
# save(load('')) == original file.
legacy_ifd = {}
if hasattr(im, "tag"):
legacy_ifd = im.tag.to_v2()
for tag, value in itertools.chain(
ifd.items(), getattr(im, "tag_v2", {}).items(), legacy_ifd.items()
):
# Libtiff can only process certain core items without adding
# them to the custom dictionary.
# Custom items are supported for int, float, unicode, string and byte
# values. Other types and tuples require a tagtype.
if tag not in TiffTags.LIBTIFF_CORE:
if (
TiffTags.lookup(tag).type == TiffTags.UNDEFINED
or not Image.core.libtiff_support_custom_tags
):
continue
if tag in ifd.tagtype:
types[tag] = ifd.tagtype[tag]
elif not (isinstance(value, (int, float, str, bytes))):
continue
if tag not in atts and tag not in blocklist:
if isinstance(value, str):
atts[tag] = value.encode("ascii", "replace") + b"\0"
elif isinstance(value, IFDRational):
atts[tag] = float(value)
else:
atts[tag] = value
if DEBUG:
print("Converted items: %s" % sorted(atts.items()))
# libtiff always expects the bytes in native order.
# we're storing image byte order. So, if the rawmode
# contains I;16, we need to convert from native to image
# byte order.
if im.mode in ("I;16B", "I;16"):
rawmode = "I;16N"
# Pass tags as sorted list so that the tags are set in a fixed order.
# This is required by libtiff for some tags. For example, the JPEGQUALITY
# pseudo tag requires that the COMPRESS tag was already set.
tags = list(atts.items())
tags.sort()
a = (rawmode, compression, _fp, filename, tags, types)
e = Image._getencoder(im.mode, "libtiff", a, im.encoderconfig)
e.setimage(im.im, (0, 0) + im.size)
while True:
# undone, change to self.decodermaxblock:
l, s, d = e.encode(16 * 1024)
if not _fp:
fp.write(d)
if s:
break
if s < 0:
raise OSError("encoder error %d when writing image file" % s)
else:
offset = ifd.save(fp)
ImageFile._save(
im, fp, [("raw", (0, 0) + im.size, offset, (rawmode, stride, 1))]
)
# -- helper for multi-page save --
if "_debug_multipage" in im.encoderinfo:
# just to access o32 and o16 (using correct byte order)
im._debug_multipage = ifd
|
def _save(im, fp, filename):
try:
rawmode, prefix, photo, format, bits, extra = SAVE_INFO[im.mode]
except KeyError:
raise OSError("cannot write mode %s as TIFF" % im.mode)
ifd = ImageFileDirectory_v2(prefix=prefix)
compression = im.encoderinfo.get("compression", im.info.get("compression"))
if compression is None:
compression = "raw"
libtiff = WRITE_LIBTIFF or compression != "raw"
# required for color libtiff images
ifd[PLANAR_CONFIGURATION] = getattr(im, "_planar_configuration", 1)
ifd[IMAGEWIDTH] = im.size[0]
ifd[IMAGELENGTH] = im.size[1]
# write any arbitrary tags passed in as an ImageFileDirectory
info = im.encoderinfo.get("tiffinfo", {})
if DEBUG:
print("Tiffinfo Keys: %s" % list(info))
if isinstance(info, ImageFileDirectory_v1):
info = info.to_v2()
for key in info:
ifd[key] = info.get(key)
try:
ifd.tagtype[key] = info.tagtype[key]
except Exception:
pass # might not be an IFD. Might not have populated type
# additions written by Greg Couch, gregc@cgl.ucsf.edu
# inspired by image-sig posting from Kevin Cazabon, kcazabon@home.com
if hasattr(im, "tag_v2"):
# preserve tags from original TIFF image file
for key in (
RESOLUTION_UNIT,
X_RESOLUTION,
Y_RESOLUTION,
IPTC_NAA_CHUNK,
PHOTOSHOP_CHUNK,
XMP,
):
if key in im.tag_v2:
ifd[key] = im.tag_v2[key]
ifd.tagtype[key] = im.tag_v2.tagtype[key]
# preserve ICC profile (should also work when saving other formats
# which support profiles as TIFF) -- 2008-06-06 Florian Hoech
if "icc_profile" in im.info:
ifd[ICCPROFILE] = im.info["icc_profile"]
for key, name in [
(IMAGEDESCRIPTION, "description"),
(X_RESOLUTION, "resolution"),
(Y_RESOLUTION, "resolution"),
(X_RESOLUTION, "x_resolution"),
(Y_RESOLUTION, "y_resolution"),
(RESOLUTION_UNIT, "resolution_unit"),
(SOFTWARE, "software"),
(DATE_TIME, "date_time"),
(ARTIST, "artist"),
(COPYRIGHT, "copyright"),
]:
if name in im.encoderinfo:
ifd[key] = im.encoderinfo[name]
dpi = im.encoderinfo.get("dpi")
if dpi:
ifd[RESOLUTION_UNIT] = 2
ifd[X_RESOLUTION] = int(dpi[0] + 0.5)
ifd[Y_RESOLUTION] = int(dpi[1] + 0.5)
if bits != (1,):
ifd[BITSPERSAMPLE] = bits
if len(bits) != 1:
ifd[SAMPLESPERPIXEL] = len(bits)
if extra is not None:
ifd[EXTRASAMPLES] = extra
if format != 1:
ifd[SAMPLEFORMAT] = format
ifd[PHOTOMETRIC_INTERPRETATION] = photo
if im.mode in ["P", "PA"]:
lut = im.im.getpalette("RGB", "RGB;L")
ifd[COLORMAP] = tuple(i8(v) * 256 for v in lut)
# data orientation
stride = len(bits) * ((im.size[0] * bits[0] + 7) // 8)
ifd[ROWSPERSTRIP] = im.size[1]
strip_byte_counts = stride * im.size[1]
if strip_byte_counts >= 2 ** 16:
ifd.tagtype[STRIPBYTECOUNTS] = TiffTags.LONG
ifd[STRIPBYTECOUNTS] = strip_byte_counts
ifd[STRIPOFFSETS] = 0 # this is adjusted by IFD writer
# no compression by default:
ifd[COMPRESSION] = COMPRESSION_INFO_REV.get(compression, 1)
if libtiff:
if "quality" in im.encoderinfo:
quality = im.encoderinfo["quality"]
if not isinstance(quality, int) or quality < 0 or quality > 100:
raise ValueError("Invalid quality setting")
if compression != "jpeg":
raise ValueError(
"quality setting only supported for 'jpeg' compression"
)
ifd[JPEGQUALITY] = quality
if DEBUG:
print("Saving using libtiff encoder")
print("Items: %s" % sorted(ifd.items()))
_fp = 0
if hasattr(fp, "fileno"):
try:
fp.seek(0)
_fp = os.dup(fp.fileno())
except io.UnsupportedOperation:
pass
# optional types for non core tags
types = {}
# SAMPLEFORMAT is determined by the image format and should not be copied
# from legacy_ifd.
# STRIPOFFSETS and STRIPBYTECOUNTS are added by the library
# based on the data in the strip.
# The other tags expect arrays with a certain length (fixed or depending on
# BITSPERSAMPLE, etc), passing arrays with a different length will result in
# segfaults. Block these tags until we add extra validation.
blocklist = [
COLORMAP,
REFERENCEBLACKWHITE,
SAMPLEFORMAT,
STRIPBYTECOUNTS,
STRIPOFFSETS,
TRANSFERFUNCTION,
]
atts = {}
# bits per sample is a single short in the tiff directory, not a list.
atts[BITSPERSAMPLE] = bits[0]
# Merge the ones that we have with (optional) more bits from
# the original file, e.g x,y resolution so that we can
# save(load('')) == original file.
legacy_ifd = {}
if hasattr(im, "tag"):
legacy_ifd = im.tag.to_v2()
for tag, value in itertools.chain(
ifd.items(), getattr(im, "tag_v2", {}).items(), legacy_ifd.items()
):
# Libtiff can only process certain core items without adding
# them to the custom dictionary.
# Custom items are supported for int, float, unicode, string and byte
# values. Other types and tuples require a tagtype.
if tag not in TiffTags.LIBTIFF_CORE:
if (
TiffTags.lookup(tag).type == TiffTags.UNDEFINED
or not Image.core.libtiff_support_custom_tags
):
continue
if tag in ifd.tagtype:
types[tag] = ifd.tagtype[tag]
elif not (isinstance(value, (int, float, str, bytes))):
continue
if tag not in atts and tag not in blocklist:
if isinstance(value, str):
atts[tag] = value.encode("ascii", "replace") + b"\0"
elif isinstance(value, IFDRational):
atts[tag] = float(value)
else:
atts[tag] = value
if DEBUG:
print("Converted items: %s" % sorted(atts.items()))
# libtiff always expects the bytes in native order.
# we're storing image byte order. So, if the rawmode
# contains I;16, we need to convert from native to image
# byte order.
if im.mode in ("I;16B", "I;16"):
rawmode = "I;16N"
# Pass tags as sorted list so that the tags are set in a fixed order.
# This is required by libtiff for some tags. For example, the JPEGQUALITY
# pseudo tag requires that the COMPRESS tag was already set.
tags = list(atts.items())
tags.sort()
a = (rawmode, compression, _fp, filename, tags, types)
e = Image._getencoder(im.mode, "libtiff", a, im.encoderconfig)
e.setimage(im.im, (0, 0) + im.size)
while True:
# undone, change to self.decodermaxblock:
l, s, d = e.encode(16 * 1024)
if not _fp:
fp.write(d)
if s:
break
if s < 0:
raise OSError("encoder error %d when writing image file" % s)
else:
offset = ifd.save(fp)
ImageFile._save(
im, fp, [("raw", (0, 0) + im.size, offset, (rawmode, stride, 1))]
)
# -- helper for multi-page save --
if "_debug_multipage" in im.encoderinfo:
# just to access o32 and o16 (using correct byte order)
im._debug_multipage = ifd
|
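For orientation: Pillow calls a plugin's _save through Image.save(), and the keyword arguments end up in im.encoderinfo, which is where the compression, dpi, tiffinfo and quality keys read above come from. A small usage sketch; the file name and tag value are only illustrative:
# Usage sketch: keyword arguments to Image.save() become the encoderinfo
# keys that _save() reads above.
from PIL import Image, TiffImagePlugin

im = Image.new("RGB", (64, 64), "white")

info = TiffImagePlugin.ImageFileDirectory_v2()
info[270] = "written by an example script"  # tag 270 = ImageDescription

im.save(
    "example.tif",
    compression="tiff_lzw",  # anything other than "raw" routes through libtiff
    dpi=(300, 300),          # becomes X_RESOLUTION / Y_RESOLUTION above
    tiffinfo=info,           # merged into the IFD tag by tag
)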
52,712 |
def update_local_associations(
sydent: Sydent,
db: sqlite3.Connection,
send_email: bool,
dry_run: bool,
test: bool = False,
) -> None:
"""Update the DB table local_threepid_associations so that all stored
emails are casefolded, and any duplicate mxid's associated with the
given email are deleted.
Setting dry_run to True means that the script is being run in dry-run mode
by the user, i.e. it will run but will not send any email nor update the database.
Setting test to True means that the function is being called as part of an automated
test, and therefore we shouldn't backoff when sending emails.
:return: None
"""
logger.info("Processing rows in local_threepid_associations")
res = db.execute(
"SELECT address, mxid FROM local_threepid_associations WHERE medium = 'email'"
"ORDER BY ts DESC"
)
# a dict that associates an email address with corresponding mxids and lookup hashes
associations: Dict[str, List[Tuple[str, str, str]]] = {}
logger.info("Computing new hashes and signatures for local_threepid_associations")
# iterate through selected associations, casefold email, rehash it, and add to
# associations dict
for address, mxid in res.fetchall():
casefold_address = address.casefold()
# rehash email since hashes are case-sensitive
lookup_hash = calculate_lookup_hash(sydent, casefold_address)
if casefold_address in associations:
associations[casefold_address].append((address, mxid, lookup_hash))
else:
associations[casefold_address] = [(address, mxid, lookup_hash)]
# Deltas to apply to the database, associated with the casefolded address they're for.
deltas: Dict[str, Delta] = {}
# Iterate through the results, to build the deltas.
for casefold_address, assoc_tuples in associations.items():
# If the row is already in the right state and there's no duplicate, don't compute
# a delta for it.
if len(assoc_tuples) == 1 and assoc_tuples[0][0] == casefold_address:
continue
deltas[casefold_address] = Delta(
to_update=UpdateDelta(
address=assoc_tuples[0][0],
mxid=assoc_tuples[0][1],
lookup_hash=assoc_tuples[0][2],
)
)
if len(assoc_tuples) > 1:
# Iterate over all associations except for the first one, since we've already
# processed it.
deltas[casefold_address].to_delete = []
for address, mxid, _ in assoc_tuples[1:]:
deltas[casefold_address].to_delete.append(
DeleteDelta(
address=address,
mxid=mxid,
)
)
logger.info(f"{len(deltas)} rows to update in local_threepid_associations")
# Apply the deltas
for casefolded_address, delta in deltas.items():
if not test:
log_msg = f"Updating {casefolded_address}"
if delta.to_delete is not None:
log_msg += (
f" and deleting {len(delta.to_delete)} rows associated with it"
)
logger.info(log_msg)
try:
# Delete each association, and send an email mentioning the affected MXID.
if delta.to_delete is not None:
for to_delete in delta.to_delete:
if send_email and not dry_run:
# If the MXID is one that will still be associated with this
# email address after this run, don't send an email for it.
if to_delete.mxid == delta.to_update.mxid:
continue
sendEmailWithBackoff(
sydent,
to_delete.address,
to_delete.mxid,
test=test,
)
if not dry_run:
cur = db.cursor()
cur.execute(
"DELETE FROM local_threepid_associations WHERE medium = 'email' AND address = ?",
(to_delete.address,),
)
db.commit()
logger.info(
f"Deleting {to_delete.address} from table local_threepid_associations"
)
# Update the row now that there's no duplicate.
if not dry_run:
cur = db.cursor()
cur.execute(
"UPDATE local_threepid_associations SET address = ?, lookup_hash = ? WHERE medium = 'email' AND address = ? AND mxid = ?",
(
casefolded_address,
delta.to_update.lookup_hash,
delta.to_update.address,
delta.to_update.mxid,
),
)
logger.info(
f"Updating table local threepid associations setting address to {casefolded_address},"
f"lookup_hash to {delta.to_update.lookup_hash}, where medium = email and address = {delta.to_update.address} and mxid = {delta.to_update.mxid}"
)
db.commit()
except CantSendEmailException:
# If we failed because we couldn't send an email move on to the next address
# to de-duplicate.
# We catch this error here rather than when sending the email because we want
# to avoid deleting rows we can't warn users about, and we don't want to
# proceed with the subsequent update because there might still be duplicates
# in the database (since we haven't deleted everything we wanted to delete).
logger.info(f"Failed to send email to {to_delete.address}")
continue
|
def update_local_associations(
sydent: Sydent,
db: sqlite3.Connection,
send_email: bool,
dry_run: bool,
test: bool = False,
) -> None:
"""Update the DB table local_threepid_associations so that all stored
emails are casefolded, and any duplicate mxid's associated with the
given email are deleted.
Setting dry_run to True means that the script is being run in dry-run mode
by the user, i.e. it will run but will not send any email nor update the database.
Setting test to True means that the function is being called as part of an automated
test, and therefore we shouldn't backoff when sending emails.
:return: None
"""
logger.info("Processing rows in local_threepid_associations")
res = db.execute(
"SELECT address, mxid FROM local_threepid_associations WHERE medium = 'email'"
"ORDER BY ts DESC"
)
# a dict that associates an email address with corresponding mxids and lookup hashes
associations: Dict[str, List[Tuple[str, str, str]]] = {}
logger.info("Computing new hashes and signatures for local_threepid_associations")
# iterate through selected associations, casefold email, rehash it, and add to
# associations dict
for address, mxid in res.fetchall():
casefold_address = address.casefold()
# rehash email since hashes are case-sensitive
lookup_hash = calculate_lookup_hash(sydent, casefold_address)
if casefold_address in associations:
associations[casefold_address].append((address, mxid, lookup_hash))
else:
associations[casefold_address] = [(address, mxid, lookup_hash)]
# Deltas to apply to the database, associated with the casefolded address they're for.
deltas: Dict[str, Delta] = {}
# Iterate through the results, to build the deltas.
for casefold_address, assoc_tuples in associations.items():
# If the row is already in the right state and there's no duplicate, don't compute
# a delta for it.
if len(assoc_tuples) == 1 and assoc_tuples[0][0] == casefold_address:
continue
deltas[casefold_address] = Delta(
to_update=UpdateDelta(
address=assoc_tuples[0][0],
mxid=assoc_tuples[0][1],
lookup_hash=assoc_tuples[0][2],
)
)
if len(assoc_tuples) > 1:
# Iterate over all associations except for the first one, since we've already
# processed it.
deltas[casefold_address].to_delete = []
for address, mxid, _ in assoc_tuples[1:]:
deltas[casefold_address].to_delete.append(
DeleteDelta(
address=address,
mxid=mxid,
)
)
logger.info(f"{len(deltas)} rows to update in local_threepid_associations")
# Apply the deltas
for casefolded_address, delta in deltas.items():
if not test:
log_msg = f"Updating {casefolded_address}"
if delta.to_delete is not None:
log_msg += (
f" and deleting {len(delta.to_delete)} rows associated with it"
)
logger.info(log_msg)
try:
# Delete each association, and send an email mentioning the affected MXID.
if delta.to_delete is not None:
for to_delete in delta.to_delete:
if send_email and not dry_run:
# If the MXID is one that will still be associated with this
# email address after this run, don't send an email for it.
if to_delete.mxid == delta.to_update.mxid:
continue
sendEmailWithBackoff(
sydent,
to_delete.address,
to_delete.mxid,
test=test,
)
if not dry_run:
cur = db.cursor()
cur.execute(
"DELETE FROM local_threepid_associations WHERE medium = 'email' AND address = ?",
(to_delete.address,),
)
db.commit()
logger.info(
f"Deleting {to_delete.address} from table local_threepid_associations"
)
# Update the row now that there's no duplicate.
if not dry_run:
cur = db.cursor()
cur.execute(
"UPDATE local_threepid_associations SET address = ?, lookup_hash = ? WHERE medium = 'email' AND address = ? AND mxid = ?",
(
casefolded_address,
delta.to_update.lookup_hash,
delta.to_update.address,
delta.to_update.mxid,
),
)
logger.debug(
"Updating table local threepid associations setting address to %s, "
"lookup_hash to %s, where medium = email and address = %s and mxid = %s",
casefolded_address,
delta.to_update.lookup_hash,
delta.to_update.address,
delta.to_update.mxid
)
db.commit()
except CantSendEmailException:
# If we failed because we couldn't send an email move on to the next address
# to de-duplicate.
# We catch this error here rather than when sending the email because we want
# to avoid deleting rows we can't warn users about, and we don't want to
# proceed with the subsequent update because there might still be duplicates
# in the database (since we haven't deleted everything we wanted to delete).
logger.info(f"Failed to send email to {to_delete.address}")
continue
|
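The core of the migration above is grouping rows by their casefolded address while keeping only the most recent association. A standalone sketch of just that grouping step, with made-up rows ordered most-recent-first to mirror the ORDER BY ts DESC query:
# Standalone illustration of the grouping logic; rows are (address, mxid)
# pairs assumed to be ordered most-recent-first.
from collections import defaultdict

rows = [
    ("Alice@Example.com", "@alice:example.com"),
    ("alice@example.com", "@alice2:example.com"),
    ("bob@example.com", "@bob:example.com"),
]

by_casefolded = defaultdict(list)
for address, mxid in rows:
    by_casefolded[address.casefold()].append((address, mxid))

for casefolded, assocs in by_casefolded.items():
    keep, duplicates = assocs[0], assocs[1:]
    print(casefolded, "-> keep", keep, "delete", duplicates)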
13,917 |
def get_globals():
"""Context variables that are available for every template rendered by
OSFWebRenderer.
"""
user = _get_current_user()
set_status_message(user)
user_institutions = [{'id': inst._id, 'name': inst.name, 'logo_path': inst.logo_path_rounded_corners} for inst in user.affiliated_institutions.all()] if user else []
location = geolite2.reader().get(request.remote_addr) if request.remote_addr else None
if request.host_url != settings.DOMAIN:
try:
inst_id = Institution.objects.get(domains__icontains=request.host, is_deleted=False)._id
request_login_url = '{}institutions/{}'.format(settings.DOMAIN, inst_id)
except Institution.DoesNotExist:
request_login_url = request.url.replace(request.host_url, settings.DOMAIN)
else:
request_login_url = request.url
return {
'private_link_anonymous': is_private_link_anonymous_view(),
'user_name': user.username if user else '',
'user_full_name': user.fullname if user else '',
'user_id': user._id if user else '',
'user_locale': user.locale if user and user.locale else '',
'user_timezone': user.timezone if user and user.timezone else '',
'user_url': user.url if user else '',
'user_profile_image': get_profile_image_url(user=user, size=25) if user else '',
'user_email_verifications': user.unconfirmed_email_info if user else [],
'user_api_url': user.api_url if user else '',
'user_entry_point': metrics.get_entry_point(user) if user else '',
'user_institutions': user_institutions if user else None,
'display_name': user.fullname if user else '',
'anon': {
'continent': (location or {}).get('continent', {}).get('code', None),
'country': (location or {}).get('country', {}).get('iso_code', None),
},
'use_cdn': settings.USE_CDN_FOR_CLIENT_LIBS,
'sentry_dsn_js': settings.SENTRY_DSN_JS if sentry.enabled else None,
'dev_mode': settings.DEV_MODE,
'allow_login': settings.ALLOW_LOGIN,
'cookie_name': settings.COOKIE_NAME,
'status': status.pop_status_messages(),
'prev_status': status.pop_previous_status_messages(),
'domain': settings.DOMAIN,
'api_domain': settings.API_DOMAIN,
'disk_saving_mode': settings.DISK_SAVING_MODE,
'language': language,
'noteworthy_links_node': settings.NEW_AND_NOTEWORTHY_LINKS_NODE,
'popular_links_node': settings.POPULAR_LINKS_NODE,
'web_url_for': util.web_url_for,
'api_url_for': util.api_url_for,
'api_v2_url': util.api_v2_url, # URL function for templates
'api_v2_domain': settings.API_DOMAIN,
'api_v2_base': util.api_v2_url(''), # Base url used by JS api helper
'sanitize': sanitize,
'sjson': lambda s: sanitize.safe_json(s),
'webpack_asset': paths.webpack_asset,
'osf_url': settings.INTERNAL_DOMAIN,
'waterbutler_url': settings.WATERBUTLER_URL,
'login_url': cas.get_login_url(request_login_url),
'sign_up_url': util.web_url_for('auth_register', _absolute=True) + '?next=' + request_login_url,
'reauth_url': util.web_url_for('auth_logout', redirect_url=request.url, reauth=True),
'profile_url': cas.get_profile_url(),
'enable_institutions': settings.ENABLE_INSTITUTIONS,
'keen': {
'public': {
'project_id': settings.KEEN['public']['project_id'],
'write_key': settings.KEEN['public']['write_key'],
},
'private': {
'project_id': settings.KEEN['private']['project_id'],
'write_key': settings.KEEN['private']['write_key'],
},
},
'institutional_landing_flag': waffle.flag_is_active(request, settings.INSTITUTIONAL_LANDING_FLAG),
'maintenance': maintenance.get_maintenance(),
'recaptcha_site_key': settings.RECAPTCHA_SITE_KEY,
'custom_citations': settings.CUSTOM_CITATIONS,
'osf_support_email': settings.OSF_SUPPORT_EMAIL,
'osf_contact_email': settings.OSF_CONTACT_EMAIL,
'wafflejs_url': '{api_domain}{waffle_url}'.format(api_domain=settings.API_DOMAIN.rstrip('/'), waffle_url=reverse('wafflejs')),
'footer_links': settings.FOOTER_LINKS,
'waffle': waffle,
'csrf_cookie_name': api_settings.CSRF_COOKIE_NAME,
}
|
def get_globals():
"""Context variables that are available for every template rendered by
OSFWebRenderer.
"""
user = _get_current_user()
set_status_message(user)
user_institutions = [{'id': inst._id, 'name': inst.name, 'logo_path': inst.logo_path_rounded_corners} for inst in user.affiliated_institutions.all()] if user else []
location = geolite2.reader().get(request.remote_addr) if request.remote_addr else None
if request.host_url != settings.DOMAIN:
try:
inst_id = Institution.objects.get(domains__icontains=request.host, is_deleted=False)._id
request_login_url = '{}institutions/{}'.format(settings.DOMAIN, inst_id)
except Institution.DoesNotExist:
request_login_url = request.url.replace(request.host_url, settings.DOMAIN)
else:
request_login_url = request.url
return {
'private_link_anonymous': is_private_link_anonymous_view(),
'user_name': user.username if user else '',
'user_full_name': user.fullname if user else '',
'user_id': user._id if user else '',
'user_locale': user.locale if user and user.locale else '',
'user_timezone': user.timezone if user and user.timezone else '',
'user_url': user.url if user else '',
'user_profile_image': get_profile_image_url(user=user, size=25) if user else '',
'user_email_verifications': user.unconfirmed_email_info if user else [],
'user_api_url': user.api_url if user else '',
'user_entry_point': metrics.get_entry_point(user) if user else '',
'user_institutions': user_institutions if user else None,
'display_name': user.fullname if user else '',
'anon': {
'continent': (location or {}).get('continent', {}).get('code', None),
'country': (location or {}).get('country', {}).get('iso_code', None),
},
'use_cdn': settings.USE_CDN_FOR_CLIENT_LIBS,
'sentry_dsn_js': settings.SENTRY_DSN_JS if sentry.enabled else None,
'dev_mode': settings.DEV_MODE,
'allow_login': settings.ALLOW_LOGIN,
'cookie_name': settings.COOKIE_NAME,
'status': status.pop_status_messages(),
'prev_status': status.pop_previous_status_messages(),
'domain': settings.DOMAIN,
'api_domain': settings.API_DOMAIN,
'disk_saving_mode': settings.DISK_SAVING_MODE,
'language': language,
'noteworthy_links_node': settings.NEW_AND_NOTEWORTHY_LINKS_NODE,
'popular_links_node': settings.POPULAR_LINKS_NODE,
'web_url_for': util.web_url_for,
'api_url_for': util.api_url_for,
'api_v2_url': util.api_v2_url, # URL function for templates
'api_v2_domain': settings.API_DOMAIN,
'api_v2_base': util.api_v2_url(''), # Base url used by JS api helper
'sanitize': sanitize,
'sjson': lambda s: sanitize.safe_json(s),
'webpack_asset': paths.webpack_asset,
'osf_url': settings.INTERNAL_DOMAIN,
'waterbutler_url': settings.WATERBUTLER_URL,
'login_url': cas.get_login_url(request_login_url),
'sign_up_url': util.web_url_for('auth_register', _absolute=True) + '?next={}'.format(request_login_url),
'reauth_url': util.web_url_for('auth_logout', redirect_url=request.url, reauth=True),
'profile_url': cas.get_profile_url(),
'enable_institutions': settings.ENABLE_INSTITUTIONS,
'keen': {
'public': {
'project_id': settings.KEEN['public']['project_id'],
'write_key': settings.KEEN['public']['write_key'],
},
'private': {
'project_id': settings.KEEN['private']['project_id'],
'write_key': settings.KEEN['private']['write_key'],
},
},
'institutional_landing_flag': waffle.flag_is_active(request, settings.INSTITUTIONAL_LANDING_FLAG),
'maintenance': maintenance.get_maintenance(),
'recaptcha_site_key': settings.RECAPTCHA_SITE_KEY,
'custom_citations': settings.CUSTOM_CITATIONS,
'osf_support_email': settings.OSF_SUPPORT_EMAIL,
'osf_contact_email': settings.OSF_CONTACT_EMAIL,
'wafflejs_url': '{api_domain}{waffle_url}'.format(api_domain=settings.API_DOMAIN.rstrip('/'), waffle_url=reverse('wafflejs')),
'footer_links': settings.FOOTER_LINKS,
'waffle': waffle,
'csrf_cookie_name': api_settings.CSRF_COOKIE_NAME,
}
|
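The only difference in this pair is how request_login_url is spliced into sign_up_url; neither version percent-encodes the value. A hedged sketch of how that could be done with urllib.parse.quote, offered as an aside rather than as what the code above does:
# Hypothetical helper, not part of the snippet above: percent-encode the
# "next" parameter before appending it to the sign-up URL.
from urllib.parse import quote

def build_sign_up_url(base_url: str, request_login_url: str) -> str:
    return base_url + "?next=" + quote(request_login_url, safe="")

print(build_sign_up_url(
    "https://example.org/register/",
    "https://example.org/dashboard/?tab=projects",
))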
29,457 |
def parse_mapping_metrics_file(f):
"""
Mapping and aligning metrics, like the metrics computed by the Samtools Flagstat command, are available
on an aggregate level (over all input data), and on a per read group level. Unless explicitly stated,
the metrics units are in reads (ie, not in terms of pairs or alignments).
T_SRR7890936_50pc.mapping_metrics.csv
# phenotype-level metrics (tumor or normal):
TUMOR MAPPING/ALIGNING SUMMARY,,Total input reads,2200000000,100.00
TUMOR MAPPING/ALIGNING SUMMARY,,Number of duplicate marked reads,433637413,19.71
TUMOR MAPPING/ALIGNING SUMMARY,,Number of duplicate marked and mate reads removed,NA
TUMOR MAPPING/ALIGNING SUMMARY,,Number of unique reads (excl. duplicate marked reads),1766362587,80.29
TUMOR MAPPING/ALIGNING SUMMARY,,Reads with mate sequenced,2200000000,100.00
TUMOR MAPPING/ALIGNING SUMMARY,,Reads without mate sequenced,0,0.00
TUMOR MAPPING/ALIGNING SUMMARY,,QC-failed reads,0,0.00
TUMOR MAPPING/ALIGNING SUMMARY,,Mapped reads,2130883930,96.86
TUMOR MAPPING/ALIGNING SUMMARY,,Mapped reads R1,1066701794,96.97
TUMOR MAPPING/ALIGNING SUMMARY,,Mapped reads R2,1064182136,96.74
TUMOR MAPPING/ALIGNING SUMMARY,,Number of unique & mapped reads (excl. duplicate marked reads),1697246517,77.15
TUMOR MAPPING/ALIGNING SUMMARY,,Unmapped reads,69116070,3.14
TUMOR MAPPING/ALIGNING SUMMARY,,Singleton reads (itself mapped; mate unmapped),3917092,0.18
TUMOR MAPPING/ALIGNING SUMMARY,,Paired reads (itself & mate mapped),2126966838,96.68
TUMOR MAPPING/ALIGNING SUMMARY,,Properly paired reads,2103060370,95.59
TUMOR MAPPING/ALIGNING SUMMARY,,Not properly paired reads (discordant),23906468,1.09
TUMOR MAPPING/ALIGNING SUMMARY,,Paired reads mapped to different chromosomes,17454370,0.82
TUMOR MAPPING/ALIGNING SUMMARY,,Paired reads mapped to different chromosomes (MAPQ>=10),6463547,0.30
TUMOR MAPPING/ALIGNING SUMMARY,,Reads with MAPQ [40:inf),2002661377,91.03
TUMOR MAPPING/ALIGNING SUMMARY,,Reads with MAPQ [30:40),7169392,0.33
TUMOR MAPPING/ALIGNING SUMMARY,,Reads with MAPQ [20:30),16644390,0.76
TUMOR MAPPING/ALIGNING SUMMARY,,Reads with MAPQ [10:20),20280057,0.92
TUMOR MAPPING/ALIGNING SUMMARY,,Reads with MAPQ [ 0:10),84128714,3.82
TUMOR MAPPING/ALIGNING SUMMARY,,Reads with MAPQ NA (Unmapped reads),69116070,3.14
TUMOR MAPPING/ALIGNING SUMMARY,,Reads with indel R1,26849051,2.52
TUMOR MAPPING/ALIGNING SUMMARY,,Reads with indel R2,24810803,2.33
TUMOR MAPPING/ALIGNING SUMMARY,,Total bases,330000000000
TUMOR MAPPING/ALIGNING SUMMARY,,Total bases R1,165000000000
TUMOR MAPPING/ALIGNING SUMMARY,,Total bases R2,165000000000
TUMOR MAPPING/ALIGNING SUMMARY,,Mapped bases R1,160005269100
TUMOR MAPPING/ALIGNING SUMMARY,,Mapped bases R2,159627320400
TUMOR MAPPING/ALIGNING SUMMARY,,Soft-clipped bases R1,1757128997,1.10
TUMOR MAPPING/ALIGNING SUMMARY,,Soft-clipped bases R2,3208748350,2.01
TUMOR MAPPING/ALIGNING SUMMARY,,Mismatched bases R1,585802788,0.37
TUMOR MAPPING/ALIGNING SUMMARY,,Mismatched bases R2,1155805091,0.72
TUMOR MAPPING/ALIGNING SUMMARY,,Mismatched bases R1 (excl. indels),501394281,0.31
TUMOR MAPPING/ALIGNING SUMMARY,,Mismatched bases R2 (excl. indels),1073788605,0.67
TUMOR MAPPING/ALIGNING SUMMARY,,Q30 bases,297564555927,90.17
TUMOR MAPPING/ALIGNING SUMMARY,,Q30 bases R1,155492239719,94.24
TUMOR MAPPING/ALIGNING SUMMARY,,Q30 bases R2,142072316208,86.10
TUMOR MAPPING/ALIGNING SUMMARY,,Q30 bases (excl. dups & clipped bases),246555769158
TUMOR MAPPING/ALIGNING SUMMARY,,Total alignments,2190085267
TUMOR MAPPING/ALIGNING SUMMARY,,Secondary alignments,0
TUMOR MAPPING/ALIGNING SUMMARY,,Supplementary (chimeric) alignments,59201337
TUMOR MAPPING/ALIGNING SUMMARY,,Estimated read length,150.00
TUMOR MAPPING/ALIGNING SUMMARY,,Average sequenced coverage over genome,102.83
TUMOR MAPPING/ALIGNING SUMMARY,,Insert length: mean,383.00
TUMOR MAPPING/ALIGNING SUMMARY,,Insert length: median,376.00
TUMOR MAPPING/ALIGNING SUMMARY,,Insert length: standard deviation,85.15
# general metrics - reported in the header (except DRAGEN mapping rate, which may differ between T and N):
TUMOR MAPPING/ALIGNING SUMMARY,,Bases in reference genome,3209286105
TUMOR MAPPING/ALIGNING SUMMARY,,Bases in target bed [% of genome],NA
TUMOR MAPPING/ALIGNING SUMMARY,,Provided sex chromosome ploidy,NA
TUMOR MAPPING/ALIGNING SUMMARY,,DRAGEN mapping rate [mil. reads/second],0.39
# then same for normal:
NORMAL MAPPING/ALIGNING SUMMARY,,Total input reads,1100000000,100.00
NORMAL MAPPING/ALIGNING SUMMARY,,Number of duplicate marked reads,123518125,11.23
...
# then tumor and normal per-read-group metrics::
TUMOR MAPPING/ALIGNING PER RG,T_SRR7890936_50pc,Total reads in RG,2200000000,100.00
TUMOR MAPPING/ALIGNING PER RG,T_SRR7890936_50pc,Number of duplicate marked reads,433637413,19.71
TUMOR MAPPING/ALIGNING PER RG,T_SRR7890936_50pc,Number of duplicate marked and mate reads removed,NA
TUMOR MAPPING/ALIGNING PER RG,T_SRR7890936_50pc,Number of unique reads (excl. duplicate marked reads),1766362587,80.29
TUMOR MAPPING/ALIGNING PER RG,T_SRR7890936_50pc,Reads with mate sequenced,2200000000,100.00
TUMOR MAPPING/ALIGNING PER RG,T_SRR7890936_50pc,Reads without mate sequenced,0,0.00
TUMOR MAPPING/ALIGNING PER RG,T_SRR7890936_50pc,QC-failed reads,0,0.00
TUMOR MAPPING/ALIGNING PER RG,T_SRR7890936_50pc,Mapped reads,2130883930,96.86
TUMOR MAPPING/ALIGNING PER RG,T_SRR7890936_50pc,Mapped reads R1,1066701794,96.97
TUMOR MAPPING/ALIGNING PER RG,T_SRR7890936_50pc,Mapped reads R2,1064182136,96.74
TUMOR MAPPING/ALIGNING PER RG,T_SRR7890936_50pc,Number of unique & mapped reads (excl. duplicate marked reads),1697246517,77.15
TUMOR MAPPING/ALIGNING PER RG,T_SRR7890936_50pc,Unmapped reads,69116070,3.14
TUMOR MAPPING/ALIGNING PER RG,T_SRR7890936_50pc,Singleton reads (itself mapped; mate unmapped),3917092,0.18
TUMOR MAPPING/ALIGNING PER RG,T_SRR7890936_50pc,Paired reads (itself & mate mapped),2126966838,96.68
TUMOR MAPPING/ALIGNING PER RG,T_SRR7890936_50pc,Properly paired reads,2103060370,95.59
TUMOR MAPPING/ALIGNING PER RG,T_SRR7890936_50pc,Not properly paired reads (discordant),23906468,1.09
TUMOR MAPPING/ALIGNING PER RG,T_SRR7890936_50pc,Paired reads mapped to different chromosomes,17454370,0.82
TUMOR MAPPING/ALIGNING PER RG,T_SRR7890936_50pc,Paired reads mapped to different chromosomes (MAPQ>=10),6463547,0.30
TUMOR MAPPING/ALIGNING PER RG,T_SRR7890936_50pc,Reads with MAPQ [40:inf),2002661377,91.03
TUMOR MAPPING/ALIGNING PER RG,T_SRR7890936_50pc,Reads with MAPQ [30:40),7169392,0.33
TUMOR MAPPING/ALIGNING PER RG,T_SRR7890936_50pc,Reads with MAPQ [20:30),16644390,0.76
TUMOR MAPPING/ALIGNING PER RG,T_SRR7890936_50pc,Reads with MAPQ [10:20),20280057,0.92
TUMOR MAPPING/ALIGNING PER RG,T_SRR7890936_50pc,Reads with MAPQ [ 0:10),84128714,3.82
TUMOR MAPPING/ALIGNING PER RG,T_SRR7890936_50pc,Reads with MAPQ NA (Unmapped reads),69116070,3.14
TUMOR MAPPING/ALIGNING PER RG,T_SRR7890936_50pc,Reads with indel R1,26849051,2.52
TUMOR MAPPING/ALIGNING PER RG,T_SRR7890936_50pc,Reads with indel R2,24810803,2.33
TUMOR MAPPING/ALIGNING PER RG,T_SRR7890936_50pc,Total bases,330000000000
TUMOR MAPPING/ALIGNING PER RG,T_SRR7890936_50pc,Total bases R1,165000000000
TUMOR MAPPING/ALIGNING PER RG,T_SRR7890936_50pc,Total bases R2,165000000000
TUMOR MAPPING/ALIGNING PER RG,T_SRR7890936_50pc,Mapped bases R1,160005269100
TUMOR MAPPING/ALIGNING PER RG,T_SRR7890936_50pc,Mapped bases R2,159627320400
TUMOR MAPPING/ALIGNING PER RG,T_SRR7890936_50pc,Soft-clipped bases R1,1757128997,1.10
TUMOR MAPPING/ALIGNING PER RG,T_SRR7890936_50pc,Soft-clipped bases R2,3208748350,2.01
TUMOR MAPPING/ALIGNING PER RG,T_SRR7890936_50pc,Mismatched bases R1,585802788,0.37
TUMOR MAPPING/ALIGNING PER RG,T_SRR7890936_50pc,Mismatched bases R2,1155805091,0.72
TUMOR MAPPING/ALIGNING PER RG,T_SRR7890936_50pc,Mismatched bases R1 (excl. indels),501394281,0.31
TUMOR MAPPING/ALIGNING PER RG,T_SRR7890936_50pc,Mismatched bases R2 (excl. indels),1073788605,0.67
TUMOR MAPPING/ALIGNING PER RG,T_SRR7890936_50pc,Q30 bases,297564555927,90.17
TUMOR MAPPING/ALIGNING PER RG,T_SRR7890936_50pc,Q30 bases R1,155492239719,94.24
TUMOR MAPPING/ALIGNING PER RG,T_SRR7890936_50pc,Q30 bases R2,142072316208,86.10
TUMOR MAPPING/ALIGNING PER RG,T_SRR7890936_50pc,Q30 bases (excl. dups & clipped bases),246555769158
TUMOR MAPPING/ALIGNING PER RG,T_SRR7890936_50pc,Total alignments,2190085267
TUMOR MAPPING/ALIGNING PER RG,T_SRR7890936_50pc,Secondary alignments,0
TUMOR MAPPING/ALIGNING PER RG,T_SRR7890936_50pc,Supplementary (chimeric) alignments,59201337
TUMOR MAPPING/ALIGNING PER RG,T_SRR7890936_50pc,Estimated read length,150.00
TUMOR MAPPING/ALIGNING PER RG,T_SRR7890936_50pc,Average sequenced coverage over genome,102.83
TUMOR MAPPING/ALIGNING PER RG,T_SRR7890936_50pc,Insert length: mean,383.01
TUMOR MAPPING/ALIGNING PER RG,T_SRR7890936_50pc,Insert length: median,376.00
TUMOR MAPPING/ALIGNING PER RG,T_SRR7890936_50pc,Insert length: standard deviation,87.58
# same for normal:
NORMAL MAPPING/ALIGNING PER RG,N_SRR7890889,Total reads in RG,1100000000,100.00
NORMAL MAPPING/ALIGNING PER RG,N_SRR7890889,Number of duplicate marked reads,NA
NORMAL MAPPING/ALIGNING PER RG,N_SRR7890889,Number of duplicate marked and mate reads removed,NA
NORMAL MAPPING/ALIGNING PER RG,N_SRR7890889,Number of unique reads (excl. duplicate marked reads),NA
...
We are reporting summary metrics in the general stats table, and per-read-group in a separate table.
"""
f['s_name'] = re.search(r'(.*).mapping_metrics.csv', f['fn']).group(1)
data_by_readgroup = defaultdict(dict)
data_by_phenotype = defaultdict(dict)
for line in f['f'].splitlines():
fields = line.split(',')
phenotype = fields[0].split('/')[0].split(' ')[0].lower() # TUMOR MAPPING -> tumor
analysis = fields[0].split('/')[1] # ALIGNING SUMMARY, ALIGNING PER RG
metric = fields[2]
value = fields[3]
try:
value = int(value)
except ValueError:
try:
value = float(value)
except ValueError:
pass
percentage = None
if len(fields) > 4: # percentage
percentage = fields[4]
try:
percentage = float(percentage)
except ValueError:
pass
# sample-unspecific metrics are reported only in ALIGNING SUMMARY sections
if analysis == 'ALIGNING SUMMARY':
data_by_phenotype[phenotype][metric] = value
if percentage is not None:
data_by_phenotype[phenotype][metric + ' pct'] = percentage
# for sample-specific metrics, using ALIGNING PER RG because it has the sample name in the 2nd col
if analysis == 'ALIGNING PER RG':
# setting normal and tumor sample names for future use
readgroup = fields[1]
data_by_readgroup[readgroup][metric] = value
if percentage is not None:
data_by_readgroup[readgroup][metric + ' pct'] = percentage
# adding some missing values that we wanna report for consistency
for data in itertools.chain(data_by_readgroup.values(), data_by_phenotype.values()):
# fixing when deduplication wasn't performed
if 'Number of duplicate marked reads' in data and data['Number of duplicate marked reads'] == 'NA':
data['Number of duplicate marked reads'] = 0
if 'Number of duplicate marked and mate reads removed' in data and \
data['Number of duplicate marked and mate reads removed'] == 'NA':
data['Number of duplicate marked and mate reads removed'] = 0
if 'Number of unique reads (excl. duplicate marked reads)' in data and \
data['Number of unique reads (excl. duplicate marked reads)'] == 'NA':
data['Number of unique reads (excl. duplicate marked reads)'] = data['Mapped reads']
# adding alignment percentages
if 'Total alignments' in data and data['Total alignments'] > 0 and 'Secondary alignments' in data:
data['Secondary alignments pct'] = data['Secondary alignments'] / data['Total alignments'] * 100.0
# adding some missing bases percentages
if 'Total bases' in data and data['Total bases'] > 0:
if 'Q30 bases (excl. dups & clipped bases)' in data:
data['Q30 bases (excl. dups & clipped bases) pct'] = data['Q30 bases (excl. dups & clipped bases)'] / \
data['Total bases'] * 100.0
if 'Mapped bases R1' in data:
data['Mapped bases R1 pct'] = data['Mapped bases R1'] / data['Total bases'] * 100.0
if 'Mapped bases R2' in data:
data['Mapped bases R2 pct'] = data['Mapped bases R2'] / data['Total bases'] * 100.0
return data_by_readgroup, data_by_phenotype
|
def parse_mapping_metrics_file(f):
"""
Mapping and aligning metrics, like the metrics computed by the Samtools Flagstat command, are available
on an aggregate level (over all input data), and on a per read group level. Unless explicitly stated,
the metrics units are in reads (ie, not in terms of pairs or alignments).
T_SRR7890936_50pc.mapping_metrics.csv
# phenotype-level metrics (tumor or normal):
TUMOR MAPPING/ALIGNING SUMMARY,,Total input reads,2200000000,100.00
TUMOR MAPPING/ALIGNING SUMMARY,,Number of duplicate marked reads,433637413,19.71
TUMOR MAPPING/ALIGNING SUMMARY,,Number of duplicate marked and mate reads removed,NA
TUMOR MAPPING/ALIGNING SUMMARY,,Number of unique reads (excl. duplicate marked reads),1766362587,80.29
TUMOR MAPPING/ALIGNING SUMMARY,,Reads with mate sequenced,2200000000,100.00
TUMOR MAPPING/ALIGNING SUMMARY,,Reads without mate sequenced,0,0.00
TUMOR MAPPING/ALIGNING SUMMARY,,QC-failed reads,0,0.00
TUMOR MAPPING/ALIGNING SUMMARY,,Mapped reads,2130883930,96.86
TUMOR MAPPING/ALIGNING SUMMARY,,Mapped reads R1,1066701794,96.97
TUMOR MAPPING/ALIGNING SUMMARY,,Mapped reads R2,1064182136,96.74
TUMOR MAPPING/ALIGNING SUMMARY,,Number of unique & mapped reads (excl. duplicate marked reads),1697246517,77.15
TUMOR MAPPING/ALIGNING SUMMARY,,Unmapped reads,69116070,3.14
TUMOR MAPPING/ALIGNING SUMMARY,,Singleton reads (itself mapped; mate unmapped),3917092,0.18
TUMOR MAPPING/ALIGNING SUMMARY,,Paired reads (itself & mate mapped),2126966838,96.68
TUMOR MAPPING/ALIGNING SUMMARY,,Properly paired reads,2103060370,95.59
TUMOR MAPPING/ALIGNING SUMMARY,,Not properly paired reads (discordant),23906468,1.09
TUMOR MAPPING/ALIGNING SUMMARY,,Paired reads mapped to different chromosomes,17454370,0.82
TUMOR MAPPING/ALIGNING SUMMARY,,Paired reads mapped to different chromosomes (MAPQ>=10),6463547,0.30
TUMOR MAPPING/ALIGNING SUMMARY,,Reads with MAPQ [40:inf),2002661377,91.03
TUMOR MAPPING/ALIGNING SUMMARY,,Reads with MAPQ [30:40),7169392,0.33
TUMOR MAPPING/ALIGNING SUMMARY,,Reads with MAPQ [20:30),16644390,0.76
TUMOR MAPPING/ALIGNING SUMMARY,,Reads with MAPQ [10:20),20280057,0.92
TUMOR MAPPING/ALIGNING SUMMARY,,Reads with MAPQ [ 0:10),84128714,3.82
TUMOR MAPPING/ALIGNING SUMMARY,,Reads with MAPQ NA (Unmapped reads),69116070,3.14
TUMOR MAPPING/ALIGNING SUMMARY,,Reads with indel R1,26849051,2.52
TUMOR MAPPING/ALIGNING SUMMARY,,Reads with indel R2,24810803,2.33
TUMOR MAPPING/ALIGNING SUMMARY,,Total bases,330000000000
TUMOR MAPPING/ALIGNING SUMMARY,,Total bases R1,165000000000
TUMOR MAPPING/ALIGNING SUMMARY,,Total bases R2,165000000000
TUMOR MAPPING/ALIGNING SUMMARY,,Mapped bases R1,160005269100
TUMOR MAPPING/ALIGNING SUMMARY,,Mapped bases R2,159627320400
TUMOR MAPPING/ALIGNING SUMMARY,,Soft-clipped bases R1,1757128997,1.10
TUMOR MAPPING/ALIGNING SUMMARY,,Soft-clipped bases R2,3208748350,2.01
TUMOR MAPPING/ALIGNING SUMMARY,,Mismatched bases R1,585802788,0.37
TUMOR MAPPING/ALIGNING SUMMARY,,Mismatched bases R2,1155805091,0.72
TUMOR MAPPING/ALIGNING SUMMARY,,Mismatched bases R1 (excl. indels),501394281,0.31
TUMOR MAPPING/ALIGNING SUMMARY,,Mismatched bases R2 (excl. indels),1073788605,0.67
TUMOR MAPPING/ALIGNING SUMMARY,,Q30 bases,297564555927,90.17
TUMOR MAPPING/ALIGNING SUMMARY,,Q30 bases R1,155492239719,94.24
TUMOR MAPPING/ALIGNING SUMMARY,,Q30 bases R2,142072316208,86.10
TUMOR MAPPING/ALIGNING SUMMARY,,Q30 bases (excl. dups & clipped bases),246555769158
TUMOR MAPPING/ALIGNING SUMMARY,,Total alignments,2190085267
TUMOR MAPPING/ALIGNING SUMMARY,,Secondary alignments,0
TUMOR MAPPING/ALIGNING SUMMARY,,Supplementary (chimeric) alignments,59201337
TUMOR MAPPING/ALIGNING SUMMARY,,Estimated read length,150.00
TUMOR MAPPING/ALIGNING SUMMARY,,Average sequenced coverage over genome,102.83
TUMOR MAPPING/ALIGNING SUMMARY,,Insert length: mean,383.00
TUMOR MAPPING/ALIGNING SUMMARY,,Insert length: median,376.00
TUMOR MAPPING/ALIGNING SUMMARY,,Insert length: standard deviation,85.15
# general metrics - reported in the header (except DRAGEN mapping rate, which may differ between T and N):
TUMOR MAPPING/ALIGNING SUMMARY,,Bases in reference genome,3209286105
TUMOR MAPPING/ALIGNING SUMMARY,,Bases in target bed [% of genome],NA
TUMOR MAPPING/ALIGNING SUMMARY,,Provided sex chromosome ploidy,NA
TUMOR MAPPING/ALIGNING SUMMARY,,DRAGEN mapping rate [mil. reads/second],0.39
# then same for normal:
NORMAL MAPPING/ALIGNING SUMMARY,,Total input reads,1100000000,100.00
NORMAL MAPPING/ALIGNING SUMMARY,,Number of duplicate marked reads,123518125,11.23
...
# then tumor and normal per-read-group metrics::
TUMOR MAPPING/ALIGNING PER RG,T_SRR7890936_50pc,Total reads in RG,2200000000,100.00
TUMOR MAPPING/ALIGNING PER RG,T_SRR7890936_50pc,Number of duplicate marked reads,433637413,19.71
TUMOR MAPPING/ALIGNING PER RG,T_SRR7890936_50pc,Number of duplicate marked and mate reads removed,NA
TUMOR MAPPING/ALIGNING PER RG,T_SRR7890936_50pc,Number of unique reads (excl. duplicate marked reads),1766362587,80.29
TUMOR MAPPING/ALIGNING PER RG,T_SRR7890936_50pc,Reads with mate sequenced,2200000000,100.00
TUMOR MAPPING/ALIGNING PER RG,T_SRR7890936_50pc,Reads without mate sequenced,0,0.00
TUMOR MAPPING/ALIGNING PER RG,T_SRR7890936_50pc,QC-failed reads,0,0.00
TUMOR MAPPING/ALIGNING PER RG,T_SRR7890936_50pc,Mapped reads,2130883930,96.86
TUMOR MAPPING/ALIGNING PER RG,T_SRR7890936_50pc,Mapped reads R1,1066701794,96.97
TUMOR MAPPING/ALIGNING PER RG,T_SRR7890936_50pc,Mapped reads R2,1064182136,96.74
TUMOR MAPPING/ALIGNING PER RG,T_SRR7890936_50pc,Number of unique & mapped reads (excl. duplicate marked reads),1697246517,77.15
TUMOR MAPPING/ALIGNING PER RG,T_SRR7890936_50pc,Unmapped reads,69116070,3.14
TUMOR MAPPING/ALIGNING PER RG,T_SRR7890936_50pc,Singleton reads (itself mapped; mate unmapped),3917092,0.18
TUMOR MAPPING/ALIGNING PER RG,T_SRR7890936_50pc,Paired reads (itself & mate mapped),2126966838,96.68
TUMOR MAPPING/ALIGNING PER RG,T_SRR7890936_50pc,Properly paired reads,2103060370,95.59
TUMOR MAPPING/ALIGNING PER RG,T_SRR7890936_50pc,Not properly paired reads (discordant),23906468,1.09
TUMOR MAPPING/ALIGNING PER RG,T_SRR7890936_50pc,Paired reads mapped to different chromosomes,17454370,0.82
TUMOR MAPPING/ALIGNING PER RG,T_SRR7890936_50pc,Paired reads mapped to different chromosomes (MAPQ>=10),6463547,0.30
TUMOR MAPPING/ALIGNING PER RG,T_SRR7890936_50pc,Reads with MAPQ [40:inf),2002661377,91.03
TUMOR MAPPING/ALIGNING PER RG,T_SRR7890936_50pc,Reads with MAPQ [30:40),7169392,0.33
TUMOR MAPPING/ALIGNING PER RG,T_SRR7890936_50pc,Reads with MAPQ [20:30),16644390,0.76
TUMOR MAPPING/ALIGNING PER RG,T_SRR7890936_50pc,Reads with MAPQ [10:20),20280057,0.92
TUMOR MAPPING/ALIGNING PER RG,T_SRR7890936_50pc,Reads with MAPQ [ 0:10),84128714,3.82
TUMOR MAPPING/ALIGNING PER RG,T_SRR7890936_50pc,Reads with MAPQ NA (Unmapped reads),69116070,3.14
TUMOR MAPPING/ALIGNING PER RG,T_SRR7890936_50pc,Reads with indel R1,26849051,2.52
TUMOR MAPPING/ALIGNING PER RG,T_SRR7890936_50pc,Reads with indel R2,24810803,2.33
TUMOR MAPPING/ALIGNING PER RG,T_SRR7890936_50pc,Total bases,330000000000
TUMOR MAPPING/ALIGNING PER RG,T_SRR7890936_50pc,Total bases R1,165000000000
TUMOR MAPPING/ALIGNING PER RG,T_SRR7890936_50pc,Total bases R2,165000000000
TUMOR MAPPING/ALIGNING PER RG,T_SRR7890936_50pc,Mapped bases R1,160005269100
TUMOR MAPPING/ALIGNING PER RG,T_SRR7890936_50pc,Mapped bases R2,159627320400
TUMOR MAPPING/ALIGNING PER RG,T_SRR7890936_50pc,Soft-clipped bases R1,1757128997,1.10
TUMOR MAPPING/ALIGNING PER RG,T_SRR7890936_50pc,Soft-clipped bases R2,3208748350,2.01
TUMOR MAPPING/ALIGNING PER RG,T_SRR7890936_50pc,Mismatched bases R1,585802788,0.37
TUMOR MAPPING/ALIGNING PER RG,T_SRR7890936_50pc,Mismatched bases R2,1155805091,0.72
TUMOR MAPPING/ALIGNING PER RG,T_SRR7890936_50pc,Mismatched bases R1 (excl. indels),501394281,0.31
TUMOR MAPPING/ALIGNING PER RG,T_SRR7890936_50pc,Mismatched bases R2 (excl. indels),1073788605,0.67
TUMOR MAPPING/ALIGNING PER RG,T_SRR7890936_50pc,Q30 bases,297564555927,90.17
TUMOR MAPPING/ALIGNING PER RG,T_SRR7890936_50pc,Q30 bases R1,155492239719,94.24
TUMOR MAPPING/ALIGNING PER RG,T_SRR7890936_50pc,Q30 bases R2,142072316208,86.10
TUMOR MAPPING/ALIGNING PER RG,T_SRR7890936_50pc,Q30 bases (excl. dups & clipped bases),246555769158
TUMOR MAPPING/ALIGNING PER RG,T_SRR7890936_50pc,Total alignments,2190085267
TUMOR MAPPING/ALIGNING PER RG,T_SRR7890936_50pc,Secondary alignments,0
TUMOR MAPPING/ALIGNING PER RG,T_SRR7890936_50pc,Supplementary (chimeric) alignments,59201337
TUMOR MAPPING/ALIGNING PER RG,T_SRR7890936_50pc,Estimated read length,150.00
TUMOR MAPPING/ALIGNING PER RG,T_SRR7890936_50pc,Average sequenced coverage over genome,102.83
TUMOR MAPPING/ALIGNING PER RG,T_SRR7890936_50pc,Insert length: mean,383.01
TUMOR MAPPING/ALIGNING PER RG,T_SRR7890936_50pc,Insert length: median,376.00
TUMOR MAPPING/ALIGNING PER RG,T_SRR7890936_50pc,Insert length: standard deviation,87.58
# same for normal:
NORMAL MAPPING/ALIGNING PER RG,N_SRR7890889,Total reads in RG,1100000000,100.00
NORMAL MAPPING/ALIGNING PER RG,N_SRR7890889,Number of duplicate marked reads,NA
NORMAL MAPPING/ALIGNING PER RG,N_SRR7890889,Number of duplicate marked and mate reads removed,NA
NORMAL MAPPING/ALIGNING PER RG,N_SRR7890889,Number of unique reads (excl. duplicate marked reads),NA
...
We are reporting summary metrics in the general stats table, and per-read-group in a separate table.
"""
f['s_name'] = re.search(r'(.*).mapping_metrics.csv', f['fn']).group(1)
data_by_readgroup = defaultdict(dict)
data_by_phenotype = defaultdict(dict)
for line in f['f'].splitlines():
fields = line.split(',')
phenotype = fields[0].split('/')[0].split(' ')[0].lower() # TUMOR MAPPING -> tumor
analysis = fields[0].split('/')[1] # ALIGNING SUMMARY, ALIGNING PER RG
metric = fields[2]
value = fields[3]
try:
value = int(value)
except ValueError:
try:
value = float(value)
except ValueError:
pass
percentage = None
if len(fields) > 4: # percentage
percentage = fields[4]
try:
percentage = float(percentage)
except ValueError:
pass
# sample-unspecific metrics are reported only in ALIGNING SUMMARY sections
if analysis == 'ALIGNING SUMMARY':
data_by_phenotype[phenotype][metric] = value
if percentage is not None:
data_by_phenotype[phenotype][metric + ' pct'] = percentage
# for sample-specific metrics, using ALIGNING PER RG because it has the sample name in the 2nd col
if analysis == 'ALIGNING PER RG':
# setting normal and tumor sample names for future use
readgroup = fields[1]
data_by_readgroup[readgroup][metric] = value
if percentage is not None:
data_by_readgroup[readgroup][metric + ' pct'] = percentage
# adding some missing values that we wanna report for consistency
for data in itertools.chain(data_by_readgroup.values(), data_by_phenotype.values()):
# fixing when deduplication wasn't performed
if data.get('Number of duplicate marked reads') == 'NA':
data['Number of duplicate marked reads'] = 0
if 'Number of duplicate marked and mate reads removed' in data and \
data['Number of duplicate marked and mate reads removed'] == 'NA':
data['Number of duplicate marked and mate reads removed'] = 0
if 'Number of unique reads (excl. duplicate marked reads)' in data and \
data['Number of unique reads (excl. duplicate marked reads)'] == 'NA':
data['Number of unique reads (excl. duplicate marked reads)'] = data['Mapped reads']
# adding alignment percentages
if 'Total alignments' in data and data['Total alignments'] > 0 and 'Secondary alignments' in data:
data['Secondary alignments pct'] = data['Secondary alignments'] / data['Total alignments'] * 100.0
# adding some missing bases percentages
if 'Total bases' in data and data['Total bases'] > 0:
if 'Q30 bases (excl. dups & clipped bases)' in data:
data['Q30 bases (excl. dups & clipped bases) pct'] = data['Q30 bases (excl. dups & clipped bases)'] / \
data['Total bases'] * 100.0
if 'Mapped bases R1' in data:
data['Mapped bases R1 pct'] = data['Mapped bases R1'] / data['Total bases'] * 100.0
if 'Mapped bases R2' in data:
data['Mapped bases R2 pct'] = data['Mapped bases R2'] / data['Total bases'] * 100.0
return data_by_readgroup, data_by_phenotype
|
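A usage sketch for the parser above: it expects a MultiQC-style file dict with the file name under 'fn' and the raw text under 'f', and returns per-read-group and per-phenotype dicts. The two sample lines are lifted from the docstring.
# Usage sketch for parse_mapping_metrics_file as defined above.
sample_lines = "\n".join([
    "TUMOR MAPPING/ALIGNING SUMMARY,,Total input reads,2200000000,100.00",
    "TUMOR MAPPING/ALIGNING PER RG,T_SRR7890936_50pc,Mapped reads,2130883930,96.86",
])
f = {"fn": "T_SRR7890936_50pc.mapping_metrics.csv", "f": sample_lines}

data_by_readgroup, data_by_phenotype = parse_mapping_metrics_file(f)
print(data_by_phenotype["tumor"]["Total input reads"])             # 2200000000
print(data_by_readgroup["T_SRR7890936_50pc"]["Mapped reads pct"])  # 96.86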
7,045 |
def get_option_parser():
parser = COP(
__doc__, comms=True, prep=True,
argdoc=[("[REG]", "Workflow name")]
)
parser.add_option(
"--flow-name",
help="Install into ~/cylc-run/<flow_name>/runN ",
action="store",
metavar="FLOW_NAME",
default=None,
dest="flow_name")
parser.add_option(
"--directory", "-C",
help="Install the workflow found in path specfied.",
action="store",
metavar="PATH/TO/FLOW",
default=None,
dest="source")
parser.add_option(
"--run-name",
help="Name the run.",
action="store",
metavar="RUN_NAME",
default=None,
dest="run_name")
parser.add_option(
"--no-run-name",
help="Install the workflow directly into ~/cylc-run/<flow_name>",
action="store_true",
default=False,
dest="no_run_name")
parser.add_option(
"--no-symlink-dirs",
help="Use this option to override creating default local symlinks.",
action="store_true",
default=False,
dest="no_symlinks")
# If the cylc-rose plugin is available, add the --option/-O config
try:
__import__('cylc.rose')
parser.add_option(
"--opt-conf-key", "-O",
help=(
"Use optional Rose Config Setting"
"(If Cylc-Rose is installed)"
),
action="append",
default=[],
dest="opt_conf_keys"
)
parser.add_option(
"--define", '-D',
help=(
"Each of these overrides the `[SECTION]KEY` setting in a "
"`rose-workflow.conf` file. "
"Can be used to disable a setting using the syntax "
"`--define=[SECTION]!KEY` or even `--define=[!SECTION]`."
),
action="append",
default=[],
dest="defines"
)
parser.add_option(
"--define-workflow", "--define-flow", '-S',
help=(
"As `--define`, but with an implicit `[SECTION]` for "
"workflow variables."
),
action="append",
default=[],
dest="define_workflows"
)
except ImportError:
pass
return parser
|
def get_option_parser():
parser = COP(
__doc__, comms=True, prep=True,
argdoc=[("[REG]", "Workflow name")]
)
parser.add_option(
"--flow-name",
help="Install into ~/cylc-run/<flow_name>/runN ",
action="store",
metavar="FLOW_NAME",
default=None,
dest="flow_name")
parser.add_option(
"--directory", "-C",
help="Install the workflow found in path specfied.",
action="store",
metavar="PATH/TO/FLOW",
default=None,
dest="source")
parser.add_option(
"--run-name",
help="Name the run.",
action="store",
metavar="RUN_NAME",
default=None,
dest="run_name")
parser.add_option(
"--no-run-name",
help="Install the workflow directly into ~/cylc-run/<flow_name>",
action="store_true",
default=False,
dest="no_run_name")
parser.add_option(
"--no-symlink-dirs",
help="Use this option to override creating default local symlinks.",
action="store_true",
default=False,
dest="no_symlinks")
    # If the cylc-rose plugin is available, add the --option/-O config options
try:
__import__('cylc.rose')
parser.add_option(
"--opt-conf-key", "-O",
help=(
"Use optional Rose Config Setting"
"(If Cylc-Rose is installed)"
),
action="append",
default=[],
dest="opt_conf_keys"
)
parser.add_option(
"--define", '-D',
help=(
"Each of these overrides the `[SECTION]KEY` setting in a "
"`rose-suite.conf` file. "
"Can be used to disable a setting using the syntax "
"`--define=[SECTION]!KEY` or even `--define=[!SECTION]`."
),
action="append",
default=[],
dest="defines"
)
parser.add_option(
"--define-workflow", "--define-flow", '-S',
help=(
"As `--define`, but with an implicit `[SECTION]` for "
"workflow variables."
),
action="append",
default=[],
dest="define_workflows"
)
except ImportError:
pass
return parser
|
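Both versions of `get_option_parser` above register plain optparse-style options on cylc's `COP` wrapper. A standalone sketch of how those options parse, substituting the standard-library `optparse.OptionParser` for `COP` purely for illustration:

# Illustration only: the same option definitions on a plain OptionParser.
from optparse import OptionParser

parser = OptionParser()
parser.add_option("--flow-name", action="store", default=None, dest="flow_name")
parser.add_option("--directory", "-C", action="store", default=None, dest="source")
parser.add_option("--run-name", action="store", default=None, dest="run_name")
parser.add_option("--no-run-name", action="store_true", default=False, dest="no_run_name")

opts, args = parser.parse_args(["--flow-name", "demo", "-C", "/tmp/flow"])
print(opts.flow_name, opts.source, opts.no_run_name)  # demo /tmp/flow False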
8,955 |
def find_sopel_modules_plugins():
"""List plugins from ``sopel_modules.*``.
:return: yield instance of :class:`~.handlers.PyModulePlugin`
configured for ``sopel_modules.*``
Before entry point plugins, the only way to package a plugin was to follow
the :pep:`382` by using the ``sopel_modules`` namespace. This function is
    responsible for loading such plugins.
"""
try:
import sopel_modules
except ImportError:
return
for plugin_dir in set(sopel_modules.__path__):
for name, _ in _list_plugin_filenames(plugin_dir):
yield handlers.PyModulePlugin(name, 'sopel_modules')
|
def find_sopel_modules_plugins():
"""List plugins from ``sopel_modules.*``.
:return: yield instance of :class:`~.handlers.PyModulePlugin`
configured for ``sopel_modules.*``
Before entry point plugins, the only way to package a plugin was to follow
:pep:`382` by using the ``sopel_modules`` namespace. This function is
    responsible for loading such plugins.
"""
try:
import sopel_modules
except ImportError:
return
for plugin_dir in set(sopel_modules.__path__):
for name, _ in _list_plugin_filenames(plugin_dir):
yield handlers.PyModulePlugin(name, 'sopel_modules')
|
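For context on the pair above: `sopel_modules` is a namespace package, so every directory on its `__path__` may contribute plugins. A rough standalone sketch of the per-directory discovery step, assuming `_list_plugin_filenames` simply yields importable module names found in a directory (an assumption; the real helper lives in `sopel.plugins` and may differ):

import os, tempfile, pathlib

def list_plugin_filenames(plugin_dir):
    # Assumed behaviour: yield (module_name, path) for importable .py files.
    for fname in sorted(os.listdir(plugin_dir)):
        if fname.endswith('.py') and not fname.startswith('_'):
            yield os.path.splitext(fname)[0], os.path.join(plugin_dir, fname)

demo_dir = tempfile.mkdtemp()   # stands in for one sopel_modules.__path__ entry
pathlib.Path(demo_dir, 'weather.py').write_text('# plugin body\n')
for name, path in list_plugin_filenames(demo_dir):
    print('would yield PyModulePlugin(%r, "sopel_modules")' % name)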
7,892 |
def _vectfit_xs(energy, ce_xs, mts, rtol=1e-3, atol=1e-5, orders=None,
n_vf_iter=30, log=False, path_out=None, **kwargs):
r"""Convert point-wise cross section to multipole data via Vector Fitting.
Parameters
----------
energy : np.ndarray
Energy array
ce_xs : np.ndarray
Point-wise cross sections to be fitted
mts : Iterable of int
Reaction list
rtol : float, optional
Relative error tolerance
atol : float, optional
Absolute error tolerance
orders : Iterable of int, optional
A list of orders (number of poles) to be searched
n_vf_iter : int, optional
Number of maximum VF iterations
log : bool or int, optional
Whether to print running logs
path_out : str, optional
Path to save the figures
**kwargs
Additional keyword arguments
Returns
-------
Tuple
(poles, residues)
"""
# import vectfit package: https://github.com/liangjg/vectfit
import vectfit as vf
ne = energy.size
nmt = len(mts)
if ce_xs.shape != (nmt, ne):
raise ValueError('Inconsistent cross section data.')
# construct test data: interpolate xs with finer grids
N_FINER = 10
ne_test = (ne-1)*N_FINER + 1
test_energy = np.interp(np.arange(ne_test),
np.arange(ne_test, step=N_FINER), energy)
test_energy[[0, -1]] = energy[[0, -1]] # avoid numerical issue
test_xs_ref = np.zeros((nmt, ne_test))
for i in range(nmt):
test_xs_ref[i] = np.interp(test_energy, energy, ce_xs[i])
if log:
print("Energy: {:.3e} to {:.3e} eV ({} points)".format(
energy[0], energy[-1], ne))
# inputs
f = ce_xs * energy # sigma*E
s = np.sqrt(energy) # sqrt(E)
test_s = np.sqrt(test_energy)
weight = 1.0/f
# very small cross sections can lead to huge weights, which will harm the
# fitting accuracy
MIN_CROSS_SECTION = 1e-7
for i in range(nmt):
if np.all(ce_xs[i]<=MIN_CROSS_SECTION):
weight[i] = 1.0
elif np.any(ce_xs[i]<=MIN_CROSS_SECTION):
weight[i, ce_xs[i]<=MIN_CROSS_SECTION] = \
max(weight[i, ce_xs[i]>MIN_CROSS_SECTION])
# detect peaks (resonances) and determine VF order search range
peaks, _ = find_peaks(ce_xs[0]+ce_xs[1])
n_peaks = peaks.size
if orders is not None:
# make sure orders are even integers
orders = list(set([int(i/2)*2 for i in orders if i>=2]))
else:
lowest_order = max(2, 2*n_peaks)
highest_order = max(200, 4*n_peaks)
orders = list(range(lowest_order, highest_order+1, 2))
if log:
print("Found {} peaks".format(n_peaks))
print("Fitting orders from {} to {}".format(orders[0], orders[-1]))
# perform VF with increasing orders
found_ideal = False
    n_discarded = 0  # for acceleration, number of discarded searches
best_quality = best_ratio = -np.inf
for i, order in enumerate(orders):
if log:
print("Order={}({}/{})".format(order, i, len(orders)))
# initial guessed poles
poles = np.linspace(s[0], s[-1], order//2)
poles = poles + poles*0.01j
poles = np.sort(np.append(poles, np.conj(poles)))
found_better = False
# fitting iteration
for i_vf in range(n_vf_iter):
if log >= DETAILED_LOGGING:
print("VF iteration {}/{}".format(i_vf+1, n_vf_iter))
# call vf
try:
poles, residues, cf, f_fit, rms = vf.vectfit(f, s, poles, weight)
except:
break
# convert real pole to conjugate pairs
n_real_poles = 0
new_poles = []
for p in poles:
p_r, p_i = np.real(p), np.imag(p)
if (s[0] <= p_r <= s[-1]) and p_i == 0.:
new_poles += [p_r+p_r*0.01j, p_r-p_r*0.01j]
n_real_poles += 1
else:
new_poles += [p]
new_poles = np.array(new_poles)
# re-calculate residues if poles changed
if n_real_poles > 0:
if log >= DETAILED_LOGGING:
print(" # real poles: {}".format(n_real_poles))
new_poles, residues, cf, f_fit, rms = \
vf.vectfit(f, s, new_poles, weight, skip_pole=True)
# assess the result on test grid
test_xs = vf.evaluate(test_s, new_poles, residues) / test_energy
abserr = np.abs(test_xs - test_xs_ref)
relerr = abserr / test_xs_ref
if np.any(np.isnan(abserr)):
maxre, ratio, ratio2 = np.inf, -np.inf, -np.inf
elif np.all(abserr <= atol):
maxre, ratio, ratio2 = 0., 1., 1.
else:
maxre = np.max(relerr[abserr > atol])
ratio = np.sum((relerr<rtol) | (abserr<atol)) / relerr.size
ratio2 = np.sum((relerr<10*rtol) | (abserr<atol)) / relerr.size
quality = ratio + ratio2 - min(0.1*maxre, 1) - 0.001*new_poles.size
if np.any(test_xs < -atol):
quality = -np.inf
if log >= DETAILED_LOGGING:
print(" # poles: {}".format(new_poles.size))
print(" Max relative error: {:.3f}%".format(maxre*100))
print(" Satisfaction: {:.1f}%, {:.1f}%".format(ratio*100, ratio2*100))
print(" Quality: {:.2f}".format(quality))
if quality > best_quality:
if log >= DETAILED_LOGGING:
print(" Best by far!")
found_better = True
best_quality, best_ratio = quality, ratio
best_poles, best_residues = new_poles, residues
best_test_xs, best_relerr = test_xs, relerr
if best_ratio >= 1.0:
if log:
print("Found ideal results. Stop!")
found_ideal = True
break
else:
if log >= DETAILED_LOGGING:
print(" Discarded!")
if found_ideal:
break
# acceleration
if found_better:
n_discarded = 0
else:
if order > max(2*n_peaks, 50) and best_ratio > 0.7:
n_discarded += 1
if n_discarded >= 10 or (n_discarded >= 5 and best_ratio > 0.9):
if log >= DETAILED_LOGGING:
print("Couldn't get better results. Stop!")
break
# merge conjugate poles
real_idx = []
conj_idx = []
found_conj = False
for i, p in enumerate(best_poles):
if found_conj:
found_conj = False
continue
if np.imag(p) == 0.:
real_idx.append(i)
else:
if i < best_poles.size and np.conj(p) == best_poles[i+1]:
found_conj = True
conj_idx.append(i)
else:
raise RuntimeError("Complex poles are not conjugate!")
if log:
print("Found {} real poles and {} conjugate complex pairs.".format(
len(real_idx), len(conj_idx)))
mp_poles = best_poles[real_idx+conj_idx]
mp_residues = np.concatenate((best_residues[:, real_idx],
best_residues[:, conj_idx]*2), axis=1)/1j
if log:
print("Final number of poles: {}".format(mp_poles.size))
if path_out:
import matplotlib
matplotlib.use("agg")
import matplotlib.pyplot as plt
if not os.path.exists(path_out):
os.makedirs(path_out)
for i, mt in enumerate(mts):
fig, ax1 = plt.subplots()
lns1 = ax1.loglog(test_energy, test_xs_ref[i], 'g', label="ACE xs")
lns2 = ax1.loglog(test_energy, best_test_xs[i], 'b', label="VF xs")
ax2 = ax1.twinx()
lns3 = ax2.loglog(test_energy, best_relerr[i], 'r',
label="Relative error", alpha=0.5)
lns = lns1 + lns2 + lns3
labels = [l.get_label() for l in lns]
ax1.legend(lns, labels, loc='best')
ax1.set_xlabel('energy (eV)')
ax1.set_ylabel('cross section (b)', color='b')
ax1.tick_params('y', colors='b')
ax2.set_ylabel('relative error', color='r')
ax2.tick_params('y', colors='r')
plt.title("MT {} vectfitted with {} poles".format(mt, mp_poles.size))
fig.tight_layout()
fig_file = os.path.join(path_out, "{:.0f}-{:.0f}_MT{}.png".format(
energy[0], energy[-1], mt))
plt.savefig(fig_file)
plt.close()
if log:
print("Saved figure: {}".format(fig_file))
return (mp_poles, mp_residues)
|
def _vectfit_xs(energy, ce_xs, mts, rtol=1e-3, atol=1e-5, orders=None,
n_vf_iter=30, log=False, path_out=None, **kwargs):
"""Convert point-wise cross section to multipole data via vector fitting.
Parameters
----------
energy : np.ndarray
Energy array
ce_xs : np.ndarray
Point-wise cross sections to be fitted
mts : Iterable of int
Reaction list
rtol : float, optional
Relative error tolerance
atol : float, optional
Absolute error tolerance
orders : Iterable of int, optional
A list of orders (number of poles) to be searched
n_vf_iter : int, optional
Number of maximum VF iterations
log : bool or int, optional
Whether to print running logs
path_out : str, optional
Path to save the figures
**kwargs
Additional keyword arguments
Returns
-------
Tuple
(poles, residues)
"""
# import vectfit package: https://github.com/liangjg/vectfit
import vectfit as vf
ne = energy.size
nmt = len(mts)
if ce_xs.shape != (nmt, ne):
raise ValueError('Inconsistent cross section data.')
# construct test data: interpolate xs with finer grids
N_FINER = 10
ne_test = (ne-1)*N_FINER + 1
test_energy = np.interp(np.arange(ne_test),
np.arange(ne_test, step=N_FINER), energy)
test_energy[[0, -1]] = energy[[0, -1]] # avoid numerical issue
test_xs_ref = np.zeros((nmt, ne_test))
for i in range(nmt):
test_xs_ref[i] = np.interp(test_energy, energy, ce_xs[i])
if log:
print("Energy: {:.3e} to {:.3e} eV ({} points)".format(
energy[0], energy[-1], ne))
# inputs
f = ce_xs * energy # sigma*E
s = np.sqrt(energy) # sqrt(E)
test_s = np.sqrt(test_energy)
weight = 1.0/f
# very small cross sections can lead to huge weights, which will harm the
# fitting accuracy
MIN_CROSS_SECTION = 1e-7
for i in range(nmt):
if np.all(ce_xs[i]<=MIN_CROSS_SECTION):
weight[i] = 1.0
elif np.any(ce_xs[i]<=MIN_CROSS_SECTION):
weight[i, ce_xs[i]<=MIN_CROSS_SECTION] = \
max(weight[i, ce_xs[i]>MIN_CROSS_SECTION])
# detect peaks (resonances) and determine VF order search range
peaks, _ = find_peaks(ce_xs[0]+ce_xs[1])
n_peaks = peaks.size
if orders is not None:
# make sure orders are even integers
orders = list(set([int(i/2)*2 for i in orders if i>=2]))
else:
lowest_order = max(2, 2*n_peaks)
highest_order = max(200, 4*n_peaks)
orders = list(range(lowest_order, highest_order+1, 2))
if log:
print("Found {} peaks".format(n_peaks))
print("Fitting orders from {} to {}".format(orders[0], orders[-1]))
# perform VF with increasing orders
found_ideal = False
    n_discarded = 0  # for acceleration, number of discarded searches
best_quality = best_ratio = -np.inf
for i, order in enumerate(orders):
if log:
print("Order={}({}/{})".format(order, i, len(orders)))
# initial guessed poles
poles = np.linspace(s[0], s[-1], order//2)
poles = poles + poles*0.01j
poles = np.sort(np.append(poles, np.conj(poles)))
found_better = False
# fitting iteration
for i_vf in range(n_vf_iter):
if log >= DETAILED_LOGGING:
print("VF iteration {}/{}".format(i_vf+1, n_vf_iter))
# call vf
try:
poles, residues, cf, f_fit, rms = vf.vectfit(f, s, poles, weight)
except:
break
# convert real pole to conjugate pairs
n_real_poles = 0
new_poles = []
for p in poles:
p_r, p_i = np.real(p), np.imag(p)
if (s[0] <= p_r <= s[-1]) and p_i == 0.:
new_poles += [p_r+p_r*0.01j, p_r-p_r*0.01j]
n_real_poles += 1
else:
new_poles += [p]
new_poles = np.array(new_poles)
# re-calculate residues if poles changed
if n_real_poles > 0:
if log >= DETAILED_LOGGING:
print(" # real poles: {}".format(n_real_poles))
new_poles, residues, cf, f_fit, rms = \
vf.vectfit(f, s, new_poles, weight, skip_pole=True)
# assess the result on test grid
test_xs = vf.evaluate(test_s, new_poles, residues) / test_energy
abserr = np.abs(test_xs - test_xs_ref)
relerr = abserr / test_xs_ref
if np.any(np.isnan(abserr)):
maxre, ratio, ratio2 = np.inf, -np.inf, -np.inf
elif np.all(abserr <= atol):
maxre, ratio, ratio2 = 0., 1., 1.
else:
maxre = np.max(relerr[abserr > atol])
ratio = np.sum((relerr<rtol) | (abserr<atol)) / relerr.size
ratio2 = np.sum((relerr<10*rtol) | (abserr<atol)) / relerr.size
quality = ratio + ratio2 - min(0.1*maxre, 1) - 0.001*new_poles.size
if np.any(test_xs < -atol):
quality = -np.inf
if log >= DETAILED_LOGGING:
print(" # poles: {}".format(new_poles.size))
print(" Max relative error: {:.3f}%".format(maxre*100))
print(" Satisfaction: {:.1f}%, {:.1f}%".format(ratio*100, ratio2*100))
print(" Quality: {:.2f}".format(quality))
if quality > best_quality:
if log >= DETAILED_LOGGING:
print(" Best by far!")
found_better = True
best_quality, best_ratio = quality, ratio
best_poles, best_residues = new_poles, residues
best_test_xs, best_relerr = test_xs, relerr
if best_ratio >= 1.0:
if log:
print("Found ideal results. Stop!")
found_ideal = True
break
else:
if log >= DETAILED_LOGGING:
print(" Discarded!")
if found_ideal:
break
# acceleration
if found_better:
n_discarded = 0
else:
if order > max(2*n_peaks, 50) and best_ratio > 0.7:
n_discarded += 1
if n_discarded >= 10 or (n_discarded >= 5 and best_ratio > 0.9):
if log >= DETAILED_LOGGING:
print("Couldn't get better results. Stop!")
break
# merge conjugate poles
real_idx = []
conj_idx = []
found_conj = False
for i, p in enumerate(best_poles):
if found_conj:
found_conj = False
continue
if np.imag(p) == 0.:
real_idx.append(i)
else:
if i < best_poles.size and np.conj(p) == best_poles[i+1]:
found_conj = True
conj_idx.append(i)
else:
raise RuntimeError("Complex poles are not conjugate!")
if log:
print("Found {} real poles and {} conjugate complex pairs.".format(
len(real_idx), len(conj_idx)))
mp_poles = best_poles[real_idx+conj_idx]
mp_residues = np.concatenate((best_residues[:, real_idx],
best_residues[:, conj_idx]*2), axis=1)/1j
if log:
print("Final number of poles: {}".format(mp_poles.size))
if path_out:
import matplotlib
matplotlib.use("agg")
import matplotlib.pyplot as plt
if not os.path.exists(path_out):
os.makedirs(path_out)
for i, mt in enumerate(mts):
fig, ax1 = plt.subplots()
lns1 = ax1.loglog(test_energy, test_xs_ref[i], 'g', label="ACE xs")
lns2 = ax1.loglog(test_energy, best_test_xs[i], 'b', label="VF xs")
ax2 = ax1.twinx()
lns3 = ax2.loglog(test_energy, best_relerr[i], 'r',
label="Relative error", alpha=0.5)
lns = lns1 + lns2 + lns3
labels = [l.get_label() for l in lns]
ax1.legend(lns, labels, loc='best')
ax1.set_xlabel('energy (eV)')
ax1.set_ylabel('cross section (b)', color='b')
ax1.tick_params('y', colors='b')
ax2.set_ylabel('relative error', color='r')
ax2.tick_params('y', colors='r')
plt.title("MT {} vectfitted with {} poles".format(mt, mp_poles.size))
fig.tight_layout()
fig_file = os.path.join(path_out, "{:.0f}-{:.0f}_MT{}.png".format(
energy[0], energy[-1], mt))
plt.savefig(fig_file)
plt.close()
if log:
print("Saved figure: {}".format(fig_file))
return (mp_poles, mp_residues)
|
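The `_vectfit_xs` pair above fits f(E) = sigma(E)*E as a rational function of s = sqrt(E) and recovers the cross section as `vf.evaluate(sqrt(E), poles, residues) / E`. The sketch below evaluates the usual vector-fitting rational form sum_k r_k / (s - p_k) with plain numpy on toy poles; the exact sign and constant-term conventions of the actual `vectfit` package are an assumption here.

# Illustration only: evaluate a pole/residue representation and back out sigma = f/E.
import numpy as np

def evaluate_rational(s, poles, residues):
    # f(s) ~= sum_k residues[:, k] / (s - poles[k]); residues has shape (nmt, npoles)
    return np.real(residues @ (1.0 / (s[np.newaxis, :] - poles[:, np.newaxis])))

energy = np.linspace(1.0, 100.0, 5)            # eV, toy grid
poles = np.array([3.0 + 0.1j, 3.0 - 0.1j])     # toy conjugate pair
residues = np.array([[1.0 + 0.5j, 1.0 - 0.5j]])
s = np.sqrt(energy)
sigma = evaluate_rational(s, poles, residues) / energy   # sigma(E) = f(sqrt(E)) / E
print(sigma)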
30,811 |
def main():
try:
return_results(feed_related_indicator())
except Exception as e:
return_error(f'Error : {str(e)}')
|
def main():
try:
return_results(feed_related_indicator())
except Exception as e:
return_error(f'Failed to execute FeedRelatedIndicatorsWidget. Error: {str(e)}')
|
36,243 |
def umap(
adata,
min_dist=0.5,
spread=1.0,
n_components=2,
maxiter=None,
alpha=1.0,
gamma=1.0,
negative_sample_rate=5,
init_pos='spectral',
random_state=0,
a=None,
b=None,
copy=False,
method='umap'
):
"""Embed the neighborhood graph using UMAP [McInnes18]_.
UMAP (Uniform Manifold Approximation and Projection) is a manifold learning
technique suitable for visualizing high-dimensional data. Besides tending to
be faster than tSNE, it optimizes the embedding such that it best reflects
the topology of the data, which we represent throughout Scanpy using a
neighborhood graph. tSNE, by contrast, optimizes the distribution of
nearest-neighbor distances in the embedding such that these best match the
distribution of distances in the high-dimensional space. We use the
implementation of `umap-learn <https://github.com/lmcinnes/umap>`__
[McInnes18]_. For a few comparisons of UMAP with tSNE, see this `preprint
<https://doi.org/10.1101/298430>`__.
Parameters
----------
adata : :class:`~anndata.AnnData`
Annotated data matrix.
min_dist : `float`, optional (default: 0.5)
The effective minimum distance between embedded points. Smaller values
will result in a more clustered/clumped embedding where nearby points on
the manifold are drawn closer together, while larger values will result
        in a more even dispersal of points. The value should be set relative to
        the ``spread`` value, which determines the scale at which embedded
        points will be spread out. The default in the `umap-learn` package is
        0.1.
spread : `float` (optional, default 1.0)
The effective scale of embedded points. In combination with `min_dist`
this determines how clustered/clumped the embedded points are.
n_components : `int`, optional (default: 2)
The number of dimensions of the embedding.
maxiter : `int`, optional (default: `None`)
The number of iterations (epochs) of the optimization. Called `n_epochs`
in the original UMAP.
alpha : `float`, optional (default: 1.0)
The initial learning rate for the embedding optimization.
gamma : `float` (optional, default 1.0)
Weighting applied to negative samples in low dimensional embedding
optimization. Values higher than one will result in greater weight
being given to negative samples.
negative_sample_rate : `int` (optional, default 5)
The number of negative edge/1-simplex samples to use per positive
edge/1-simplex sample in optimizing the low dimensional embedding.
init_pos : `string` or `np.array`, optional (default: 'spectral')
How to initialize the low dimensional embedding. Called `init` in the
original UMAP.
Options are:
* Any key for `adata.obsm`.
* 'paga': positions from :func:`~scanpy.pl.paga`.
* 'spectral': use a spectral embedding of the graph.
* 'random': assign initial embedding positions at random.
* A numpy array of initial embedding positions.
random_state : `int`, `RandomState` or `None`, optional (default: 0)
If `int`, `random_state` is the seed used by the random number generator;
If `RandomState`, `random_state` is the random number generator;
If `None`, the random number generator is the `RandomState` instance used
by `np.random`.
a : `float` (optional, default `None`)
More specific parameters controlling the embedding. If `None` these
values are set automatically as determined by `min_dist` and
`spread`.
b : `float` (optional, default `None`)
More specific parameters controlling the embedding. If `None` these
values are set automatically as determined by `min_dist` and
`spread`.
copy : `bool` (default: `False`)
Return a copy instead of writing to adata.
method : {{'umap', 'rapids'}} (default: `'umap'`)
Use the original 'umap' implementation, or 'rapids' (experimental, GPU only)
Returns
-------
Depending on `copy`, returns or updates `adata` with the following fields.
**X_umap** : `adata.obsm` field
UMAP coordinates of data.
"""
adata = adata.copy() if copy else adata
if 'neighbors' not in adata.uns:
raise ValueError(
'Did not find \'neighbors/connectivities\'. Run `sc.pp.neighbors` first.')
start = logg.info('computing UMAP')
if ('params' not in adata.uns['neighbors']
or adata.uns['neighbors']['params']['method'] != 'umap'):
logg.warning('neighbors/connectivities have not been computed using umap')
from umap.umap_ import find_ab_params, simplicial_set_embedding
if a is None or b is None:
a, b = find_ab_params(spread, min_dist)
else:
a = a
b = b
if isinstance(init_pos, str) and init_pos in adata.obsm.keys():
init_coords = adata.obsm[init_pos]
elif isinstance(init_pos, str) and init_pos == 'paga':
init_coords = get_init_pos_from_paga(adata, random_state=random_state)
else:
init_coords = init_pos # Let umap handle it
if hasattr(init_coords, "dtype"):
init_coords = check_array(init_coords, dtype=np.float32, accept_sparse=False)
random_state = check_random_state(random_state)
neigh_params = adata.uns['neighbors']['params']
X = _choose_representation(
adata, neigh_params.get('use_rep', None), neigh_params.get('n_pcs', None), silent=True)
if method == 'umap':
# the data matrix X is really only used for determining the number of connected components
# for the init condition in the UMAP embedding
n_epochs = 0 if maxiter is None else maxiter
X_umap = simplicial_set_embedding(
X,
adata.uns['neighbors']['connectivities'].tocoo(),
n_components,
alpha,
a,
b,
gamma,
negative_sample_rate,
n_epochs,
init_coords,
random_state,
neigh_params.get('metric', 'euclidean'),
neigh_params.get('metric_kwds', {}),
verbose=settings.verbosity > 3,
)
elif method == 'rapids':
from cuml import UMAP
n_neighbors = adata.uns['neighbors']['params']['n_neighbors']
n_epochs = 500 if maxiter is None else maxiter # 0 is not a valid value for rapids, unlike original umap
X_contiguous = np.ascontiguousarray(X, dtype=np.float32)
umap = UMAP(
n_neighbors=n_neighbors,
n_components=n_components,
n_epochs=n_epochs,
learning_rate=alpha,
init=init_pos,
min_dist=min_dist,
spread=spread,
negative_sample_rate=negative_sample_rate,
a=a,
b=b,
verbose=settings.verbosity > 3,
)
X_umap = umap.fit_transform(X_contiguous)
adata.obsm['X_umap'] = X_umap # annotate samples with UMAP coordinates
logg.info(
' finished',
time=start,
deep=(
'added\n'
" 'X_umap', UMAP coordinates (adata.obsm)"
),
)
return adata if copy else None
|
def umap(
adata,
min_dist=0.5,
spread=1.0,
n_components=2,
maxiter=None,
alpha=1.0,
gamma=1.0,
negative_sample_rate=5,
init_pos='spectral',
random_state=0,
a=None,
b=None,
copy=False,
method='umap'
):
"""Embed the neighborhood graph using UMAP [McInnes18]_.
UMAP (Uniform Manifold Approximation and Projection) is a manifold learning
technique suitable for visualizing high-dimensional data. Besides tending to
be faster than tSNE, it optimizes the embedding such that it best reflects
the topology of the data, which we represent throughout Scanpy using a
neighborhood graph. tSNE, by contrast, optimizes the distribution of
nearest-neighbor distances in the embedding such that these best match the
distribution of distances in the high-dimensional space. We use the
implementation of `umap-learn <https://github.com/lmcinnes/umap>`__
[McInnes18]_. For a few comparisons of UMAP with tSNE, see this `preprint
<https://doi.org/10.1101/298430>`__.
Parameters
----------
adata : :class:`~anndata.AnnData`
Annotated data matrix.
min_dist : `float`, optional (default: 0.5)
The effective minimum distance between embedded points. Smaller values
will result in a more clustered/clumped embedding where nearby points on
the manifold are drawn closer together, while larger values will result
        in a more even dispersal of points. The value should be set relative to
        the ``spread`` value, which determines the scale at which embedded
        points will be spread out. The default in the `umap-learn` package is
        0.1.
spread : `float` (optional, default 1.0)
The effective scale of embedded points. In combination with `min_dist`
this determines how clustered/clumped the embedded points are.
n_components : `int`, optional (default: 2)
The number of dimensions of the embedding.
maxiter : `int`, optional (default: `None`)
The number of iterations (epochs) of the optimization. Called `n_epochs`
in the original UMAP.
alpha : `float`, optional (default: 1.0)
The initial learning rate for the embedding optimization.
gamma : `float` (optional, default 1.0)
Weighting applied to negative samples in low dimensional embedding
optimization. Values higher than one will result in greater weight
being given to negative samples.
negative_sample_rate : `int` (optional, default 5)
The number of negative edge/1-simplex samples to use per positive
edge/1-simplex sample in optimizing the low dimensional embedding.
init_pos : `string` or `np.array`, optional (default: 'spectral')
How to initialize the low dimensional embedding. Called `init` in the
original UMAP.
Options are:
* Any key for `adata.obsm`.
* 'paga': positions from :func:`~scanpy.pl.paga`.
* 'spectral': use a spectral embedding of the graph.
* 'random': assign initial embedding positions at random.
* A numpy array of initial embedding positions.
random_state : `int`, `RandomState` or `None`, optional (default: 0)
If `int`, `random_state` is the seed used by the random number generator;
If `RandomState`, `random_state` is the random number generator;
If `None`, the random number generator is the `RandomState` instance used
by `np.random`.
a : `float` (optional, default `None`)
More specific parameters controlling the embedding. If `None` these
values are set automatically as determined by `min_dist` and
`spread`.
b : `float` (optional, default `None`)
More specific parameters controlling the embedding. If `None` these
values are set automatically as determined by `min_dist` and
`spread`.
copy : `bool` (default: `False`)
Return a copy instead of writing to adata.
method : {`'umap'`, `'rapids'`} (default: `'umap'`)
Use the original 'umap' implementation, or 'rapids' (experimental, GPU only)
Returns
-------
Depending on `copy`, returns or updates `adata` with the following fields.
**X_umap** : `adata.obsm` field
UMAP coordinates of data.
"""
adata = adata.copy() if copy else adata
if 'neighbors' not in adata.uns:
raise ValueError(
'Did not find \'neighbors/connectivities\'. Run `sc.pp.neighbors` first.')
start = logg.info('computing UMAP')
if ('params' not in adata.uns['neighbors']
or adata.uns['neighbors']['params']['method'] != 'umap'):
logg.warning('neighbors/connectivities have not been computed using umap')
from umap.umap_ import find_ab_params, simplicial_set_embedding
if a is None or b is None:
a, b = find_ab_params(spread, min_dist)
else:
a = a
b = b
if isinstance(init_pos, str) and init_pos in adata.obsm.keys():
init_coords = adata.obsm[init_pos]
elif isinstance(init_pos, str) and init_pos == 'paga':
init_coords = get_init_pos_from_paga(adata, random_state=random_state)
else:
init_coords = init_pos # Let umap handle it
if hasattr(init_coords, "dtype"):
init_coords = check_array(init_coords, dtype=np.float32, accept_sparse=False)
random_state = check_random_state(random_state)
neigh_params = adata.uns['neighbors']['params']
X = _choose_representation(
adata, neigh_params.get('use_rep', None), neigh_params.get('n_pcs', None), silent=True)
if method == 'umap':
# the data matrix X is really only used for determining the number of connected components
# for the init condition in the UMAP embedding
n_epochs = 0 if maxiter is None else maxiter
X_umap = simplicial_set_embedding(
X,
adata.uns['neighbors']['connectivities'].tocoo(),
n_components,
alpha,
a,
b,
gamma,
negative_sample_rate,
n_epochs,
init_coords,
random_state,
neigh_params.get('metric', 'euclidean'),
neigh_params.get('metric_kwds', {}),
verbose=settings.verbosity > 3,
)
elif method == 'rapids':
from cuml import UMAP
n_neighbors = adata.uns['neighbors']['params']['n_neighbors']
n_epochs = 500 if maxiter is None else maxiter # 0 is not a valid value for rapids, unlike original umap
X_contiguous = np.ascontiguousarray(X, dtype=np.float32)
umap = UMAP(
n_neighbors=n_neighbors,
n_components=n_components,
n_epochs=n_epochs,
learning_rate=alpha,
init=init_pos,
min_dist=min_dist,
spread=spread,
negative_sample_rate=negative_sample_rate,
a=a,
b=b,
verbose=settings.verbosity > 3,
)
X_umap = umap.fit_transform(X_contiguous)
adata.obsm['X_umap'] = X_umap # annotate samples with UMAP coordinates
logg.info(
' finished',
time=start,
deep=(
'added\n'
" 'X_umap', UMAP coordinates (adata.obsm)"
),
)
return adata if copy else None
|
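A typical call pattern implied by the docstring above, as a sketch (assumes scanpy is installed; the bundled `pbmc68k_reduced` dataset is used only so the example runs end to end):

# Usage sketch: neighbors must be computed before tl.umap; coordinates land in obsm.
import scanpy as sc

adata = sc.datasets.pbmc68k_reduced()   # small bundled example dataset
sc.pp.neighbors(adata)                  # required before tl.umap
sc.tl.umap(adata, min_dist=0.3)
print(adata.obsm['X_umap'].shape)       # (n_cells, 2) UMAP coordinates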
58,176 |
def main() -> None: # pragma: no cover
"""main function, parses params and runs command functions
:return:
:rtype:
"""
workspace = demisto.params().get('Workspace', "")
server_url = demisto.params().get('server_url', "")
user_name = demisto.params().get('UserName', "").get('identifier', "")
app_password = demisto.params().get('UserName', "").get('password', "")
repository = demisto.params().get('repository', "")
verify_certificate = not demisto.params().get('insecure', False)
proxy = demisto.params().get('proxy', False)
auth = (user_name, app_password)
demisto.debug(f'Command being called is {demisto.command()}')
try:
client = Client(
workspace=workspace,
server_url=server_url,
auth=auth,
proxy=proxy,
verify=verify_certificate,
repository=repository
)
if demisto.command() == 'test-module':
# This is the call made when pressing the integration Test button.
str_result: str = test_module(client)
return_results(str_result)
elif demisto.command() == 'bitbucket-project-list':
result: CommandResults = project_list_command(client, demisto.args())
return_results(result)
elif demisto.command() == 'bitbucket-open-branch-list':
result = open_branch_list_command(client, demisto.args())
return_results(result)
elif demisto.command() == 'bitbucket-branch-get':
result = branch_get_command(client, demisto.args())
return_results(result)
elif demisto.command() == 'bitbucket-branch-create':
result = branch_create_command(client, demisto.args())
return_results(result)
elif demisto.command() == 'bitbucket-branch-delete':
result = branch_delete_command(client, demisto.args())
return_results(result)
elif demisto.command() == 'bitbucket-commit-create':
result = commit_create_command(client, demisto.args())
return_results(result)
elif demisto.command() == 'bitbucket-commit-list':
result = commit_list_command(client, demisto.args())
return_results(result)
elif demisto.command() == 'bitbucket-file-delete':
result = file_delete_command(client, demisto.args())
return_results(result)
elif demisto.command() == 'bitbucket-raw-file-get':
result_list = raw_file_get_command(client, demisto.args())
return_results(result_list)
elif demisto.command() == 'bitbucket-issue-create':
result = issue_create_command(client, demisto.args())
return_results(result)
elif demisto.command() == 'bitbucket-issue-list':
result = issue_list_command(client, demisto.args())
return_results(result)
elif demisto.command() == 'bitbucket-issue-update':
result = issue_update_command(client, demisto.args())
return_results(result)
elif demisto.command() == 'bitbucket-pull-request-create':
result = pull_request_create_command(client, demisto.args())
return_results(result)
elif demisto.command() == 'bitbucket-pull-request-update':
result = pull_request_update_command(client, demisto.args())
return_results(result)
elif demisto.command() == 'bitbucket-pull-request-list':
result = pull_request_list_command(client, demisto.args())
return_results(result)
elif demisto.command() == 'bitbucket-issue-comment-create':
result = issue_comment_create_command(client, demisto.args())
return_results(result)
elif demisto.command() == 'bitbucket-issue-comment-delete':
result = issue_comment_delete_command(client, demisto.args())
return_results(result)
elif demisto.command() == 'bitbucket-issue-comment-update':
result = issue_comment_update_command(client, demisto.args())
return_results(result)
elif demisto.command() == 'bitbucket-issue-comment-list':
result = issue_comment_list_command(client, demisto.args())
return_results(result)
elif demisto.command() == 'bitbucket-pull-request-comment-create':
result = pull_request_comment_create_command(client, demisto.args())
return_results(result)
elif demisto.command() == 'bitbucket-pull-request-comment-list':
result = pull_request_comment_list_command(client, demisto.args())
return_results(result)
elif demisto.command() == 'bitbucket-pull-request-comment-update':
result = pull_request_comment_update_command(client, demisto.args())
return_results(result)
elif demisto.command() == 'bitbucket-pull-request-comment-delete':
result = pull_request_comment_delete_command(client, demisto.args())
return_results(result)
elif demisto.command() == 'bitbucket-workspace-member-list':
result = workspace_member_list_command(client, demisto.args())
return_results(result)
else:
raise NotImplementedError('This command is not implemented yet.')
# Log exceptions and return errors
except Exception as e:
return_error(f'Failed to execute {demisto.command()} command.\nError:\n{str(e)}')
|
def main() -> None: # pragma: no cover
workspace = demisto.params().get('Workspace', "")
server_url = demisto.params().get('server_url', "")
user_name = demisto.params().get('UserName', "").get('identifier', "")
app_password = demisto.params().get('UserName', "").get('password', "")
repository = demisto.params().get('repository', "")
verify_certificate = not demisto.params().get('insecure', False)
proxy = demisto.params().get('proxy', False)
auth = (user_name, app_password)
demisto.debug(f'Command being called is {demisto.command()}')
try:
client = Client(
workspace=workspace,
server_url=server_url,
auth=auth,
proxy=proxy,
verify=verify_certificate,
repository=repository
)
if demisto.command() == 'test-module':
# This is the call made when pressing the integration Test button.
str_result: str = test_module(client)
return_results(str_result)
elif demisto.command() == 'bitbucket-project-list':
result: CommandResults = project_list_command(client, demisto.args())
return_results(result)
elif demisto.command() == 'bitbucket-open-branch-list':
result = open_branch_list_command(client, demisto.args())
return_results(result)
elif demisto.command() == 'bitbucket-branch-get':
result = branch_get_command(client, demisto.args())
return_results(result)
elif demisto.command() == 'bitbucket-branch-create':
result = branch_create_command(client, demisto.args())
return_results(result)
elif demisto.command() == 'bitbucket-branch-delete':
result = branch_delete_command(client, demisto.args())
return_results(result)
elif demisto.command() == 'bitbucket-commit-create':
result = commit_create_command(client, demisto.args())
return_results(result)
elif demisto.command() == 'bitbucket-commit-list':
result = commit_list_command(client, demisto.args())
return_results(result)
elif demisto.command() == 'bitbucket-file-delete':
result = file_delete_command(client, demisto.args())
return_results(result)
elif demisto.command() == 'bitbucket-raw-file-get':
result_list = raw_file_get_command(client, demisto.args())
return_results(result_list)
elif demisto.command() == 'bitbucket-issue-create':
result = issue_create_command(client, demisto.args())
return_results(result)
elif demisto.command() == 'bitbucket-issue-list':
result = issue_list_command(client, demisto.args())
return_results(result)
elif demisto.command() == 'bitbucket-issue-update':
result = issue_update_command(client, demisto.args())
return_results(result)
elif demisto.command() == 'bitbucket-pull-request-create':
result = pull_request_create_command(client, demisto.args())
return_results(result)
elif demisto.command() == 'bitbucket-pull-request-update':
result = pull_request_update_command(client, demisto.args())
return_results(result)
elif demisto.command() == 'bitbucket-pull-request-list':
result = pull_request_list_command(client, demisto.args())
return_results(result)
elif demisto.command() == 'bitbucket-issue-comment-create':
result = issue_comment_create_command(client, demisto.args())
return_results(result)
elif demisto.command() == 'bitbucket-issue-comment-delete':
result = issue_comment_delete_command(client, demisto.args())
return_results(result)
elif demisto.command() == 'bitbucket-issue-comment-update':
result = issue_comment_update_command(client, demisto.args())
return_results(result)
elif demisto.command() == 'bitbucket-issue-comment-list':
result = issue_comment_list_command(client, demisto.args())
return_results(result)
elif demisto.command() == 'bitbucket-pull-request-comment-create':
result = pull_request_comment_create_command(client, demisto.args())
return_results(result)
elif demisto.command() == 'bitbucket-pull-request-comment-list':
result = pull_request_comment_list_command(client, demisto.args())
return_results(result)
elif demisto.command() == 'bitbucket-pull-request-comment-update':
result = pull_request_comment_update_command(client, demisto.args())
return_results(result)
elif demisto.command() == 'bitbucket-pull-request-comment-delete':
result = pull_request_comment_delete_command(client, demisto.args())
return_results(result)
elif demisto.command() == 'bitbucket-workspace-member-list':
result = workspace_member_list_command(client, demisto.args())
return_results(result)
else:
raise NotImplementedError('This command is not implemented yet.')
# Log exceptions and return errors
except Exception as e:
return_error(f'Failed to execute {demisto.command()} command.\nError:\n{str(e)}')
|
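The if/elif chain in both versions above maps each command string to a handler with the same call signature, so it could equally be expressed as a dispatch table. This is a refactoring sketch only, with stub handlers standing in for the real `*_command` functions so the pattern runs standalone:

# Refactoring sketch (not the integration's code): command dispatch via a dict.
def project_list_command(client, args):
    return f'project_list({args})'

def branch_get_command(client, args):
    return f'branch_get({args})'

COMMANDS = {
    'bitbucket-project-list': project_list_command,
    'bitbucket-branch-get': branch_get_command,
}

def dispatch(command, client, args):
    try:
        handler = COMMANDS[command]
    except KeyError:
        raise NotImplementedError(f'Command {command} is not implemented.')
    return handler(client, args)

print(dispatch('bitbucket-branch-get', client=None, args={'name': 'main'}))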
33,027 |
def install_build_src(args: CommandLineArguments, root: str, do_run_build_script: bool, for_cache: bool) -> None:
if not do_run_build_script:
return
if for_cache:
return
if args.build_script is None:
return
with complete_step('Copying in build script and sources'):
copy_file(args.build_script, os.path.join(root, "root", os.path.basename(args.build_script)))
if args.build_sources is not None:
target = os.path.join(root, "root/src")
if args.source_file_transfer in (SourceFileTransfer.copy_git_others,
SourceFileTransfer.copy_git_cached,
SourceFileTransfer.copy_git_more):
copy_git_files(args.build_sources, target, source_file_transfer=args.source_file_transfer)
elif args.source_file_transfer == SourceFileTransfer.copy_all:
ignore = shutil.ignore_patterns('.git',
'.mkosi-*',
'*.cache-pre-dev',
'*.cache-pre-inst',
os.path.basename(args.output_dir)+"/" if args.output_dir else "mkosi.output/", # NOQA: E501
os.path.basename(args.cache_path)+"/" if args.cache_path else "mkosi.cache/", # NOQA: E501
os.path.basename(args.build_dir)+"/" if args.build_dir else "mkosi.builddir/", # NOQA: E501
os.path.basename(args.include_dir)+"/" if args.include_dir else "mkosi.includedir/", # NOQA: E501
os.path.basename(args.install_dir)+"/" if args.install_dir else "mkosi.installdir/")
shutil.copytree(args.build_sources, target, symlinks=True, ignore=ignore)
|
def install_build_src(args: CommandLineArguments, root: str, do_run_build_script: bool, for_cache: bool) -> None:
if not do_run_build_script:
return
if for_cache:
return
if args.build_script is None:
return
with complete_step('Copying in build script and sources'):
copy_file(args.build_script, os.path.join(root, "root", os.path.basename(args.build_script)))
if args.build_sources is not None:
target = os.path.join(root, "root/src")
if args.source_file_transfer in (SourceFileTransfer.copy_git_others,
SourceFileTransfer.copy_git_cached,
SourceFileTransfer.copy_git_more):
copy_git_files(args.build_sources, target, source_file_transfer=args.source_file_transfer)
elif args.source_file_transfer == SourceFileTransfer.copy_all:
ignore = shutil.ignore_patterns('.git',
'.mkosi-*',
'*.cache-pre-dev',
'*.cache-pre-inst',
os.path.basename(args.output_dir)+"/" if args.output_dir else "mkosi.output/", # NOQA: E501
os.path.basename(args.cache_path)+"/" if args.cache_path else "mkosi.cache/", # NOQA: E501
os.path.basename(args.build_dir)+"/" if args.build_dir else "mkosi.builddir/", # NOQA: E501
os.path.basename(args.include_dir)+"/" if args.include_dir else "mkosi.includedir/", # NOQA: E501
os.path.basename(args.install_dir)+"/" if args.install_dir else "mkosi.installdir/") # NOQA: E501
shutil.copytree(args.build_sources, target, symlinks=True, ignore=ignore)
|
52,265 |
def _make_correct_account_type(env):
query = """
UPDATE account_account as ac
SET user_type_id=aat.user_type_id
FROM account_account_template as aat
LEFT JOIN account_chart_template as act
ON aat.chart_template_id = act.id
LEFT JOIN res_company as c
ON c.chart_template_id = act.id
WHERE ac.code =
CASE
WHEN
act.code_digits < LENGTH(aat.code) THEN aat.code
ELSE
CONCAT(aat.code,
REPEAT('0',act.code_digits - LENGTH(aat.code)))
END
AND ac.user_type_id != aat.user_type_id
AND ac.company_id = c.id;
UPDATE account_account as ac
SET internal_type=at.type,
internal_group=at.internal_group
FROM account_account_type as at
WHERE ac.user_type_id=at.id;
"""
openupgrade.logged_query(
env.cr,
query,
)
|
def _make_correct_account_type(env):
query = """
UPDATE account_account as ac
SET user_type_id=aat.user_type_id
FROM account_account_template as aat
JOIN account_chart_template as act
ON aat.chart_template_id = act.id
JOIN res_company as c
ON c.chart_template_id = act.id
WHERE ac.code =
CASE
WHEN
act.code_digits < LENGTH(aat.code) THEN aat.code
ELSE
CONCAT(aat.code,
REPEAT('0',act.code_digits - LENGTH(aat.code)))
END
AND ac.user_type_id != aat.user_type_id
AND ac.company_id = c.id;
UPDATE account_account as ac
SET internal_type=at.type,
internal_group=at.internal_group
FROM account_account_type as at
WHERE ac.user_type_id=at.id;
"""
openupgrade.logged_query(
env.cr,
query,
)
|
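The CASE expression in both queries above pads a template account code with zeros up to the chart's `code_digits`, unless the template code is already longer. The same rule in a few lines of Python, for illustration only:

# Illustration of the SQL CASE/REPEAT padding rule above.
def pad_account_code(code: str, code_digits: int) -> str:
    return code if code_digits < len(code) else code.ljust(code_digits, '0')

print(pad_account_code('4001', 6))     # 400100
print(pad_account_code('4001234', 6))  # 4001234 (kept as-is)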
24,905 |
def _node_in_orelse(loop, node):
for child in getattr(loop, "orelse", []):
if child == node:
return True
if any(kid == node for kid in child.get_children()):
return True
return False
|
def _node_in_orelse(loop:Union[nodes.While, nodes.For], node: Union[nodes.Break, nodes.Return]) -> bool:
for child in getattr(loop, "orelse", []):
if child == node:
return True
if any(kid == node for kid in child.get_children()):
return True
return False
|
23,613 |
def singlediode(photocurrent, saturation_current, resistance_series,
resistance_shunt, nNsVth, ivcurve_pnts=None,
method='lambertw'):
r"""
Solve the single-diode model to obtain a photovoltaic IV curve.
Singlediode solves the single diode equation [1]_
.. math::
I = I_L -
I_0 \left[
\exp \left(\frac{V+I R_s}{n N_s V_{th}} \right)-1
\right] -
\frac{V + I R_s}{R_{sh}}
for :math:`I` and :math:`V` when given :math:`I_L, I_0, R_s, R_{sh},` and
:math:`n N_s V_{th}` which are described later. Returns a DataFrame
which contains the 5 points on the I-V curve specified in
SAND2004-3535 [3]_. If all :math:`I_L, I_0, R_s, R_{sh},` and
:math:`n N_s V_{th}` are scalar, a single curve will be returned, if any
are Series (of the same length), multiple IV curves will be calculated.
The input parameters can be calculated using
:py:func:`~pvlib.pvsystem.calcparams_desoto` from meteorological data.
Parameters
----------
photocurrent : numeric
Light-generated current :math:`I_L` (photocurrent) under desired
IV curve conditions. ``0 <= photocurrent``. [A]
saturation_current : numeric
Diode saturation :math:`I_0` current under desired IV curve
conditions. ``0 < saturation_current``. [A]
resistance_series : numeric
Series resistance :math:`R_s` under desired IV curve conditions.
``0 <= resistance_series < numpy.inf``. [ohms]
resistance_shunt : numeric
Shunt resistance :math:`R_{sh}` under desired IV curve conditions.
``0 < resistance_shunt <= numpy.inf``. [ohms]
nNsVth : numeric
The product of three components. 1) The usual diode ideal factor
:math:`n`, 2) the number of cells in series :math:`N_s`, and 3)
the cell thermal voltage under the desired IV curve conditions
:math:`V_{th}`. The thermal voltage of the cell (in volts) may be
calculated as :math:`k_B T_c / q`, where :math:`k_B` is
Boltzmann's constant (J/K), :math:`T_c` is the temperature of the p-n
junction in Kelvin, and :math:`q` is the charge of an electron
(coulombs). ``0 < nNsVth``. [V]
ivcurve_pnts : None or int, default None
Number of points in the desired IV curve. If None or 0, no
IV curves will be produced.
method : str, default 'lambertw'
Determines the method used to calculate points on the IV curve. The
options are ``'lambertw'``, ``'newton'``, or ``'brentq'``.
Returns
-------
OrderedDict or DataFrame
The returned dict-like object always contains the keys/columns:
* i_sc - short circuit current in amperes.
* v_oc - open circuit voltage in volts.
* i_mp - current at maximum power point in amperes.
* v_mp - voltage at maximum power point in volts.
* p_mp - power at maximum power point in watts.
* i_x - current, in amperes, at ``v = 0.5*v_oc``.
* i_xx - current, in amperes, at ``V = 0.5*(v_oc+v_mp)``.
If ivcurve_pnts is greater than 0, the output dictionary will also
include the keys:
* i - IV curve current in amperes.
* v - IV curve voltage in volts.
The output will be an OrderedDict if photocurrent is a scalar,
array, or ivcurve_pnts is not None.
The output will be a DataFrame if photocurrent is a Series and
ivcurve_pnts is None.
Notes
-----
If the method is ``'lambertw'`` then the solution employed to solve the
implicit diode equation utilizes the Lambert W function to obtain an
explicit function of :math:`V=f(I)` and :math:`I=f(V)` as shown in [2]_.
If the method is ``'newton'`` then the root-finding Newton-Raphson method
is used. It should be safe for well behaved IV-curves, but the ``'brentq'``
method is recommended for reliability.
If the method is ``'brentq'`` then Brent's bisection search method is used
that guarantees convergence by bounding the voltage between zero and
open-circuit.
If the method is either ``'newton'`` or ``'brentq'`` and ``ivcurve_pnts``
are indicated, then :func:`pvlib.singlediode.bishop88` [4]_ is used to
calculate the points on the IV curve points at diode voltages from zero to
open-circuit voltage with a log spacing that gets closer as voltage
increases. If the method is ``'lambertw'`` then the calculated points on
the IV curve are linearly spaced.
References
----------
.. [1] S.R. Wenham, M.A. Green, M.E. Watt, "Applied Photovoltaics" ISBN
0 86758 909 4
.. [2] A. Jain, A. Kapoor, "Exact analytical solutions of the
parameters of real solar cells using Lambert W-function", Solar
Energy Materials and Solar Cells, 81 (2004) 269-277.
.. [3] D. King et al, "Sandia Photovoltaic Array Performance Model",
SAND2004-3535, Sandia National Laboratories, Albuquerque, NM
.. [4] "Computer simulation of the effects of electrical mismatches in
photovoltaic cell interconnection circuits" JW Bishop, Solar Cell (1988)
https://doi.org/10.1016/0379-6787(88)90059-2
See also
--------
sapm
calcparams_desoto
pvlib.singlediode.bishop88
"""
# Calculate points on the IV curve using the LambertW solution to the
# single diode equation
if method.lower() == 'lambertw':
out = _singlediode._lambertw(
photocurrent, saturation_current, resistance_series,
resistance_shunt, nNsVth, ivcurve_pnts
)
i_sc, v_oc, i_mp, v_mp, p_mp, i_x, i_xx = out[:7]
if ivcurve_pnts:
ivcurve_i, ivcurve_v = out[7:]
else:
# Calculate points on the IV curve using either 'newton' or 'brentq'
# methods. Voltages are determined by first solving the single diode
# equation for the diode voltage V_d then backing out voltage
args = (photocurrent, saturation_current, resistance_series,
resistance_shunt, nNsVth) # collect args
v_oc = _singlediode.bishop88_v_from_i(
0.0, *args, method=method.lower()
)
i_mp, v_mp, p_mp = _singlediode.bishop88_mpp(
*args, method=method.lower()
)
i_sc = _singlediode.bishop88_i_from_v(
0.0, *args, method=method.lower()
)
i_x = _singlediode.bishop88_i_from_v(
v_oc / 2.0, *args, method=method.lower()
)
i_xx = _singlediode.bishop88_i_from_v(
(v_oc + v_mp) / 2.0, *args, method=method.lower()
)
# calculate the IV curve if requested using bishop88
if ivcurve_pnts:
vd = v_oc * (
(11.0 - np.logspace(np.log10(11.0), 0.0,
ivcurve_pnts)) / 10.0
)
ivcurve_i, ivcurve_v, _ = _singlediode.bishop88(vd, *args)
out = OrderedDict()
out['i_sc'] = i_sc
out['v_oc'] = v_oc
out['i_mp'] = i_mp
out['v_mp'] = v_mp
out['p_mp'] = p_mp
out['i_x'] = i_x
out['i_xx'] = i_xx
if ivcurve_pnts:
out['v'] = ivcurve_v
out['i'] = ivcurve_i
if isinstance(photocurrent, pd.Series) and not ivcurve_pnts:
out = pd.DataFrame(out, index=photocurrent.index)
return out
|
def singlediode(photocurrent, saturation_current, resistance_series,
resistance_shunt, nNsVth, ivcurve_pnts=None,
method='lambertw'):
r"""
Solve the single-diode model to obtain a photovoltaic IV curve.
Singlediode solves the single diode equation [1]_
.. math::
I = I_L -
I_0 \left[
\exp \left(\frac{V+I R_s}{n N_s V_{th}} \right)-1
\right] -
\frac{V + I R_s}{R_{sh}}
for :math:`I` and :math:`V` when given :math:`I_L, I_0, R_s, R_{sh},` and
:math:`n N_s V_{th}` which are described later. Returns a DataFrame
which contains the 5 points on the I-V curve specified in
SAND2004-3535 [3]_. If all :math:`I_L, I_0, R_s, R_{sh},` and
:math:`n N_s V_{th}` are scalar, a single curve will be returned, if any
are Series (of the same length), multiple IV curves will be calculated.
The input parameters can be calculated using
:py:func:`~pvlib.pvsystem.calcparams_desoto` from meteorological data.
Parameters
----------
photocurrent : numeric
Light-generated current :math:`I_L` (photocurrent) under desired
IV curve conditions. ``0 <= photocurrent``. [A]
saturation_current : numeric
Diode saturation :math:`I_0` current under desired IV curve
conditions. ``0 < saturation_current``. [A]
resistance_series : numeric
Series resistance :math:`R_s` under desired IV curve conditions.
``0 <= resistance_series < numpy.inf``. [ohm]
resistance_shunt : numeric
Shunt resistance :math:`R_{sh}` under desired IV curve conditions.
``0 < resistance_shunt <= numpy.inf``. [ohms]
nNsVth : numeric
The product of three components. 1) The usual diode ideal factor
:math:`n`, 2) the number of cells in series :math:`N_s`, and 3)
the cell thermal voltage under the desired IV curve conditions
:math:`V_{th}`. The thermal voltage of the cell (in volts) may be
calculated as :math:`k_B T_c / q`, where :math:`k_B` is
Boltzmann's constant (J/K), :math:`T_c` is the temperature of the p-n
junction in Kelvin, and :math:`q` is the charge of an electron
(coulombs). ``0 < nNsVth``. [V]
ivcurve_pnts : None or int, default None
Number of points in the desired IV curve. If None or 0, no
IV curves will be produced.
method : str, default 'lambertw'
Determines the method used to calculate points on the IV curve. The
options are ``'lambertw'``, ``'newton'``, or ``'brentq'``.
Returns
-------
OrderedDict or DataFrame
The returned dict-like object always contains the keys/columns:
* i_sc - short circuit current in amperes.
* v_oc - open circuit voltage in volts.
* i_mp - current at maximum power point in amperes.
* v_mp - voltage at maximum power point in volts.
* p_mp - power at maximum power point in watts.
* i_x - current, in amperes, at ``v = 0.5*v_oc``.
* i_xx - current, in amperes, at ``V = 0.5*(v_oc+v_mp)``.
If ivcurve_pnts is greater than 0, the output dictionary will also
include the keys:
* i - IV curve current in amperes.
* v - IV curve voltage in volts.
The output will be an OrderedDict if photocurrent is a scalar,
array, or ivcurve_pnts is not None.
The output will be a DataFrame if photocurrent is a Series and
ivcurve_pnts is None.
Notes
-----
If the method is ``'lambertw'`` then the solution employed to solve the
implicit diode equation utilizes the Lambert W function to obtain an
explicit function of :math:`V=f(I)` and :math:`I=f(V)` as shown in [2]_.
If the method is ``'newton'`` then the root-finding Newton-Raphson method
is used. It should be safe for well behaved IV-curves, but the ``'brentq'``
method is recommended for reliability.
If the method is ``'brentq'`` then Brent's bisection search method is used
that guarantees convergence by bounding the voltage between zero and
open-circuit.
If the method is either ``'newton'`` or ``'brentq'`` and ``ivcurve_pnts``
are indicated, then :func:`pvlib.singlediode.bishop88` [4]_ is used to
calculate the points on the IV curve points at diode voltages from zero to
open-circuit voltage with a log spacing that gets closer as voltage
increases. If the method is ``'lambertw'`` then the calculated points on
the IV curve are linearly spaced.
References
----------
.. [1] S.R. Wenham, M.A. Green, M.E. Watt, "Applied Photovoltaics" ISBN
0 86758 909 4
.. [2] A. Jain, A. Kapoor, "Exact analytical solutions of the
parameters of real solar cells using Lambert W-function", Solar
Energy Materials and Solar Cells, 81 (2004) 269-277.
.. [3] D. King et al, "Sandia Photovoltaic Array Performance Model",
SAND2004-3535, Sandia National Laboratories, Albuquerque, NM
.. [4] "Computer simulation of the effects of electrical mismatches in
photovoltaic cell interconnection circuits" JW Bishop, Solar Cell (1988)
https://doi.org/10.1016/0379-6787(88)90059-2
See also
--------
sapm
calcparams_desoto
pvlib.singlediode.bishop88
"""
# Calculate points on the IV curve using the LambertW solution to the
# single diode equation
if method.lower() == 'lambertw':
out = _singlediode._lambertw(
photocurrent, saturation_current, resistance_series,
resistance_shunt, nNsVth, ivcurve_pnts
)
i_sc, v_oc, i_mp, v_mp, p_mp, i_x, i_xx = out[:7]
if ivcurve_pnts:
ivcurve_i, ivcurve_v = out[7:]
else:
# Calculate points on the IV curve using either 'newton' or 'brentq'
# methods. Voltages are determined by first solving the single diode
# equation for the diode voltage V_d then backing out voltage
args = (photocurrent, saturation_current, resistance_series,
resistance_shunt, nNsVth) # collect args
v_oc = _singlediode.bishop88_v_from_i(
0.0, *args, method=method.lower()
)
i_mp, v_mp, p_mp = _singlediode.bishop88_mpp(
*args, method=method.lower()
)
i_sc = _singlediode.bishop88_i_from_v(
0.0, *args, method=method.lower()
)
i_x = _singlediode.bishop88_i_from_v(
v_oc / 2.0, *args, method=method.lower()
)
i_xx = _singlediode.bishop88_i_from_v(
(v_oc + v_mp) / 2.0, *args, method=method.lower()
)
# calculate the IV curve if requested using bishop88
if ivcurve_pnts:
vd = v_oc * (
(11.0 - np.logspace(np.log10(11.0), 0.0,
ivcurve_pnts)) / 10.0
)
ivcurve_i, ivcurve_v, _ = _singlediode.bishop88(vd, *args)
out = OrderedDict()
out['i_sc'] = i_sc
out['v_oc'] = v_oc
out['i_mp'] = i_mp
out['v_mp'] = v_mp
out['p_mp'] = p_mp
out['i_x'] = i_x
out['i_xx'] = i_xx
if ivcurve_pnts:
out['v'] = ivcurve_v
out['i'] = ivcurve_i
if isinstance(photocurrent, pd.Series) and not ivcurve_pnts:
out = pd.DataFrame(out, index=photocurrent.index)
return out
|
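A usage sketch for the function above (assumes pvlib is installed; the five parameter values are illustrative rather than taken from a real module datasheet):

# Scalar inputs return an OrderedDict with the five key I-V points.
from pvlib import pvsystem

out = pvsystem.singlediode(
    photocurrent=7.0,          # I_L [A]
    saturation_current=6e-10,  # I_0 [A]
    resistance_series=0.5,     # R_s [ohm]
    resistance_shunt=20.0,     # R_sh [ohm]
    nNsVth=1.5,                # n * N_s * V_th [V]
    method='lambertw',
)
print(out['i_sc'], out['v_oc'], out['p_mp'])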
56,912 |
def when_mentioned(bot: _Bot, msg: Message, /) -> List[str]:
"""A callable that implements a command prefix equivalent to being mentioned.
These are meant to be passed into the :attr:`.Bot.command_prefix` attribute.
.. versionchanged:: 2.0
``bot`` parameter is now positional-only.
.. versionchanged:: 2.0
``msg`` parameter is now positional-only.
"""
# bot.user will never be None when this is called
return [f'<@{bot.user.id}> ', f'<@!{bot.user.id}> '] # type: ignore
|
def when_mentioned(bot: _Bot, msg: Message, /) -> List[str]:
"""A callable that implements a command prefix equivalent to being mentioned.
These are meant to be passed into the :attr:`.Bot.command_prefix` attribute.
.. versionchanged:: 2.0
``bot`` and ``msg`` parameters are now positional-only.
"""
# bot.user will never be None when this is called
return [f'<@{bot.user.id}> ', f'<@!{bot.user.id}> '] # type: ignore
|
14,133 |
def clip(gdf, mask, keep_geom_type=False):
"""Clip points, lines, or polygon geometries to the mask extent.
Both layers must be in the same Coordinate Reference System (CRS).
The `gdf` will be clipped to the full extent of the clip object.
If there are multiple polygons in mask, data from `gdf` will be
clipped to the total boundary of all polygons in mask.
If the `mask` is a tuple of `(minx, miny, maxx, maxy)`, a faster rectangle
clipping algorithm will be used. Note that this can lead to slightly different
results in edge cases, e.g. if a line would be reduced to a point, this point might
not be returned.
Parameters
----------
gdf : GeoDataFrame or GeoSeries
Vector layer (point, line, polygon) to be clipped to mask.
mask : GeoDataFrame, GeoSeries, (Multi)Polygon, tuple
Polygon vector layer used to clip `gdf`.
The mask's geometry is dissolved into one geometric feature
and intersected with `gdf`.
If the mask is a tuple of `(minx, miny, maxx, maxy)`, `clip` will use a faster
rectangle clipping (`.clip_by_rect()`), possibly leading to slightly different
results.
keep_geom_type : boolean, default False
If True, return only geometries of original type in case of intersection
resulting in multiple geometry types or GeometryCollections.
If False, return all resulting geometries (potentially mixed-types).
Returns
-------
GeoDataFrame or GeoSeries
Vector data (points, lines, polygons) from `gdf` clipped to
polygon boundary from mask.
See also
--------
GeoDataFrame.clip : equivalent GeoDataFrame method
GeoSeries.clip : equivalent GeoSeries method
Examples
--------
Clip points (global cities) with a polygon (the South American continent):
>>> world = geopandas.read_file(
... geopandas.datasets.get_path('naturalearth_lowres'))
>>> south_america = world[world['continent'] == "South America"]
>>> capitals = geopandas.read_file(
... geopandas.datasets.get_path('naturalearth_cities'))
>>> capitals.shape
(202, 2)
>>> sa_capitals = geopandas.clip(capitals, south_america)
>>> sa_capitals.shape
(12, 2)
"""
if not isinstance(gdf, (GeoDataFrame, GeoSeries)):
raise TypeError(
"'gdf' should be GeoDataFrame or GeoSeries, got {}".format(type(gdf))
)
if not isinstance(mask, (GeoDataFrame, GeoSeries, Polygon, MultiPolygon, tuple)):
raise TypeError(
"'mask' should be GeoDataFrame, GeoSeries,"
f"(Multi)Polygon or 4 element tuple, got {type(mask)}"
)
if isinstance(mask, tuple) and len(mask) != 4:
raise TypeError(
"If 'mask' is a tuple, it must have four values (minx, miny, maxx, maxy)"
)
if isinstance(mask, (GeoDataFrame, GeoSeries)):
if not _check_crs(gdf, mask):
_crs_mismatch_warn(gdf, mask, stacklevel=3)
if isinstance(mask, (GeoDataFrame, GeoSeries)):
box_mask = mask.total_bounds
elif isinstance(mask, tuple):
box_mask = mask
else:
box_mask = mask.bounds
box_gdf = gdf.total_bounds
if not (
((box_mask[0] <= box_gdf[2]) and (box_gdf[0] <= box_mask[2]))
and ((box_mask[1] <= box_gdf[3]) and (box_gdf[1] <= box_mask[3]))
):
return gdf.iloc[:0]
if isinstance(mask, (GeoDataFrame, GeoSeries)):
combined_mask = mask.geometry.unary_union
else:
combined_mask = mask
clipped = _clip_gdf_with_mask(gdf, combined_mask)
if keep_geom_type:
geomcoll_concat = (clipped.geom_type == "GeometryCollection").any()
geomcoll_orig = (gdf.geom_type == "GeometryCollection").any()
new_collection = geomcoll_concat and not geomcoll_orig
if geomcoll_orig:
warnings.warn(
"keep_geom_type can not be called on a "
"GeoDataFrame with GeometryCollection."
)
else:
polys = ["Polygon", "MultiPolygon"]
lines = ["LineString", "MultiLineString", "LinearRing"]
points = ["Point", "MultiPoint"]
# Check whether the gdf contains multiple geom types (points, lines and/or polys)
orig_types_total = sum(
[
gdf.geom_type.isin(polys).any(),
gdf.geom_type.isin(lines).any(),
gdf.geom_type.isin(points).any(),
]
)
# Check how many geometry types are in the clipped GeoDataFrame
clip_types_total = sum(
[
clipped.geom_type.isin(polys).any(),
clipped.geom_type.isin(lines).any(),
clipped.geom_type.isin(points).any(),
]
)
# Check there aren't any new geom types in the clipped GeoDataFrame
more_types = orig_types_total < clip_types_total
if orig_types_total > 1:
warnings.warn(
"keep_geom_type can not be called on a mixed type GeoDataFrame."
)
elif new_collection or more_types:
orig_type = gdf.geom_type.iloc[0]
if new_collection:
clipped = clipped.explode(index_parts=False)
if orig_type in polys:
clipped = clipped.loc[clipped.geom_type.isin(polys)]
elif orig_type in lines:
clipped = clipped.loc[clipped.geom_type.isin(lines)]
return clipped
|
def clip(gdf, mask, keep_geom_type=False):
"""Clip points, lines, or polygon geometries to the mask extent.
Both layers must be in the same Coordinate Reference System (CRS).
The `gdf` will be clipped to the full extent of the clip object.
If there are multiple polygons in mask, data from `gdf` will be
clipped to the total boundary of all polygons in mask.
If the `mask` is a tuple of `(minx, miny, maxx, maxy)`, a faster rectangle
clipping algorithm will be used. Note that this can lead to slightly different
results in edge cases, e.g. if a line would be reduced to a point, this point might
not be returned.
Parameters
----------
gdf : GeoDataFrame or GeoSeries
Vector layer (point, line, polygon) to be clipped to mask.
mask : GeoDataFrame, GeoSeries, (Multi)Polygon, tuple
Polygon vector layer used to clip `gdf`.
The mask's geometry is dissolved into one geometric feature
and intersected with `gdf`.
If the mask is a tuple of `(minx, miny, maxx, maxy)`, `clip` will use a faster
rectangle clipping (`.clip_by_rect()`), possibly leading to slightly different
results.
keep_geom_type : boolean, default False
If True, return only geometries of original type in case of intersection
resulting in multiple geometry types or GeometryCollections.
If False, return all resulting geometries (potentially mixed-types).
Returns
-------
GeoDataFrame or GeoSeries
Vector data (points, lines, polygons) from `gdf` clipped to
polygon boundary from mask.
See also
--------
GeoDataFrame.clip : equivalent GeoDataFrame method
GeoSeries.clip : equivalent GeoSeries method
Examples
--------
Clip points (global cities) with a polygon (the South American continent):
>>> world = geopandas.read_file(
... geopandas.datasets.get_path('naturalearth_lowres'))
>>> south_america = world[world['continent'] == "South America"]
>>> capitals = geopandas.read_file(
... geopandas.datasets.get_path('naturalearth_cities'))
>>> capitals.shape
(202, 2)
>>> sa_capitals = geopandas.clip(capitals, south_america)
>>> sa_capitals.shape
(12, 2)
"""
if not isinstance(gdf, (GeoDataFrame, GeoSeries)):
raise TypeError(
"'gdf' should be GeoDataFrame or GeoSeries, got {}".format(type(gdf))
)
if not isinstance(mask, (GeoDataFrame, GeoSeries, Polygon, MultiPolygon, tuple)):
raise TypeError(
"'mask' should be GeoDataFrame, GeoSeries,"
f"(Multi)Polygon or a tuple, got {type(mask)}"
)
if isinstance(mask, tuple) and len(mask) != 4:
raise TypeError(
"If 'mask' is a tuple, it must have four values (minx, miny, maxx, maxy)"
)
if isinstance(mask, (GeoDataFrame, GeoSeries)):
if not _check_crs(gdf, mask):
_crs_mismatch_warn(gdf, mask, stacklevel=3)
if isinstance(mask, (GeoDataFrame, GeoSeries)):
box_mask = mask.total_bounds
elif isinstance(mask, tuple):
box_mask = mask
else:
box_mask = mask.bounds
box_gdf = gdf.total_bounds
if not (
((box_mask[0] <= box_gdf[2]) and (box_gdf[0] <= box_mask[2]))
and ((box_mask[1] <= box_gdf[3]) and (box_gdf[1] <= box_mask[3]))
):
return gdf.iloc[:0]
if isinstance(mask, (GeoDataFrame, GeoSeries)):
combined_mask = mask.geometry.unary_union
else:
combined_mask = mask
clipped = _clip_gdf_with_mask(gdf, combined_mask)
if keep_geom_type:
geomcoll_concat = (clipped.geom_type == "GeometryCollection").any()
geomcoll_orig = (gdf.geom_type == "GeometryCollection").any()
new_collection = geomcoll_concat and not geomcoll_orig
if geomcoll_orig:
warnings.warn(
"keep_geom_type can not be called on a "
"GeoDataFrame with GeometryCollection."
)
else:
polys = ["Polygon", "MultiPolygon"]
lines = ["LineString", "MultiLineString", "LinearRing"]
points = ["Point", "MultiPoint"]
# Check whether the gdf contains multiple geom types (points, lines and/or polys)
orig_types_total = sum(
[
gdf.geom_type.isin(polys).any(),
gdf.geom_type.isin(lines).any(),
gdf.geom_type.isin(points).any(),
]
)
# Check how many geometry types are in the clipped GeoDataFrame
clip_types_total = sum(
[
clipped.geom_type.isin(polys).any(),
clipped.geom_type.isin(lines).any(),
clipped.geom_type.isin(points).any(),
]
)
# Check there aren't any new geom types in the clipped GeoDataFrame
more_types = orig_types_total < clip_types_total
if orig_types_total > 1:
warnings.warn(
"keep_geom_type can not be called on a mixed type GeoDataFrame."
)
elif new_collection or more_types:
orig_type = gdf.geom_type.iloc[0]
if new_collection:
clipped = clipped.explode(index_parts=False)
if orig_type in polys:
clipped = clipped.loc[clipped.geom_type.isin(polys)]
elif orig_type in lines:
clipped = clipped.loc[clipped.geom_type.isin(lines)]
return clipped
|
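The docstring above mentions a faster rectangle path when mask is a (minx, miny, maxx, maxy) tuple; here is a small illustrative sketch of that call, assuming geopandas and shapely are installed (coordinates and names are arbitrary):
import geopandas
from shapely.geometry import Point
pts = geopandas.GeoDataFrame(
    {"name": ["a", "b", "c"]},
    geometry=[Point(0, 0), Point(5, 5), Point(20, 20)],
    crs="EPSG:4326",
)
# A tuple mask triggers the clip_by_rect() fast path described above.
inside = geopandas.clip(pts, (-1.0, -1.0, 10.0, 10.0))
print(sorted(inside["name"]))   # ['a', 'b']; point 'c' falls outside the box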
31,919 |
def create_output(results, endpoint, only_answers):
if only_answers:
try:
output = CommandResults(
outputs_prefix=f'DNS-over-HTTPS.{endpoint}',
outputs_key_field='',
outputs=results['Answer']
)
return output
except LookupError:
return 'No results found'
else:
output = CommandResults(
outputs_prefix=f'DNS-over-HTTPS.{endpoint}',
outputs_key_field='',
outputs=results
)
return output
|
def create_output(results, endpoint, only_answers):
if only_answers:
try:
output = CommandResults(
outputs_prefix=f'DNSOverHTTPS.{endpoint}',
outputs_key_field='',
outputs=results['Answer']
)
return output
except LookupError:
return 'No results found'
else:
output = CommandResults(
outputs_prefix=f'DNS-over-HTTPS.{endpoint}',
outputs_key_field='',
outputs=results
)
return output
|
12,777 |
def main(path=None):
verbose = 0
if path is None:
import sys
import getopt
opts, args = getopt.getopt(sys.argv[1:], "v")
for k, v in opts:
if k == "-v":
verbose += 1
path, = args
fs = FileStorage(path, read_only=1)
# Set of oids in the index that failed to load due to POSKeyError.
# This is what happens if undo is applied to the transaction creating
# the object (the oid is still in the index, but its current data
# record has a backpointer of 0, and POSKeyError is raised then
# because of that backpointer).
undone = {}
# Set of oids that were present in the index but failed to load.
# This does not include oids in undone.
noload = {}
#print("# building references graph ...")
graph = {} # oid -> refs
posoidv = list((pos, oid) for (oid, pos) in fs._index.items()) # [] of (pos, oid)
posoidv.sort() # access objects in order of pos↑ (optimize disk IO)
for _,oid in posoidv:
try:
data, serial = load_current(fs, oid)
except (KeyboardInterrupt, SystemExit):
raise
except POSKeyError:
undone[oid] = 1
continue
except:
if verbose:
traceback.print_exc()
noload[oid] = 1
continue
refs = get_refs(data)
graph[oid] = refs
#print("# verifying reachability ...")
oidv = list(graph.keys())
oidv.sort() # verify objects in order of oid↑ (useful for human perception; stable output)
for oid in oidv:
refs = graph[oid]
missing = [] # contains 3-tuples of oid, klass-metadata, reason
for ref, klass in refs:
if klass is None:
klass = '<unknown>'
if ref not in graph:
missing.append((ref, klass, "missing"))
if ref in noload:
missing.append((ref, klass, "failed to load"))
if ref in undone:
missing.append((ref, klass, "object creation was undone"))
if missing:
report(oid, data, serial, missing)
|
def main(path=None):
verbose = 0
if path is None:
import sys
import getopt
opts, args = getopt.getopt(sys.argv[1:], "v")
for k, v in opts:
if k == "-v":
verbose += 1
path, = args
fs = FileStorage(path, read_only=1)
# Set of oids in the index that failed to load due to POSKeyError.
# This is what happens if undo is applied to the transaction creating
# the object (the oid is still in the index, but its current data
# record has a backpointer of 0, and POSKeyError is raised then
# because of that backpointer).
undone = {}
# Set of oids that were present in the index but failed to load.
# This does not include oids in undone.
noload = {}
#print("# building references graph ...")
graph = {} # oid -> refs
posoidv = list((pos, oid) for (oid, pos) in fs._index.items()) # [] of (pos, oid)
posoidv.sort() # access objects in order of ascending file position (optimize disk IO)
for _,oid in posoidv:
try:
data, serial = load_current(fs, oid)
except (KeyboardInterrupt, SystemExit):
raise
except POSKeyError:
undone[oid] = 1
continue
except:
if verbose:
traceback.print_exc()
noload[oid] = 1
continue
refs = get_refs(data)
graph[oid] = refs
#print("# verifying reachability ...")
oidv = list(graph.keys())
oidv.sort() # verify objects in order of oid↑ (useful for human perception; stable output)
for oid in oidv:
refs = graph[oid]
missing = [] # contains 3-tuples of oid, klass-metadata, reason
for ref, klass in refs:
if klass is None:
klass = '<unknown>'
if ref not in graph:
missing.append((ref, klass, "missing"))
if ref in noload:
missing.append((ref, klass, "failed to load"))
if ref in undone:
missing.append((ref, klass, "object creation was undone"))
if missing:
report(oid, data, serial, missing)
|
9,060 |
def command(*command_list: str) -> Callable:
"""Decorate a function to set one or more commands that should trigger it.
:param str command_list: one or more command name(s) to match
This decorator can be used to add multiple commands to one callable in a
single line. The resulting match object will have the command as the first
group; the rest of the line, excluding leading whitespace, as the second
group; and parameters 1 through 4, separated by whitespace, as groups 3-6.
Example::
@command("hello")
# If the command prefix is "\\.", this would trigger on lines
# starting with ".hello".
@command('j', 'join')
# If the command prefix is "\\.", this would trigger on lines
# starting with either ".j" or ".join".
You can use a space in the command name to implement subcommands::
@command('main sub1', 'main sub2')
# For ".main sub1", trigger.group(1) will return "main sub1"
# For ".main sub2", trigger.group(1) will return "main sub2"
But in that case, be careful with the order of the names: if a more generic
pattern is defined first, it will have priority over less generic patterns.
So for instance, to have ``.main`` and ``.main sub`` working properly, you
need to declare them like this::
@command('main sub', 'main')
# This command will react properly to ".main sub" and ".main"
Then, you can check ``trigger.group(1)`` to know if it was used as
``main sub`` or just ``main`` in your callable. If you declare them in the
wrong order, ``.main`` will have priority and you won't be able to take
advantage of that.
Another option is to declare commands with subcommands only, like this::
@command('main sub1')
# this command will be triggered on .main sub1
@command('main sub2')
# this other command will be triggered on .main sub2
In that case, ``.main`` won't trigger anything, and you won't have to
inspect the trigger's groups to know which subcommand is triggered.
.. note::
If you use this decorator multiple times, remember that the decorators
are invoked in the reverse order of appearance::
# These two decorators...
@command('hi')
@command('hello')
# ...are equivalent to this single decorator
@command('hello', 'hi')
See also the `Function Definitions`__ chapter from the Python
documentation for more information about functions and decorators.
.. __: https://docs.python.org/3/reference/compound_stmts.html#function-definitions
.. note::
The command name will be escaped to be used in a regex command. As such
it is not possible to use something like ``.command\\d+`` to catch
something like ``.command1`` or ``.command2``.
You have several options at your disposal to replace a regex in the
command name:
* use a command alias,
* parse the arguments with your own regex within your plugin callable,
* use a :func:`rule` instead
"""
def add_attribute(function):
function._sopel_callable = True
if not hasattr(function, "commands"):
function.commands = []
for command in command_list:
if command not in function.commands:
function.commands.append(command)
return function
return add_attribute
|
def command(*command_list: str) -> Callable:
"""Decorate a function to set one or more commands that should trigger it.
:param str command_list: one or more command name(s) to match
This decorator can be used to add multiple commands to one callable in a
single line. The resulting match object will have the command as the first
group; the rest of the line, excluding leading whitespace, as the second
group; and parameters 1 through 4, separated by whitespace, as groups 3-6.
Example::
@command("hello")
# If the command prefix is "\\.", this would trigger on lines
# starting with ".hello".
@command('j', 'join')
# If the command prefix is "\\.", this would trigger on lines
# starting with either ".j" or ".join".
You can use a space in the command name to implement subcommands::
@command('main sub1', 'main sub2')
# For ".main sub1", trigger.group(1) will return "main sub1"
# For ".main sub2", trigger.group(1) will return "main sub2"
But in that case, be careful with the order of the names: if a more generic
pattern is defined first, it will have priority over less generic patterns.
So for instance, to have ``.main`` and ``.main sub`` working properly, you
need to declare them like this::
@command('main sub', 'main')
# This command will react properly to ".main sub" and ".main"
Then, you can check ``trigger.group(1)`` to know if it was used as
``main sub`` or just ``main`` in your callable. If you declare them in the
wrong order, ``.main`` will have priority and you won't be able to take
advantage of that.
Another option is to declare commands with subcommands only, like this::
@command('main sub1')
# this command will be triggered on .main sub1
@command('main sub2')
# this other command will be triggered on .main sub2
In that case, ``.main`` won't trigger anything, and you won't have to
inspect the trigger's groups to know which subcommand is triggered.
.. note::
If you use this decorator multiple times, remember that the decorators
are invoked in the reverse order of appearance::
# These two decorators...
@command('hi')
@command('hello')
# ...are equivalent to this single decorator
@command('hello', 'hi')
See also the `Function Definitions`__ chapter from the Python
documentation for more information about functions and decorators.
.. __: https://docs.python.org/3/reference/compound_stmts.html#function-definitions
.. note::
The command name will be escaped for use in a regular expression. As such
it is not possible to use something like ``.command\\d+`` to catch
something like ``.command1`` or ``.command2``.
You have several options at your disposal to replace a regex in the
command name:
* use a command alias,
* parse the arguments with your own regex within your plugin callable,
* use a :func:`rule` instead
"""
def add_attribute(function):
function._sopel_callable = True
if not hasattr(function, "commands"):
function.commands = []
for command in command_list:
if command not in function.commands:
function.commands.append(command)
return function
return add_attribute
|
25,579 |
def get_best_routes_pfs(
chain_state: ChainState,
token_network_address: TokenNetworkAddress,
one_to_n_address: OneToNAddress,
from_address: InitiatorAddress,
to_address: TargetAddress,
amount: PaymentAmount,
previous_address: Optional[Address],
pfs_config: PFSConfig,
privkey: bytes,
) -> Tuple[Optional[str], List[RouteState], Optional[UUID]]:
try:
pfs_routes, feedback_token = query_paths(
pfs_config=pfs_config,
our_address=chain_state.our_address,
privkey=privkey,
current_block_number=chain_state.block_number,
token_network_address=token_network_address,
one_to_n_address=one_to_n_address,
chain_id=chain_state.chain_id,
route_from=from_address,
route_to=to_address,
value=amount,
)
except ServiceRequestFailed as e:
log_message = ("PFS: " + e.args[0]) if e.args[0] else None
log_info = e.args[1] if len(e.args) > 1 else {}
log.warning("An error with the path request occurred", log_message=log_message, **log_info)
return log_message, [], None
paths = []
for path_object in pfs_routes:
path = path_object["path"]
estimated_fee = path_object["estimated_fee"]
canonical_path = [to_canonical_address(node) for node in path]
# get the second entry, as the first one is the node itself
# also needs to be converted to canonical representation
partner_address = canonical_path[1]
# don't route back
if partner_address == previous_address:
continue
channel_state = views.get_channelstate_by_token_network_and_partner(
chain_state=chain_state,
token_network_address=token_network_address,
partner_address=partner_address,
)
if not channel_state:
continue
# check channel state
if channel.get_status(channel_state) != ChannelState.STATE_OPENED:
log.info(
"Channel is not opened, ignoring",
from_address=to_checksum_address(from_address),
partner_address=to_checksum_address(partner_address),
routing_source="Pathfinding Service",
)
continue
paths.append(
RouteState(
route=canonical_path,
forward_channel_id=channel_state.identifier,
estimated_fee=estimated_fee,
)
)
return None, paths, feedback_token
|
def get_best_routes_pfs(
chain_state: ChainState,
token_network_address: TokenNetworkAddress,
one_to_n_address: OneToNAddress,
from_address: InitiatorAddress,
to_address: TargetAddress,
amount: PaymentAmount,
previous_address: Optional[Address],
pfs_config: PFSConfig,
privkey: bytes,
) -> Tuple[Optional[str], List[RouteState], Optional[UUID]]:
try:
pfs_routes, feedback_token = query_paths(
pfs_config=pfs_config,
our_address=chain_state.our_address,
privkey=privkey,
current_block_number=chain_state.block_number,
token_network_address=token_network_address,
one_to_n_address=one_to_n_address,
chain_id=chain_state.chain_id,
route_from=from_address,
route_to=to_address,
value=amount,
)
except ServiceRequestFailed as e:
log_message = ("PFS: " + e.args[0]) if e.args else None
log_info = e.args[1] if len(e.args) > 1 else {}
log.warning("An error with the path request occurred", log_message=log_message, **log_info)
return log_message, [], None
paths = []
for path_object in pfs_routes:
path = path_object["path"]
estimated_fee = path_object["estimated_fee"]
canonical_path = [to_canonical_address(node) for node in path]
# get the second entry, as the first one is the node itself
# also needs to be converted to canonical representation
partner_address = canonical_path[1]
# don't route back
if partner_address == previous_address:
continue
channel_state = views.get_channelstate_by_token_network_and_partner(
chain_state=chain_state,
token_network_address=token_network_address,
partner_address=partner_address,
)
if not channel_state:
continue
# check channel state
if channel.get_status(channel_state) != ChannelState.STATE_OPENED:
log.info(
"Channel is not opened, ignoring",
from_address=to_checksum_address(from_address),
partner_address=to_checksum_address(partner_address),
routing_source="Pathfinding Service",
)
continue
paths.append(
RouteState(
route=canonical_path,
forward_channel_id=channel_state.identifier,
estimated_fee=estimated_fee,
)
)
return None, paths, feedback_token
|
58,177 |
def fetch_events_command(client: Client, first_fetch: Optional[datetime], last_run: dict, limit: int = 1000):
"""
Fetches vulnerabilities if job has succeeded, and audit logs.
Args:
client: Client class object.
first_fetch: time to first fetch from.
last_run: last run object.
limit: maximum number of audit logs to fetch.
Returns: vulnerabilities, audit logs and updated last run object
"""
last_fetch = last_run.get('next_fetch')
last_id_fetched = last_run.get('last_id')
vulnerabilities = []
new_last_run = {}
index = 0
if not last_fetch and first_fetch:
start_date = first_fetch.strftime(DATE_FORMAT)
else:
start_date = last_fetch # type: ignore
audit_logs: List[Dict] = []
audit_logs_from_api = client.get_audit_logs_request(from_date=start_date)
if last_id_fetched:
index = 1
for log in audit_logs_from_api:
if log.get('id') == last_id_fetched:
break
index += 1
demisto.info(f'{len(audit_logs_from_api)}')
audit_logs_from_api_len = len(audit_logs_from_api)
last_log_to_fetch = min(audit_logs_from_api_len, limit)
demisto.info(f'{index=}, {last_log_to_fetch=}')
if index < audit_logs_from_api_len and last_log_to_fetch <= audit_logs_from_api_len:
demisto.info('here again')
audit_logs.extend(audit_logs_from_api[index:last_log_to_fetch])
# trying to fetch vulnerabilities
integration_context = get_integration_context()
export_uuid = integration_context.get('export_uuid')
demisto.info("checking export uuid")
if export_uuid:
demisto.info(f'{export_uuid=}')
vulnerabilities, finished_fetching = try_get_chunks(client=client, export_uuid=export_uuid)
# set params for next run
if finished_fetching:
set_integration_context({'export_uuid': None})
next_run_vuln = time.mktime(datetime.now(tz=timezone.utc).timetuple())
new_last_run = {'next_fetch_vunl': next_run_vuln}
next_run: str = datetime.now(tz=timezone.utc).strftime(DATE_FORMAT)
new_last_run.update({'last_id': audit_logs[-1].get('id') if audit_logs else last_id_fetched,
'next_fetch': next_run}) # type: ignore
demisto.info(f'Done fetching {len(audit_logs)} audit logs, Setting {new_last_run=}.')
demisto.info(f'{len(vulnerabilities)=}, {len(audit_logs)=}')
return vulnerabilities, audit_logs, new_last_run
|
def fetch_events_command(client: Client, first_fetch: Optional[datetime], last_run: dict, limit: int = 1000):
"""
Fetches vulnerabilities if job has succeeded, and audit logs.
Args:
client: Client class object.
first_fetch: time to first fetch from.
last_run: last run object.
limit: maximum number of audit logs to fetch.
Returns: vulnerabilities, audit logs and updated last run object
"""
last_fetch = last_run.get('next_fetch')
last_id_fetched = last_run.get('last_id')
vulnerabilities = []
new_last_run = {}
index = 0
if not last_fetch and first_fetch:
start_date = first_fetch.strftime(DATE_FORMAT)
else:
start_date = last_fetch # type: ignore
audit_logs: List[Dict] = []
audit_logs_from_api = client.get_audit_logs_request(from_date=start_date)
if last_id_fetched:
for index, log in enumerate(audit_logs_from_api, 1):
if log.get('id') == last_id_fetched:
break
demisto.info(f'{len(audit_logs_from_api)}')
audit_logs_from_api_len = len(audit_logs_from_api)
last_log_to_fetch = min(audit_logs_from_api_len, limit)
demisto.info(f'{index=}, {last_log_to_fetch=}')
if index < audit_logs_from_api_len and last_log_to_fetch <= audit_logs_from_api_len:
demisto.info('here again')
audit_logs.extend(audit_logs_from_api[index:last_log_to_fetch])
# trying to fetch vulnerabilities
integration_context = get_integration_context()
export_uuid = integration_context.get('export_uuid')
demisto.info("checking export uuid")
if export_uuid:
demisto.info(f'{export_uuid=}')
vulnerabilities, finished_fetching = try_get_chunks(client=client, export_uuid=export_uuid)
# set params for next run
if finished_fetching:
set_integration_context({'export_uuid': None})
next_run_vuln = time.mktime(datetime.now(tz=timezone.utc).timetuple())
new_last_run = {'next_fetch_vunl': next_run_vuln}
next_run: str = datetime.now(tz=timezone.utc).strftime(DATE_FORMAT)
new_last_run.update({'last_id': audit_logs[-1].get('id') if audit_logs else last_id_fetched,
'next_fetch': next_run}) # type: ignore
demisto.info(f'Done fetching {len(audit_logs)} audit logs, Setting {new_last_run=}.')
demisto.info(f'{len(vulnerabilities)=}, {len(audit_logs)=}')
return vulnerabilities, audit_logs, new_last_run
|
24,529 |
def find_floating_potential(
voltage: np.ndarray,
current: np.ndarray,
threshold: int = 1,
min_points: int = 5,
):
"""
Determines the floating potential (Vf) for a given Current-Voltage (IV) curve
generated by a swept Langmuir probe. The floating potential is the probe
bias where the collected current goes to zero.
How it works
------------
#. The current array ``current`` is searched for all points equal to zero and
point pairs that straddle ``current = 0`` to form a set of crossing-points.
#. Crossing-points are then grouped into crossing-islands in accordance with
the ``threshold`` keyword.
#. If multiple crossing-islands are found, then a warning is issued and
`~numpy.nan` is returned.
#. To calculated the floating potential, a `~scipy.stats.linregress` is applied
to points making up the cross-island. If the number of points that make
up the crossing-island are less than ``min_points``, then each side of the
crossing-island is padded with the nearest neighbors until `min_points` is
satisfied.
Parameters
----------
voltage: np.ndarray
1-D numpy array of ascending probe biases (in Volts)
current: np.ndarray
1-D numpy array of probe current (in A) corresponding to the :data:`voltage`
array
threshold: positive, non-zero int
Max index distance between found crossing-points to group crossing-points
into crossing-islands. That is, if `threshold=5` then consecutive
crossing-points are considered to be in the same crossing-island if they are
within 5 indices of each other. (Default: 1)
min_points: positive, non-zero int
The minimum number of points required for the linear regression. (Default: 5)
Returns
-------
vf: `numpy.float64` or `numpy.nan`
The calculated floating potential (in Volts). Returns `numpy.nan` if the
floating potential can not be determined.
vf_err: `numpy.float64` or `numpy.nan`
The error associated with the floating potential calculation (in Volts).
Returns `numpy.nan` if the floating potential can not be determined.
fit: Dict[str, Any]
A dictionary containing the linear regression fit results and parameters.
Keys are `'slope'`, `'slope_err'`, `'intercept'`, `'intercept_err'`, and
`'indices'`, where `'indices'` is a `slice` object corresponding to the
data points used in the fit. Returns an empty dict if the floating
potential can not be determined.
"""
if current.min() > 0.0 or current.max() < 0:
warn("The Langmuir sweep has no floating potential.")
return np.nan, np.nan, {}
# condition kwarg threshold
if isinstance(threshold, (int, float)):
threshold = int(np.round(threshold))
if threshold < 1:
warn(f"threshold ({threshold}) is less than 1 and needs to be"
f" an int >= 1, using a value of 1")
threshold = 1
else:
warn(f"threshold is NOT a integer >= 1, using a value of 1")
# find possible crossing points (cp)
lower_vals = np.where(current <= 0, True, False)
upper_vals = np.where(0 <= current, True, False)
cp_exact = np.logical_and(lower_vals, upper_vals).nonzero()[0]
cp_low2high = np.logical_and(np.roll(lower_vals, 1), upper_vals).nonzero()[0]
cp_high2low = np.logical_and(np.roll(lower_vals, -1), upper_vals).nonzero()[0]
cp_candidates = np.concatenate((
cp_exact,
cp_low2high,
cp_low2high - 1,
cp_high2low,
cp_high2low + 1,
))
cp_candidates = np.unique(cp_candidates) # sorted and unique
# How many crossing-islands?
cp_intervals = np.diff(cp_candidates)
if np.count_nonzero(np.where(cp_intervals > threshold, True, False)) != 0:
# There are multiple crossing points
warn("Unable to determine floating potential, Langmuir sweep has multiple "
"crossing-islands. Try adjusting 'threshold'.")
return np.nan, np.nan, {}
# Construct crossing-island (pad if needed)
istart = cp_candidates[0]
istop = cp_candidates[-1]
iadd = (istop - istart + 1) - min_points
if iadd < 0:
# need to pad
iadd_2_start = iadd_2_stop = int(np.ceil(-iadd / 2.0))
if istart - iadd_2_start < 0:
iadd_2_stop += iadd_2_start - istart
iadd_2_start = 0
istart = 0
if ((current.size - 1) - (istop + iadd_2_stop)) < 0:
iadd_2_start += iadd_2_stop - (current.size - 1 - istop)
# iadd_2_stop = 0
istop = current.size - 1
istart = 0 if (istart - iadd_2_start < 0) else (istart - iadd_2_start)
# iadd_2_start = 0
if (istop - istart + 1) < min_points:
warn(f"The number of elements in the current array ({current.size}) is less"
f" than 'min_points' ({min_points}).")
# Perform Linear Regression Fit
volt_sub = voltage[istart:istop + 1]
curr_sub = current[istart:istop + 1]
fit = linregress(volt_sub, curr_sub)
slope = fit[0]
slope_err = fit[4]
intercept = fit[1]
intercept_err = np.sum(volt_sub ** 2) - ((np.sum(volt_sub) ** 2) / volt_sub.size)
intercept_err = slope_err * np.sqrt(1.0 / intercept_err)
vf = -intercept / slope
vf_err = np.abs(vf * np.sqrt(((slope_err / slope) ** 2)
+ ((intercept_err / intercept) ** 2)))
fit = {'slope': slope,
'slope_err': slope_err,
'intercept': intercept,
'intercept_err': intercept_err,
'indices': slice(istart, istop + 1)}
return vf, vf_err, fit
|
def find_floating_potential(
voltage: np.ndarray,
current: np.ndarray,
threshold: int = 1,
min_points: int = 5,
):
"""
Determines the floating potential (Vf) for a given Current-Voltage (IV) curve
generated by a swept Langmuir probe. The floating potential is the probe
bias where the collected current goes to zero.
How it works
------------
#. The current array ``current`` is searched for all points equal to zero and
point pairs that straddle ``current = 0`` to form a set of crossing-points.
#. Crossing-points are then grouped into crossing-islands in accordance with
the ``threshold`` keyword.
#. If multiple crossing-islands are found, then a warning is issued and
`~numpy.nan` is returned.
#. To calculate the floating potential, a `~scipy.stats.linregress` is applied
to points making up the cross-island. If the number of points that make
up the crossing-island are less than ``min_points``, then each side of the
crossing-island is padded with the nearest neighbors until `min_points` is
satisfied.
Parameters
----------
voltage: np.ndarray
1-D numpy array of ascending probe biases (in Volts)
current: np.ndarray
1-D numpy array of probe current (in A) corresponding to the :data:`voltage`
array
threshold: positive, non-zero int
Max index distance between found crossing-points to group crossing-points
into crossing-islands. That is, if `threshold=5` then consecutive
crossing-points are considered to be in the same crossing-island if they are
within 5 indices of each other. (Default: 1)
min_points: positive, non-zero int
The minimum number of points required for the linear regression. (Default: 5)
Returns
-------
vf: `numpy.float64` or `numpy.nan`
The calculated floating potential (in Volts). Returns `numpy.nan` if the
floating potential can not be determined.
vf_err: `numpy.float64` or `numpy.nan`
The error associated with the floating potential calculation (in Volts).
Returns `numpy.nan` if the floating potential can not be determined.
fit: Dict[str, Any]
A dictionary containing the linear regression fit results and parameters.
Keys are `'slope'`, `'slope_err'`, `'intercept'`, `'intercept_err'`, and
`'indices'`, where `'indices'` is a `slice` object corresponding to the
data points used in the fit. Returns an empty dict if the floating
potential can not be determined.
"""
if current.min() > 0.0 or current.max() < 0:
warn("The Langmuir sweep has no floating potential.")
return np.nan, np.nan, {}
# condition kwarg threshold
if isinstance(threshold, (int, float)):
threshold = int(np.round(threshold))
if threshold < 1:
warn(f"threshold ({threshold}) is less than 1 and needs to be"
f" an int >= 1, using a value of 1")
threshold = 1
else:
warn(f"threshold is NOT a integer >= 1, using a value of 1")
# find possible crossing points (cp)
lower_vals = np.where(current <= 0, True, False)
upper_vals = np.where(0 <= current, True, False)
cp_exact = np.logical_and(lower_vals, upper_vals).nonzero()[0]
cp_low2high = np.logical_and(np.roll(lower_vals, 1), upper_vals).nonzero()[0]
cp_high2low = np.logical_and(np.roll(lower_vals, -1), upper_vals).nonzero()[0]
cp_candidates = np.concatenate((
cp_exact,
cp_low2high,
cp_low2high - 1,
cp_high2low,
cp_high2low + 1,
))
cp_candidates = np.unique(cp_candidates) # sorted and unique
# How many crossing-islands?
cp_intervals = np.diff(cp_candidates)
if np.count_nonzero(np.where(cp_intervals > threshold, True, False)) != 0:
# There are multiple crossing points
warn("Unable to determine floating potential, Langmuir sweep has multiple "
"crossing-islands. Try adjusting 'threshold'.")
return np.nan, np.nan, {}
# Construct crossing-island (pad if needed)
istart = cp_candidates[0]
istop = cp_candidates[-1]
iadd = (istop - istart + 1) - min_points
if iadd < 0:
# need to pad
iadd_2_start = iadd_2_stop = int(np.ceil(-iadd / 2.0))
if istart - iadd_2_start < 0:
iadd_2_stop += iadd_2_start - istart
iadd_2_start = 0
istart = 0
if ((current.size - 1) - (istop + iadd_2_stop)) < 0:
iadd_2_start += iadd_2_stop - (current.size - 1 - istop)
# iadd_2_stop = 0
istop = current.size - 1
istart = 0 if (istart - iadd_2_start < 0) else (istart - iadd_2_start)
# iadd_2_start = 0
if (istop - istart + 1) < min_points:
warn(f"The number of elements in the current array ({current.size}) is less"
f" than 'min_points' ({min_points}).")
# Perform Linear Regression Fit
volt_sub = voltage[istart:istop + 1]
curr_sub = current[istart:istop + 1]
fit = linregress(volt_sub, curr_sub)
slope = fit[0]
slope_err = fit[4]
intercept = fit[1]
intercept_err = np.sum(volt_sub ** 2) - ((np.sum(volt_sub) ** 2) / volt_sub.size)
intercept_err = slope_err * np.sqrt(1.0 / intercept_err)
vf = -intercept / slope
vf_err = np.abs(vf * np.sqrt(((slope_err / slope) ** 2)
+ ((intercept_err / intercept) ** 2)))
fit = {'slope': slope,
'slope_err': slope_err,
'intercept': intercept,
'intercept_err': intercept_err,
'indices': slice(istart, istop + 1)}
return vf, vf_err, fit
|
43,664 |
def molecular_hamiltonian(
name,
geo_file,
charge=0,
mult=1,
basis="sto-3g",
package="pyscf",
nact_els=None,
nact_orbs=None,
mapping="jordan_wigner",
outpath=".",
): # pylint:disable=too-many-arguments
r"""Generates the qubit Hamiltonian of a molecule.
This function drives the construction of the second-quantized electronic Hamiltonian
of a molecule and its transformation to the basis of Pauli matrices.
#. The starting point is reading the file containing the geometry of the molecule.
#. OpenFermion-PySCF or OpenFermion-Psi4 plugins are used to launch
the Hartree-Fock (HF) calculation for the polyatomic system using the quantum
chemistry package ``PySCF`` or ``Psi4``, respectively.
- The net charge of the molecule can be given to simulate
cationic/anionic systems. Also, the spin multiplicity can be inputted
to determine the number of unpaired electrons occupying the HF orbitals
as illustrated in the left panel of the figure below.
- The basis of Gaussian-type *atomic* orbitals used to represent the *molecular* orbitals
can be specified to go beyond the minimum basis approximation. Basis set availability
per element can be found
`here <www.psicode.org/psi4manual/master/basissets_byelement.html#apdx-basiselement>`_
#. An active space can be defined for a given number of *active electrons*
occupying a reduced set of *active orbitals* in the vicinity of frontier
orbitals as sketched in the right panel of the figure below.
#. Finally, the second-quantized Hamiltonian is mapped to the Pauli basis and
converted to a PennyLane observable.
|
.. figure:: ../../_static/qchem/fig_mult_active_space.png
:align: center
:width: 90%
|
Args:
name (str): Name of the molecule
geo_file (str): File containing the geometry of the molecule
charge (int): Net charge of the molecule. If not specified, a neutral system is assumed.
mult (int): Spin multiplicity :math:`\mathrm{mult}=N_\mathrm{unpaired} + 1`
for :math:`N_\mathrm{unpaired}` unpaired electrons occupying the HF orbitals.
Possible values of ``mult`` are :math:`1, 2, 3, \ldots`. If not specified,
a closed-shell HF state is assumed.
basis (str): Atomic basis set used to represent the HF orbitals. Basis set
availability per element can be found
`here <www.psicode.org/psi4manual/master/basissets_byelement.html#apdx-basiselement>`_
package (str): Quantum chemistry package (pyscf or psi4) used to solve the
mean field electronic structure problem
nact_els (int): Number of active electrons. If not specified, all electrons
are considered to be active
nact_orbs (int): Number of active orbitals. If not specified, all orbitals
are considered to be active
mapping (str): Transformation (``'jordan_wigner'`` or ``'bravyi_kitaev'``) used to
map the second-quantized electronic Hamiltonian to the qubit Hamiltonian
outpath (str): Path to the directory containing output files
Returns:
tuple[pennylane.Hamiltonian, int]: The fermionic-to-qubit transformed
Hamiltonian and the number of qubits
**Example**
>>> name = "h2"
>>> geo_file = "h2.xyz"
>>> H, n_qubits = molecular_hamiltonian(name, geo_file)
>>> print(n_qubits)
4
>>> print(H)
(-0.04207897647782188) [I0]
+ (0.17771287465139934) [Z0]
+ (0.1777128746513993) [Z1]
+ (-0.24274280513140484) [Z2]
+ (-0.24274280513140484) [Z3]
+ (0.17059738328801055) [Z0 Z1]
+ (0.04475014401535161) [Y0 X1 X2 Y3]
+ (-0.04475014401535161) [Y0 Y1 X2 X3]
+ (-0.04475014401535161) [X0 X1 Y2 Y3]
+ (0.04475014401535161) [X0 Y1 Y2 X3]
+ (0.12293305056183801) [Z0 Z2]
+ (0.1676831945771896) [Z0 Z3]
+ (0.1676831945771896) [Z1 Z2]
+ (0.12293305056183801) [Z1 Z3]
+ (0.176276408043196) [Z2 Z3]
"""
geometry = read_structure(geo_file, outpath)
hf_file = meanfield(name, geometry, charge, mult, basis, package, outpath)
molecule = MolecularData(filename=hf_file)
core, active = active_space(
molecule.n_electrons, molecule.n_orbitals, mult, nact_els, nact_orbs
)
h_of, n_qubits = (
decompose_molecular_hamiltonian(hf_file, mapping, core, active),
2 * len(active),
)
return convert_observable(h_of), n_qubits
|
def molecular_hamiltonian(
name,
geo_file,
charge=0,
mult=1,
basis="sto-3g",
package="pyscf",
nact_els=None,
nact_orbs=None,
mapping="jordan_wigner",
outpath=".",
): # pylint:disable=too-many-arguments
r"""Generates the qubit Hamiltonian of a molecule.
This function drives the construction of the second-quantized electronic Hamiltonian
of a molecule and its transformation to the basis of Pauli matrices.
#. The process begins by reading the file containing the geometry of the molecule.
#. OpenFermion-PySCF or OpenFermion-Psi4 plugins are used to launch
the Hartree-Fock (HF) calculation for the polyatomic system using the quantum
chemistry package ``PySCF`` or ``Psi4``, respectively.
- The net charge of the molecule can be given to simulate
cationic/anionic systems. Also, the spin multiplicity can be inputted
to determine the number of unpaired electrons occupying the HF orbitals
as illustrated in the left panel of the figure below.
- The basis of Gaussian-type *atomic* orbitals used to represent the *molecular* orbitals
can be specified to go beyond the minimum basis approximation. Basis set availability
per element can be found
`here <www.psicode.org/psi4manual/master/basissets_byelement.html#apdx-basiselement>`_
#. An active space can be defined for a given number of *active electrons*
occupying a reduced set of *active orbitals* in the vicinity of frontier
orbitals as sketched in the right panel of the figure below.
#. Finally, the second-quantized Hamiltonian is mapped to the Pauli basis and
converted to a PennyLane observable.
|
.. figure:: ../../_static/qchem/fig_mult_active_space.png
:align: center
:width: 90%
|
Args:
name (str): Name of the molecule
geo_file (str): File containing the geometry of the molecule
charge (int): Net charge of the molecule. If not specified, a neutral system is assumed.
mult (int): Spin multiplicity :math:`\mathrm{mult}=N_\mathrm{unpaired} + 1`
for :math:`N_\mathrm{unpaired}` unpaired electrons occupying the HF orbitals.
Possible values of ``mult`` are :math:`1, 2, 3, \ldots`. If not specified,
a closed-shell HF state is assumed.
basis (str): Atomic basis set used to represent the HF orbitals. Basis set
availability per element can be found
`here <www.psicode.org/psi4manual/master/basissets_byelement.html#apdx-basiselement>`_
package (str): Quantum chemistry package (pyscf or psi4) used to solve the
mean field electronic structure problem
nact_els (int): Number of active electrons. If not specified, all electrons
are considered to be active
nact_orbs (int): Number of active orbitals. If not specified, all orbitals
are considered to be active
mapping (str): Transformation (``'jordan_wigner'`` or ``'bravyi_kitaev'``) used to
map the second-quantized electronic Hamiltonian to the qubit Hamiltonian
outpath (str): Path to the directory containing output files
Returns:
tuple[pennylane.Hamiltonian, int]: The fermionic-to-qubit transformed
Hamiltonian and the number of qubits
**Example**
>>> name = "h2"
>>> geo_file = "h2.xyz"
>>> H, n_qubits = molecular_hamiltonian(name, geo_file)
>>> print(n_qubits)
4
>>> print(H)
(-0.04207897647782188) [I0]
+ (0.17771287465139934) [Z0]
+ (0.1777128746513993) [Z1]
+ (-0.24274280513140484) [Z2]
+ (-0.24274280513140484) [Z3]
+ (0.17059738328801055) [Z0 Z1]
+ (0.04475014401535161) [Y0 X1 X2 Y3]
+ (-0.04475014401535161) [Y0 Y1 X2 X3]
+ (-0.04475014401535161) [X0 X1 Y2 Y3]
+ (0.04475014401535161) [X0 Y1 Y2 X3]
+ (0.12293305056183801) [Z0 Z2]
+ (0.1676831945771896) [Z0 Z3]
+ (0.1676831945771896) [Z1 Z2]
+ (0.12293305056183801) [Z1 Z3]
+ (0.176276408043196) [Z2 Z3]
"""
geometry = read_structure(geo_file, outpath)
hf_file = meanfield(name, geometry, charge, mult, basis, package, outpath)
molecule = MolecularData(filename=hf_file)
core, active = active_space(
molecule.n_electrons, molecule.n_orbitals, mult, nact_els, nact_orbs
)
h_of, n_qubits = (
decompose_molecular_hamiltonian(hf_file, mapping, core, active),
2 * len(active),
)
return convert_observable(h_of), n_qubits
|
44,608 |
def create_sparse_tensor_features(sparse_tensor, name='adj'):
feature_dict = {}
val = sparse_tensor.values.numpy()
ind = sparse_tensor.indices.numpy()
shape = sparse_tensor.dense_shape.numpy()
feature_dict[name + '_val'] = tf.train.Feature(
float_list=tf.train.FloatList(value=val))
for i in range(ind.shape[-1]):
feature_dict[name + '_ind_' + str(i)] = tf.train.Feature(
int64_list=tf.train.Int64List(value=ind[:, i]))
return feature_dict
|
def create_sparse_tensor_features(sparse_tensor, name='adj'):
feature_dict = {}
val = sparse_tensor.values.numpy()
ind = sparse_tensor.indices.numpy()
shape = sparse_tensor.dense_shape.numpy()
feature_dict[name + '_val'] = tf.train.Feature(
float_list=tf.train.FloatList(value=val))
for i in range(ind.shape[-1]):
feature_dict['{}_ind_{}'.format(name, i)] = tf.train.Feature(
int64_list=tf.train.Int64List(value=ind[:, i]))
return feature_dict
|
21,983 |
def validateSessionWithToken(sydent, sid, clientSecret, token):
"""
Attempt to validate a session, identified by the sid, using
the token from out-of-band. The client secret is given to
prevent attempts to guess the token for a sid.
:param sid: The ID of the session to validate.
:type sid: unicode
:param clientSecret: The client secret to validate.
:type clientSecret: unicode
:param token: The token to validate.
:type token: unicode
:return: A dict with a "success" key which is True if the session
was successfully validated, False otherwise.
:rtype: dict[str, bool]
:raise IncorrectClientSecretException: The provided client_secret is incorrect.
:raise SessionExpiredException: The session has expired.
:raise InvalidSessionIdException: The session ID couldn't be matched with an
existing session.
:raise IncorrectSessionTokenException: The provided token is incorrect
"""
valSessionStore = ThreePidValSessionStore(sydent)
s = valSessionStore.getTokenSessionById(sid)
if not s:
logger.info("Session ID %s not found", (sid,))
raise InvalidSessionIdException()
if not clientSecret == s.clientSecret:
logger.info("Incorrect client secret", (sid,))
raise IncorrectClientSecretException()
if s.mtime + ValidationSession.THREEPID_SESSION_VALIDATION_TIMEOUT_MS < time_msec():
logger.info("Session expired")
raise SessionExpiredException()
# TODO once we can validate the token oob
#if tokenObj.validated and clientSecret == tokenObj.clientSecret:
# return True
if s.token == token:
logger.info("Setting session %s as validated", (s.id))
valSessionStore.setValidated(s.id, True)
return {'success': True}
else:
logger.info("Incorrect token submitted")
raise IncorrectSessionTokenException()
|
def validateSessionWithToken(sydent, sid, clientSecret, token):
"""
Attempt to validate a session, identified by the sid, using
the token from out-of-band. The client secret is given to
prevent attempts to guess the token for a sid.
:param sid: The ID of the session to validate.
:type sid: unicode
:param clientSecret: The client secret to validate.
:type clientSecret: unicode
:param token: The token to validate.
:type token: unicode
:return: A dict with a "success" key which is True if the session
was successfully validated, False otherwise.
:rtype: dict[str, bool]
:raise IncorrectClientSecretException: The provided client_secret is incorrect.
:raise SessionExpiredException: The session has expired.
:raise InvalidSessionIdException: The session ID couldn't be matched with an
existing session.
:raise IncorrectSessionTokenException: The provided token is incorrect
"""
valSessionStore = ThreePidValSessionStore(sydent)
s = valSessionStore.getTokenSessionById(sid)
if not s:
logger.info("Session ID %s not found", (sid,))
raise InvalidSessionIdException()
if not clientSecret == s.clientSecret:
logger.info("Incorrect client secret", sid)
raise IncorrectClientSecretException()
if s.mtime + ValidationSession.THREEPID_SESSION_VALIDATION_TIMEOUT_MS < time_msec():
logger.info("Session expired")
raise SessionExpiredException()
# TODO once we can validate the token oob
#if tokenObj.validated and clientSecret == tokenObj.clientSecret:
# return True
if s.token == token:
logger.info("Setting session %s as validated", (s.id))
valSessionStore.setValidated(s.id, True)
return {'success': True}
else:
logger.info("Incorrect token submitted")
raise IncorrectSessionTokenException()
|
36,262 |
def read_10x_h5(
filename: Union[str, Path], genome: Optional[str] = None, gex_only: bool = True,
) -> AnnData:
"""\
Read 10x-Genomics-formatted hdf5 file.
Parameters
----------
filename
Path to a 10x hdf5 file.
genome
Filter expression to genes within this genome. For legacy 10x h5
files, this must be provided if the data contains more than one genome.
gex_only
Only keep 'Gene Expression' data and ignore other feature types,
e.g. 'Antibody Capture', 'CRISPR Guide Capture', or 'Custom'
Returns
-------
Annotated data matrix, where observations/cells are named by their
barcode and variables/genes by gene name. Stores the following information:
:attr:`~anndata.AnnData.X`
The data matrix is stored
:attr:`~anndata.AnnData.obs_names`
Cell names
:attr:`~anndata.AnnData.var_names`
Gene names
:attr:`~anndata.AnnData.var`\\ `['gene_ids']`
Gene IDs
:attr:`~anndata.AnnData.var`\\ `['feature_types']`
Feature types
"""
start = logg.info(f'reading {filename}')
with tables.open_file(str(filename), 'r') as f:
v3 = '/matrix' in f
if v3:
adata = _read_v3_10x_h5(filename, start=start)
if genome:
if genome not in adata.var['genome'].values:
raise ValueError(
f"Could not find data corresponding to genome '{genome}' in '{filename}'. "
f'Available genomes are: {list(adata.var["genome"].unique())}.'
)
adata = adata[:, adata.var['genome'] == f"{genome}"]
if gex_only:
adata = adata[:, adata.var['feature_types'] == 'Gene Expression']
if adata.is_view:
adata = adata.copy()
else:
adata = _read_legacy_10x_h5(filename, genome=genome, start=start)
return adata
|
def read_10x_h5(
filename: Union[str, Path], genome: Optional[str] = None, gex_only: bool = True,
) -> AnnData:
"""\
Read 10x-Genomics-formatted hdf5 file.
Parameters
----------
filename
Path to a 10x hdf5 file.
genome
Filter expression to genes within this genome. For legacy 10x h5
files, this must be provided if the data contains more than one genome.
gex_only
Only keep 'Gene Expression' data and ignore other feature types,
e.g. 'Antibody Capture', 'CRISPR Guide Capture', or 'Custom'
Returns
-------
Annotated data matrix, where observations/cells are named by their
barcode and variables/genes by gene name. Stores the following information:
:attr:`~anndata.AnnData.X`
The data matrix is stored
:attr:`~anndata.AnnData.obs_names`
Cell names
:attr:`~anndata.AnnData.var_names`
Gene names
:attr:`~anndata.AnnData.var`\\ `['gene_ids']`
Gene IDs
:attr:`~anndata.AnnData.var`\\ `['feature_types']`
Feature types
"""
start = logg.info(f'reading {filename}')
with tables.open_file(str(filename), 'r') as f:
v3 = '/matrix' in f
if v3:
adata = _read_v3_10x_h5(filename, start=start)
if genome:
if genome not in adata.var['genome'].values:
raise ValueError(
f"Could not find data corresponding to genome '{genome}' in '{filename}'. "
f'Available genomes are: {list(adata.var["genome"].unique())}.'
)
adata = adata[:, adata.var['genome'] == genome]
if gex_only:
adata = adata[:, adata.var['feature_types'] == 'Gene Expression']
if adata.is_view:
adata = adata.copy()
else:
adata = _read_legacy_10x_h5(filename, genome=genome, start=start)
return adata
|
27,997 |
def perform_analysis(args, skip_handler, context, actions, metadata_tool,
compile_cmd_count):
"""
Perform static analysis via the given (or if not, all) analyzers,
in the given analysis context for the supplied build actions.
Additionally, insert statistical information into the metadata dict.
"""
ctu_reanalyze_on_failure = 'ctu_reanalyze_on_failure' in args and \
args.ctu_reanalyze_on_failure
if ctu_reanalyze_on_failure:
LOG.warning("Usage of a DEPRECATED FLAG!\n"
"The --ctu-reanalyze-on-failure flag will be removed "
"in the upcoming releases!")
analyzers = args.analyzers if 'analyzers' in args \
else analyzer_types.supported_analyzers
analyzers, _ = analyzer_types.check_supported_analyzers(
analyzers, context)
ctu_collect = False
ctu_analyze = False
ctu_dir = ''
if 'ctu_phases' in args:
ctu_dir = os.path.join(args.output_path, 'ctu-dir')
args.ctu_dir = ctu_dir
if ClangSA.ANALYZER_NAME not in analyzers:
LOG.error("CTU can only be used with the clang static analyzer.")
return
ctu_collect = args.ctu_phases[0]
ctu_analyze = args.ctu_phases[1]
if 'stats_enabled' in args and args.stats_enabled:
if ClangSA.ANALYZER_NAME not in analyzers:
LOG.debug("Statistics can only be used with "
"the Clang Static Analyzer.")
return
config_map = analyzer_types.build_config_handlers(args, context, analyzers)
available_checkers = set()
# Add profile names to the checkers list so we will not warn
# if a profile is enabled but there is no checker with that name.
available_checkers.update(context.available_profiles.keys())
# Collect all the available checkers from the enabled analyzers.
for analyzer in config_map.items():
_, analyzer_cfg = analyzer
for analyzer_checker in analyzer_cfg.checks().items():
checker_name, _ = analyzer_checker
available_checkers.add(checker_name)
if 'ordered_checkers' in args:
missing_checkers = checkers.available(args.ordered_checkers,
available_checkers)
if missing_checkers:
LOG.warning("No checker(s) with these names was found:\n%s",
'\n'.join(missing_checkers))
LOG.warning("Please review the checker names.\n"
"In the next release the analysis will not start "
"with invalid checker names.")
if 'stats_enabled' in args:
config_map[ClangSA.ANALYZER_NAME].set_checker_enabled(
SpecialReturnValueCollector.checker_analyze)
config_map[ClangSA.ANALYZER_NAME].set_checker_enabled(
ReturnValueCollector.checker_analyze)
# Statistics collector checkers must be explicitly disabled
# as they trash the output.
if "clangsa" in analyzers:
config_map[ClangSA.ANALYZER_NAME].set_checker_enabled(
SpecialReturnValueCollector.checker_collect, False)
config_map[ClangSA.ANALYZER_NAME].set_checker_enabled(
ReturnValueCollector.checker_collect, False)
check_env = env.extend(context.path_env_extra,
context.ld_lib_path_extra)
# CHECK if any checkers are enabled and only execute that analyzer
# where at least one checker is enabled.
for analyzer in config_map.items():
analyzer_name, analyzer_cfg = analyzer
if not analyzer_cfg.any_checker_enabled():
LOG.warning(f"Disabling {analyzer_name} no checkers were enabled.")
analyzers = [a for a in analyzers if a != analyzer_name]
actions = prepare_actions(actions, analyzers)
# Save some metadata information.
for analyzer in analyzers:
metadata_info = {
'checkers': {},
'analyzer_statistics': {
"failed": 0,
"failed_sources": [],
"successful": 0,
"version": None}}
for check, data in config_map[analyzer].checks().items():
state, _ = data
metadata_info['checkers'].update({
check: state == CheckerState.enabled})
version = config_map[analyzer].get_version(check_env)
metadata_info['analyzer_statistics']['version'] = version
metadata_tool['analyzers'][analyzer] = metadata_info
if 'makefile' in args and args.makefile:
statistics_data = __get_statistics_data(args)
ctu_data = None
if ctu_collect or statistics_data:
ctu_data = __get_ctu_data(config_map, ctu_dir)
makefile_creator = MakeFileCreator(analyzers, args.output_path,
config_map, context, skip_handler,
ctu_collect, statistics_data,
ctu_data)
makefile_creator.create(actions)
return
if ctu_collect:
shutil.rmtree(ctu_dir, ignore_errors=True)
elif ctu_analyze and not os.path.exists(ctu_dir):
LOG.error("CTU directory: '%s' does not exist.", ctu_dir)
return
start_time = time.time()
# Use Manager to create data objects which can be
# safely shared between processes.
manager = SyncManager()
manager.start(__mgr_init)
actions_map = create_actions_map(actions, manager)
# Setting to not None value will enable statistical analysis features.
statistics_data = __get_statistics_data(args)
if statistics_data:
statistics_data = manager.dict(statistics_data)
if ctu_collect or statistics_data:
ctu_data = None
if ctu_collect or ctu_analyze:
ctu_data = manager.dict(__get_ctu_data(config_map, ctu_dir))
pre_analyze = [a for a in actions
if a.analyzer_type == ClangSA.ANALYZER_NAME]
pre_anal_skip_handler = None
# Skip list is applied only in pre-analysis
# if --ctu-collect or --stats-collect was called explicitly
if ((ctu_collect and not ctu_analyze)
or ("stats_output" in args and args.stats_output)):
pre_anal_skip_handler = skip_handler
clangsa_config = config_map.get(ClangSA.ANALYZER_NAME)
if clangsa_config is not None:
pre_analysis_manager.run_pre_analysis(pre_analyze,
context,
clangsa_config,
args.jobs,
pre_anal_skip_handler,
ctu_data,
statistics_data,
manager)
else:
LOG.error("Can not run pre analysis without clang "
"static analyzer configuration.")
if 'stats_output' in args and args.stats_output:
return
if 'stats_dir' in args and args.stats_dir:
statistics_data = manager.dict({'stats_out_dir': args.stats_dir})
if ctu_analyze or statistics_data or (not ctu_analyze and not ctu_collect):
LOG.info("Starting static analysis ...")
analysis_manager.start_workers(actions_map, actions, context,
config_map, args.jobs,
args.output_path,
skip_handler,
metadata_tool,
'quiet' in args,
'capture_analysis_output' in args,
args.timeout if 'timeout' in args
else None,
ctu_reanalyze_on_failure,
statistics_data,
manager,
compile_cmd_count)
LOG.info("Analysis finished.")
LOG.info("To view results in the terminal use the "
"\"CodeChecker parse\" command.")
LOG.info("To store results use the \"CodeChecker store\" command.")
LOG.info("See --help and the user guide for further options about"
" parsing and storing the reports.")
LOG.info("----=================----")
end_time = time.time()
LOG.info("Analysis length: %s sec.", end_time - start_time)
metadata_tool['timestamps'] = {'begin': start_time,
'end': end_time}
if ctu_collect and ctu_analyze:
shutil.rmtree(ctu_dir, ignore_errors=True)
manager.shutdown()
|
def perform_analysis(args, skip_handler, context, actions, metadata_tool,
compile_cmd_count):
"""
Perform static analysis via the given (or if not, all) analyzers,
in the given analysis context for the supplied build actions.
Additionally, insert statistical information into the metadata dict.
"""
ctu_reanalyze_on_failure = 'ctu_reanalyze_on_failure' in args and \
args.ctu_reanalyze_on_failure
if ctu_reanalyze_on_failure:
LOG.warning("Usage of a DEPRECATED FLAG!\n"
"The --ctu-reanalyze-on-failure flag will be removed "
"in the upcoming releases!")
analyzers = args.analyzers if 'analyzers' in args \
else analyzer_types.supported_analyzers
analyzers, _ = analyzer_types.check_supported_analyzers(
analyzers, context)
ctu_collect = False
ctu_analyze = False
ctu_dir = ''
if 'ctu_phases' in args:
ctu_dir = os.path.join(args.output_path, 'ctu-dir')
args.ctu_dir = ctu_dir
if ClangSA.ANALYZER_NAME not in analyzers:
LOG.error("CTU can only be used with the clang static analyzer.")
return
ctu_collect = args.ctu_phases[0]
ctu_analyze = args.ctu_phases[1]
if 'stats_enabled' in args and args.stats_enabled:
if ClangSA.ANALYZER_NAME not in analyzers:
LOG.debug("Statistics can only be used with "
"the Clang Static Analyzer.")
return
config_map = analyzer_types.build_config_handlers(args, context, analyzers)
available_checkers = set()
# Add profile names to the checkers list so we will not warn
# if a profile is enabled but there is no checker with that name.
available_checkers.update(context.available_profiles.keys())
# Collect all the available checkers from the enabled analyzers.
for analyzer in config_map.items():
_, analyzer_cfg = analyzer
for analyzer_checker in analyzer_cfg.checks().items():
checker_name, _ = analyzer_checker
available_checkers.add(checker_name)
if 'ordered_checkers' in args:
missing_checkers = checkers.available(args.ordered_checkers,
available_checkers)
if missing_checkers:
LOG.warning("No checker(s) with these names was found:\n%s",
'\n'.join(missing_checkers))
LOG.warning("Please review the checker names.\n"
"In the next release the analysis will not start "
"with invalid checker names.")
if 'stats_enabled' in args:
config_map[ClangSA.ANALYZER_NAME].set_checker_enabled(
SpecialReturnValueCollector.checker_analyze)
config_map[ClangSA.ANALYZER_NAME].set_checker_enabled(
ReturnValueCollector.checker_analyze)
# Statistics collector checkers must be explicitly disabled
# as they trash the output.
if "clangsa" in analyzers:
config_map[ClangSA.ANALYZER_NAME].set_checker_enabled(
SpecialReturnValueCollector.checker_collect, False)
config_map[ClangSA.ANALYZER_NAME].set_checker_enabled(
ReturnValueCollector.checker_collect, False)
check_env = env.extend(context.path_env_extra,
context.ld_lib_path_extra)
# CHECK if any checkers are enabled and only execute that analyzer
# where at least one checker is enabled.
for analyzer_name, analyzer_cfg in config_map.items():
if not analyzer_cfg.any_checker_enabled():
LOG.warning(f"Disabling {analyzer_name} no checkers were enabled.")
analyzers = [a for a in analyzers if a != analyzer_name]
actions = prepare_actions(actions, analyzers)
# Save some metadata information.
for analyzer in analyzers:
metadata_info = {
'checkers': {},
'analyzer_statistics': {
"failed": 0,
"failed_sources": [],
"successful": 0,
"version": None}}
for check, data in config_map[analyzer].checks().items():
state, _ = data
metadata_info['checkers'].update({
check: state == CheckerState.enabled})
version = config_map[analyzer].get_version(check_env)
metadata_info['analyzer_statistics']['version'] = version
metadata_tool['analyzers'][analyzer] = metadata_info
if 'makefile' in args and args.makefile:
statistics_data = __get_statistics_data(args)
ctu_data = None
if ctu_collect or statistics_data:
ctu_data = __get_ctu_data(config_map, ctu_dir)
makefile_creator = MakeFileCreator(analyzers, args.output_path,
config_map, context, skip_handler,
ctu_collect, statistics_data,
ctu_data)
makefile_creator.create(actions)
return
if ctu_collect:
shutil.rmtree(ctu_dir, ignore_errors=True)
elif ctu_analyze and not os.path.exists(ctu_dir):
LOG.error("CTU directory: '%s' does not exist.", ctu_dir)
return
start_time = time.time()
# Use Manager to create data objects which can be
# safely shared between processes.
manager = SyncManager()
manager.start(__mgr_init)
actions_map = create_actions_map(actions, manager)
    # Setting this to a non-None value enables statistical analysis features.
statistics_data = __get_statistics_data(args)
if statistics_data:
statistics_data = manager.dict(statistics_data)
if ctu_collect or statistics_data:
ctu_data = None
if ctu_collect or ctu_analyze:
ctu_data = manager.dict(__get_ctu_data(config_map, ctu_dir))
pre_analyze = [a for a in actions
if a.analyzer_type == ClangSA.ANALYZER_NAME]
pre_anal_skip_handler = None
# Skip list is applied only in pre-analysis
# if --ctu-collect or --stats-collect was called explicitly
if ((ctu_collect and not ctu_analyze)
or ("stats_output" in args and args.stats_output)):
pre_anal_skip_handler = skip_handler
clangsa_config = config_map.get(ClangSA.ANALYZER_NAME)
if clangsa_config is not None:
pre_analysis_manager.run_pre_analysis(pre_analyze,
context,
clangsa_config,
args.jobs,
pre_anal_skip_handler,
ctu_data,
statistics_data,
manager)
else:
LOG.error("Can not run pre analysis without clang "
"static analyzer configuration.")
if 'stats_output' in args and args.stats_output:
return
if 'stats_dir' in args and args.stats_dir:
statistics_data = manager.dict({'stats_out_dir': args.stats_dir})
if ctu_analyze or statistics_data or (not ctu_analyze and not ctu_collect):
LOG.info("Starting static analysis ...")
analysis_manager.start_workers(actions_map, actions, context,
config_map, args.jobs,
args.output_path,
skip_handler,
metadata_tool,
'quiet' in args,
'capture_analysis_output' in args,
args.timeout if 'timeout' in args
else None,
ctu_reanalyze_on_failure,
statistics_data,
manager,
compile_cmd_count)
LOG.info("Analysis finished.")
LOG.info("To view results in the terminal use the "
"\"CodeChecker parse\" command.")
LOG.info("To store results use the \"CodeChecker store\" command.")
LOG.info("See --help and the user guide for further options about"
" parsing and storing the reports.")
LOG.info("----=================----")
end_time = time.time()
LOG.info("Analysis length: %s sec.", end_time - start_time)
metadata_tool['timestamps'] = {'begin': start_time,
'end': end_time}
if ctu_collect and ctu_analyze:
shutil.rmtree(ctu_dir, ignore_errors=True)
manager.shutdown()
|
3,471 |
def identity(val):
val.pop('path', None) # path is fetched from item
return val
|
def identity(val):
val.pop('path', None) # path is fetched from item
return val
|
31,659 |
def main():
params = demisto.params()
args = demisto.args()
url = params.get('url')
verify_certificate = not params.get('insecure', False)
proxy = params.get('proxy', False)
headers = {}
mock_data = str(args.get('mock-data', ''))
if mock_data.lower() == "true":
headers['Mock-Data'] = "True"
headers['Authorization'] = f'Bearer {params["api_key"]}'
headers['Soar-Integration-Origin'] = "Cortex XSOAR"
command = demisto.command()
demisto.debug(f'Command being called is {command}')
try:
requests.packages.urllib3.disable_warnings()
client = Client(urljoin(url, ''), verify_certificate, proxy, headers=headers, auth=None)
commands = {
'abxcortexxsoar-check-the-status-of-an-action-requested-on-a-case':
check_the_status_of_an_action_requested_on_a_case_command,
'abxcortexxsoar-check-the-status-of-an-action-requested-on-a-threat':
check_the_status_of_an_action_requested_on_a_threat_command,
'abxcortexxsoar-get-a-list-of-abnormal-cases-identified-by-abnormal-security':
get_a_list_of_abnormal_cases_identified_by_abnormal_security_command,
'abxcortexxsoar-get-a-list-of-threats':
get_a_list_of_threats_command,
'abxcortexxsoar-get-details-of-a-threat':
get_details_of_a_threat_command,
'abxcortexxsoar-get-details-of-an-abnormal-case':
get_details_of_an_abnormal_case_command,
'abxcortexxsoar-get-the-latest-threat-intel-feed': get_the_latest_threat_intel_feed_command,
'abxcortexxsoar-manage-a-threat-identified-by-abnormal-security':
manage_a_threat_identified_by_abnormal_security_command,
'abxcortexxsoar-manage-an-abnormal-case':
manage_an_abnormal_case_command,
'abxcortexxsoar-submit-an-inquiry-to-request-a-report-on-misjudgement-by-abnormal-security':
submit_an_inquiry_to_request_a_report_on_misjudgement_by_abnormal_security_command,
}
if command == 'test-module':
headers['Mock-Data'] = "True"
test_client = Client(urljoin(url, ''), verify_certificate, proxy, headers=headers, auth=None)
test_module(test_client)
elif command in commands:
return_results(commands[command](client, args))
else:
raise NotImplementedError(f'{command} command is not implemented.')
except Exception as e:
return_error(str(e))
|
def main():
params = demisto.params()
args = demisto.args()
url = params.get('url')
verify_certificate = not params.get('insecure', False)
proxy = params.get('proxy', False)
headers = {}
mock_data = str(args.get('mock-data', ''))
if mock_data.lower() == "true":
headers['Mock-Data'] = "True"
headers['Authorization'] = f'Bearer {params["api_key"]}'
headers['Soar-Integration-Origin'] = "Cortex XSOAR"
command = demisto.command()
demisto.debug(f'Command being called is {command}')
try:
requests.packages.urllib3.disable_warnings()
client = Client(urljoin(url, ''), verify_certificate, proxy, headers=headers, auth=None)
commands = {
'abxcortexxsoar-check-the-status-of-an-action-requested-on-a-case':
check_the_status_of_an_action_requested_on_a_case_command,
'abxcortexxsoar-check-the-status-of-an-action-requested-on-a-threat':
check_the_status_of_an_action_requested_on_a_threat_command,
'abxcortexxsoar-get-a-list-of-abnormal-cases-identified-by-abnormal-security':
get_a_list_of_abnormal_cases_identified_by_abnormal_security_command,
'abxcortexxsoar-get-a-list-of-threats':
get_a_list_of_threats_command,
'abxcortexxsoar-get-details-of-a-threat':
get_details_of_a_threat_command,
'abxcortexxsoar-get-details-of-an-abnormal-case':
get_details_of_an_abnormal_case_command,
'abnormal-security-get-latest-threat-intel-feed': get_the_latest_threat_intel_feed_command,
'abxcortexxsoar-manage-a-threat-identified-by-abnormal-security':
manage_a_threat_identified_by_abnormal_security_command,
'abxcortexxsoar-manage-an-abnormal-case':
manage_an_abnormal_case_command,
'abxcortexxsoar-submit-an-inquiry-to-request-a-report-on-misjudgement-by-abnormal-security':
submit_an_inquiry_to_request_a_report_on_misjudgement_by_abnormal_security_command,
}
if command == 'test-module':
headers['Mock-Data'] = "True"
test_client = Client(urljoin(url, ''), verify_certificate, proxy, headers=headers, auth=None)
test_module(test_client)
elif command in commands:
return_results(commands[command](client, args))
else:
raise NotImplementedError(f'{command} command is not implemented.')
except Exception as e:
return_error(str(e))
|