id (int64) | original (string) | modified (string) |
---|---|---|
25,786 |
def aggregateoneport(
network, busmap, component, with_time=True, custom_strategies=dict()
):
if network.df(component).empty:
return network.df(component), network.pnl(component)
attrs = network.components[component]["attrs"]
old_df = getattr(network, network.components[component]["list_name"]).assign(
bus=lambda df: df.bus.map(busmap)
)
columns = set(
attrs.index[attrs.static & attrs.status.str.startswith("Input")]
) & set(old_df.columns)
grouper = old_df.bus if "carrier" not in columns else [old_df.bus, old_df.carrier]
def aggregate_max_hours(max_hours):
if (max_hours == max_hours.iloc[0]).all():
return max_hours.iloc[0]
else:
return (max_hours * _normed(old_df.p_nom.reindex(max_hours.index))).sum()
default_strategies = dict(
p=pd.Series.sum,
q=pd.Series.sum,
p_set=pd.Series.sum,
q_set=pd.Series.sum,
p_nom=pd.Series.sum,
p_nom_max=pd.Series.sum,
p_nom_min=pd.Series.sum,
max_hours=aggregate_max_hours,
)
strategies = {
attr: default_strategies.get(attr, _make_consense(component, attr))
for attr in columns
}
strategies.update(custom_strategies)
new_df = old_df.groupby(grouper).agg(strategies)
new_df.index = _flatten_multiindex(new_df.index).rename("name")
new_pnl = dict()
def normed_or_uniform(x):
return x/x.sum() if x.sum(skipna=False) > 0 else pd.Series(1./len(x), x.index)
if "e_nom" in new_df.columns:
weighting = old_df.e_nom.groupby(grouper, axis=0).transform(normed_or_uniform)
elif "p_nom" in new_df.columns:
weighting = old_df.p_nom.groupby(grouper, axis=0).transform(normed_or_uniform)
if with_time:
old_pnl = network.pnl(component)
for attr, df in old_pnl.items():
if not df.empty:
if attr in ["e_min_pu", "e_max_pu", "p_min_pu", "p_max_pu"]:
df = df.multiply(weighting.loc[df.columns], axis=1)
pnl_df = df.groupby(grouper, axis=1).sum()
pnl_df.columns = _flatten_multiindex(pnl_df.columns).rename("name")
new_pnl[attr] = pnl_df
return new_df, new_pnl
|
def aggregateoneport(
network, busmap, component, with_time=True, custom_strategies=dict()
):
if network.df(component).empty:
return network.df(component), network.pnl(component)
attrs = network.components[component]["attrs"]
old_df = getattr(network, network.components[component]["list_name"]).assign(
bus=lambda df: df.bus.map(busmap)
)
columns = set(
attrs.index[attrs.static & attrs.status.str.startswith("Input")]
) & set(old_df.columns)
grouper = old_df.bus if "carrier" not in columns else [old_df.bus, old_df.carrier]
def aggregate_max_hours(max_hours):
if (max_hours == max_hours.iloc[0]).all():
return max_hours.iloc[0]
else:
return (max_hours * _normed(old_df.p_nom.reindex(max_hours.index))).sum()
default_strategies = dict(
p=pd.Series.sum,
q=pd.Series.sum,
p_set=pd.Series.sum,
q_set=pd.Series.sum,
p_nom=pd.Series.sum,
p_nom_max=pd.Series.sum,
p_nom_min=pd.Series.sum,
max_hours=aggregate_max_hours,
)
strategies = {
attr: default_strategies.get(attr, _make_consense(component, attr))
for attr in columns
}
strategies.update(custom_strategies)
new_df = old_df.groupby(grouper).agg(strategies)
new_df.index = _flatten_multiindex(new_df.index).rename("name")
new_pnl = dict()
def normed_or_uniform(x):
return x/x.sum() if x.sum(skipna=False) > 0 else pd.Series(1./len(x), x.index)
if "e_nom" in new_df.columns:
weighting = old_df.e_nom.groupby(grouper, axis=0).transform(normed_or_uniform)
elif "p_nom" in new_df.columns:
weighting = old_df.p_nom.groupby(grouper, axis=0).transform(normed_or_uniform)
if with_time:
old_pnl = network.pnl(component)
for attr, df in old_pnl.items():
if not df.empty:
if attr in ["e_min_pu", "e_max_pu", "p_min_pu", "p_max_pu"]:
df = df.multiply(weighting[df.columns], axis=1)
pnl_df = df.groupby(grouper, axis=1).sum()
pnl_df.columns = _flatten_multiindex(pnl_df.columns).rename("name")
new_pnl[attr] = pnl_df
return new_df, new_pnl
|
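Both versions in this row rely on helpers that the snippet does not include (`_normed`, `_make_consense`, `_flatten_multiindex`). As a reading aid only, here is a minimal sketch of what `_normed` and `_flatten_multiindex` could look like, inferred from how they are used above; this is an assumption, not the library's actual implementation.

```python
import pandas as pd

def _normed(s: pd.Series) -> pd.Series:
    # Scale a series so it sums to one (used to weight max_hours by p_nom).
    return s / s.sum()

def _flatten_multiindex(idx: pd.Index, join: str = " ") -> pd.Index:
    # Collapse a (bus, carrier) MultiIndex into flat string labels;
    # a plain Index is returned unchanged.
    if idx.nlevels == 1:
        return idx
    return pd.Index([join.join(map(str, tup)) for tup in idx])
```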
13,901 |
def _parse_rfc3339(value: str) -> datetime.datetime:
r"""
Parse an RFC-3339 or ISO-like timestamp.
This will accept timestamps in the following formats:
* `YYYY-MM-DD hh:mm:ss`
* same, but with `T` separator instead of space
* same, but with timezone offset (e.g. `+hh:mm` or `Z`)
Note that timestamps without an explicit timezone are naive
and represent a local time,
which may complicate some aspects of testing.
Examples for separators:
>>> _parse_rfc3339("2021-12-27 13:05:27").isoformat()
'2021-12-27T13:05:27'
>>> _parse_rfc3339("2021-12-27T13:05:27").isoformat()
'2021-12-27T13:05:27'
>>> _parse_rfc3339("2021-12-27@13:05:27+12:30")
Traceback (most recent call last):
...
ValueError: timestamp separator must be 'T' or space
Examples for timezone offsets:
>>> _parse_rfc3339("2021-12-27 13:05:27Z").isoformat()
'2021-12-27T13:05:27+00:00'
>>> _parse_rfc3339("2021-12-27 13:05:27+12:30").isoformat()
'2021-12-27T13:05:27+12:30'
>>> _parse_rfc3339("2021-12-27T13:05:27-07:23").isoformat()
'2021-12-27T13:05:27-07:23'
Examples for invalid syntax
>>> _parse_rfc3339("2021/12/27 13:05:27")
Traceback (most recent call last):
...
ValueError: timestamp must use RFC-3339 ...
>>> _parse_rfc3339("2021-12-27 13:05:27 UTC")
Traceback (most recent call last):
...
ValueError: timezone offset must be 'Z' or +hh:mm
"""
date_value = value[:10] # YYYY-MM-DD
sep = value[10] # T or space
time_value = value[11:19] # hh:mm:ss
tz_value = value[19:] # empty or Z or +hh:mm
if sep.lower() not in ("t", " "):
raise ValueError("timestamp separator must be 'T' or space")
try:
naive_timestamp = datetime.datetime.strptime(
date_value + " " + time_value,
"%Y-%m-%d %H:%M:%S",
)
except ValueError:
raise ValueError(
"timestamp must use RFC-3339 (YYYY-MM-DD hh:mm:ss) format"
) from None
if not tz_value:
return naive_timestamp
timezone = _parse_timezone(tz_value)
return naive_timestamp.replace(tzinfo=timezone)
|
def _parse_rfc3339(value: str) -> datetime.datetime:
r"""
Parse an RFC-3339 or ISO-like timestamp.
This will accept timestamps in the following formats:
* `YYYY-MM-DD hh:mm:ss`
* same, but with `T` separator instead of space
* same, but with timezone offset (e.g. `+hh:mm` or `Z`)
Note that timestamps without an explicit timezone are naive
and represent a local time,
which may complicate some aspects of testing.
Examples for separators:
>>> _parse_rfc3339("2021-12-27 13:05:27").isoformat()
'2021-12-27T13:05:27'
>>> _parse_rfc3339("2021-12-27T13:05:27").isoformat()
'2021-12-27T13:05:27'
>>> _parse_rfc3339("2021-12-27@13:05:27+12:30")
Traceback (most recent call last):
...
ValueError: timestamp separator must be 'T' or space
Examples for timezone offsets:
>>> _parse_rfc3339("2021-12-27 13:05:27Z").isoformat()
'2021-12-27T13:05:27+00:00'
>>> _parse_rfc3339("2021-12-27 13:05:27+12:30").isoformat()
'2021-12-27T13:05:27+12:30'
>>> _parse_rfc3339("2021-12-27T13:05:27-07:23").isoformat()
'2021-12-27T13:05:27-07:23'
Examples for invalid syntax
>>> _parse_rfc3339("2021-12-27t13:05:27-07:23").isoformat()
Traceback (most recent call last):
...
ValueError: timestamp must use RFC-3339 ...
>>> _parse_rfc3339("2021-12-27 13:05:27 UTC")
Traceback (most recent call last):
...
ValueError: timezone offset must be 'Z' or +hh:mm
"""
date_value = value[:10] # YYYY-MM-DD
sep = value[10] # T or space
time_value = value[11:19] # hh:mm:ss
tz_value = value[19:] # empty or Z or +hh:mm
if sep.lower() not in ("t", " "):
raise ValueError("timestamp separator must be 'T' or space")
try:
naive_timestamp = datetime.datetime.strptime(
date_value + " " + time_value,
"%Y-%m-%d %H:%M:%S",
)
except ValueError:
raise ValueError(
"timestamp must use RFC-3339 (YYYY-MM-DD hh:mm:ss) format"
) from None
if not tz_value:
return naive_timestamp
timezone = _parse_timezone(tz_value)
return naive_timestamp.replace(tzinfo=timezone)
|
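Both versions hand the offset string to a `_parse_timezone` helper that is not shown. Below is a minimal sketch of such a helper, assuming only the behaviour stated in the docstring ('Z' means UTC, otherwise a `+hh:mm`/`-hh:mm` offset, anything else raises the quoted `ValueError`); the real implementation may differ.

```python
import datetime

def _parse_timezone(tz_value: str) -> datetime.timezone:
    # 'Z' (or 'z') denotes UTC; '+hh:mm' / '-hh:mm' denotes a fixed offset.
    if tz_value in ("Z", "z"):
        return datetime.timezone.utc
    if len(tz_value) == 6 and tz_value[0] in "+-" and tz_value[3] == ":":
        offset = datetime.timedelta(hours=int(tz_value[1:3]), minutes=int(tz_value[4:6]))
        return datetime.timezone(-offset if tz_value[0] == "-" else offset)
    raise ValueError("timezone offset must be 'Z' or +hh:mm")
```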
17,435 |
def backends_dict_from_pkg(pkg_entrypoints):
backend_entrypoints = {}
for pkg_ep in pkg_entrypoints:
name = pkg_ep.name
try:
backend = pkg_ep.load()
backend_entrypoints[name] = backend
except Exception as ex:
warnings.warn(f"Engine {name} loading failed:\n{ex}", RuntimeWarning)
return backend_entrypoints
|
def backends_dict_from_pkg(pkg_entrypoints):
backend_entrypoints = {}
for pkg_ep in pkg_entrypoints:
name = pkg_ep.name
try:
backend = pkg_ep.load()
backend_entrypoints[name] = backend
except Exception as ex:
warnings.warn(f"Engine {name!r} loading failed:\n{ex}", RuntimeWarning)
return backend_entrypoints
|
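The only change in this pair is quoting the engine name with `!r` in the warning. For context, a hypothetical usage sketch of how such a function is typically fed entry points; the group name and the Python >= 3.10 `entry_points(group=...)` call are assumptions, not taken from the snippet.

```python
from importlib.metadata import entry_points

# Each entry point exposes .name and .load(), which is all the function needs.
pkg_entrypoints = entry_points(group="xarray.backends")
backends = backends_dict_from_pkg(pkg_entrypoints)
print(sorted(backends))
```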
12,541 |
def parse_result(result: subprocess.CompletedProcess) -> None:
stdout = result.stdout.strip()
if result.returncode == 0:
if "Fixing" in stdout:
fixed_files = '\n'.join(parse_fixed_files(stdout.split("\n")))
print(f"The following files' imports were fixed:\n\n{fixed_files}")
else:
if "ERROR" in stdout:
failing_targets = '\n'.join(parse_failing_files(stdout.split("\n")))
die("The following files have incorrect import orders. Fix by running "
f"`./build-support/isort.py --fix`.\n\n{failing_targets}")
else:
# NB: we intentionally don't swallow stderr, so that will be printed before
# this message.
die("Unexepcted failure.")
|
def parse_result(result: subprocess.CompletedProcess) -> None:
stdout = result.stdout.strip()
if result.returncode == 0:
if "Fixing" in stdout:
fixed_files = '\n'.join(parse_fixed_files(stdout.split("\n")))
print(f"The following files' imports were fixed:\n\n{fixed_files}")
else:
if "ERROR" in stdout:
failing_targets = '\n'.join(parse_failing_files(stdout.split("\n")))
die("The following files have incorrect import orders. Fix by running "
f"`./build-support/isort.py --fix`.\n\n{failing_targets}")
else:
# NB: we intentionally don't swallow stderr, so that will be printed before
# this message.
die("Unexpected failure.")
|
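The change in this pair is only the spelling of "Unexpected". The `parse_fixed_files` / `parse_failing_files` helpers are not shown; purely as an illustration, here is a plausible sketch assuming isort-style output lines ("Fixing <path>" and "ERROR: <path> ..."); the project's real helpers may parse differently.

```python
from typing import List

def parse_fixed_files(lines: List[str]) -> List[str]:
    # e.g. "Fixing /path/to/file.py" -> "/path/to/file.py"
    return [line[len("Fixing "):] for line in lines if line.startswith("Fixing ")]

def parse_failing_files(lines: List[str]) -> List[str]:
    # e.g. "ERROR: /path/to/file.py Imports are incorrectly sorted." -> "/path/to/file.py"
    return [line.split(" ", 2)[1] for line in lines if line.startswith("ERROR")]
```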
3,107 |
def test_aggregate_categorical_with_isnan():
# GH 29837
df = pd.DataFrame(
{
"A": [1, 1, 1, 1],
"B": [1, 2, 1, 2],
"numerical_col": [0.1, 0.2, np.nan, 0.3],
"object_col": ["foo", "bar", "foo", "fee"],
"categorical_col": ["foo", "bar", "foo", "fee"],
}
)
df = df.astype({"categorical_col": "category"})
result = df.groupby(["A", "B"]).agg(lambda df: df.isna().sum())
index = pd.MultiIndex.from_arrays([[1, 1], [1, 2]], names=("A", "B"))
expected = pd.DataFrame(
data={
"numerical_col": [1.0, 0.0],
"object_col": [0, 0],
"categorical_col": [0, 0],
},
index=index,
)
pd.testing.assert_frame_equal(result, expected)
|
def test_aggregate_categorical_with_isnan():
# GH 29837
df = pd.DataFrame(
{
"A": [1, 1, 1, 1],
"B": [1, 2, 1, 2],
"numerical_col": [0.1, 0.2, np.nan, 0.3],
"object_col": ["foo", "bar", "foo", "fee"],
"categorical_col": ["foo", "bar", "foo", "fee"],
}
)
df = df.astype({"categorical_col": "category"})
result = df.groupby(["A", "B"]).agg(lambda df: df.isna().sum())
index = pd.MultiIndex.from_arrays([[1, 1], [1, 2]], names=("A", "B"))
expected = pd.DataFrame(
data={
"numerical_col": [1.0, 0.0],
"object_col": [0, 0],
"categorical_col": [0, 0],
},
index=index,
)
tm.assert_frame_equal(result, expected)
|
34,965 |
def deformable_conv2d_nhwc_python(
a_np, offset_np, w_np, stride, padding, dilation, deformable_groups, groups
):
"""Deformable convolution operator in NCHW layout.
Parameters
----------
a_np : numpy.ndarray
4-D with shape [batch, in_height, in_width, in_channel]
offset_np : numpy.ndarray
4-D with shape [batch, out_height, out_width,
deformable_groups * filter_height * filter_width * 2]
w_np : numpy.ndarray
4-D with shape [filter_height, filter_width, in_channel, num_filter]
stride : int or a list/tuple of two ints
Stride size, or [stride_height, stride_width]
padding : int or str or a list/tuple of 2 or 4 ints
Padding size, or ['VALID', 'SAME'], or
[pad_height, pad_width] for 2 ints, or
[pad_top, pad_left, pad_bottom, pad_right] for 4 ints
dilation : int or a list/tuple of two ints
Dilation size, or [dilate_height, dilate_width]
deformable_groups : int
Number of deformable groups
groups : int
Number of groups
Returns
-------
b_np : np.ndarray
4-D with shape [batch, out_channel, out_height, out_width]
"""
a_np = np.transpose(a_np, [0, 3, 1, 2]) # NHWC -> NCHW
offset_np = np.transpose(offset_np, [0, 3, 1, 2]) # NHWC -> NCHW
w_np = np.transpose(w_np, [3, 2, 0, 1]) # HWIO -> OIHW
b_np = deformable_conv2d_nchw_python(a_np, offset_np, w_np, stride, padding, dilation,
deformable_groups, groups)
b_np = np.transpose(b_np, [0, 2, 3, 1]) # NCHW -> NHWC
return b_np
|
def deformable_conv2d_nhwc_python(
a_np, offset_np, w_np, stride, padding, dilation, deformable_groups, groups
):
"""Deformable convolution operator in NHWC layout.
Parameters
----------
a_np : numpy.ndarray
4-D with shape [batch, in_height, in_width, in_channel]
offset_np : numpy.ndarray
4-D with shape [batch, out_height, out_width,
deformable_groups * filter_height * filter_width * 2]
w_np : numpy.ndarray
4-D with shape [filter_height, filter_width, in_channel, num_filter]
stride : int or a list/tuple of two ints
Stride size, or [stride_height, stride_width]
padding : int or str or a list/tuple of 2 or 4 ints
Padding size, or ['VALID', 'SAME'], or
[pad_height, pad_width] for 2 ints, or
[pad_top, pad_left, pad_bottom, pad_right] for 4 ints
dilation : int or a list/tuple of two ints
Dilation size, or [dilate_height, dilate_width]
deformable_groups : int
Number of deformable groups
groups : int
Number of groups
Returns
-------
b_np : np.ndarray
4-D with shape [batch, out_channel, out_height, out_width]
"""
a_np = np.transpose(a_np, [0, 3, 1, 2]) # NHWC -> NCHW
offset_np = np.transpose(offset_np, [0, 3, 1, 2]) # NHWC -> NCHW
w_np = np.transpose(w_np, [3, 2, 0, 1]) # HWIO -> OIHW
b_np = deformable_conv2d_nchw_python(a_np, offset_np, w_np, stride, padding, dilation,
deformable_groups, groups)
b_np = np.transpose(b_np, [0, 2, 3, 1]) # NCHW -> NHWC
return b_np
|
45,562 |
def _test_pretrained(tmp_path, model_name, test_image, model_type=StarDist2D, test_image_norm_axes='ZYX'):
model = model_type.from_pretrained(model_name)
assert model is not None
# export model
export_path = tmp_path / f"{model_name}.zip"
export_bioimageio(model, export_path, test_input=test_image, test_input_norm_axes=test_image_norm_axes)
assert export_path.exists()
# test exported model
res = _test_model(export_path)
failed = [r for r in res if r["status"] != "passed"]
assert not failed, failed
# import exported model
import_path = tmp_path / f"{model_name}_imported"
model_imported = import_bioimageio(export_path, import_path)
# test that model and imported exported model are equal
def _n(d):
# normalize dict (especially tuples -> lists)
return json.loads(json.dumps(d))
assert _n(vars(model.config)) == _n(vars(model_imported.config))
assert _n(model.thresholds._asdict()) == _n(model_imported.thresholds._asdict())
assert all(np.allclose(u,v) for u,v in zip(model.keras_model.get_weights(),model_imported.keras_model.get_weights()))
|
def _test_pretrained(tmp_path, model_name, test_image, model_type=StarDist2D, test_image_norm_axes='ZYX'):
model = model_type.from_pretrained(model_name)
assert model is not None
# export model
export_path = tmp_path / f"{model_name}.zip"
export_bioimageio(model, export_path, test_input=test_image, test_input_norm_axes=test_image_norm_axes)
assert export_path.exists()
# test exported model
res = _test_model(export_path)
failed = [r for r in res if r["status"] != "passed"]
assert len(failed) == 0, failed
# import exported model
import_path = tmp_path / f"{model_name}_imported"
model_imported = import_bioimageio(export_path, import_path)
# test that model and imported exported model are equal
def _n(d):
# normalize dict (especially tuples -> lists)
return json.loads(json.dumps(d))
assert _n(vars(model.config)) == _n(vars(model_imported.config))
assert _n(model.thresholds._asdict()) == _n(model_imported.thresholds._asdict())
assert all(np.allclose(u,v) for u,v in zip(model.keras_model.get_weights(),model_imported.keras_model.get_weights()))
|
39,881 |
def load_seednodes(emitter,
min_stake: int,
federated_only: bool,
network_domains: set,
network_middleware: RestMiddleware = None,
teacher_uris: list = None
) -> List[Ursula]:
# Set domains
if network_domains is None:
from nucypher.config.node import CharacterConfiguration
network_domains = {CharacterConfiguration.DEFAULT_DOMAIN, }
teacher_nodes = list() # Ursula
if teacher_uris is None:
teacher_uris = list()
for domain in network_domains:
try:
# Known NuCypher Domain
seednode_uris = TEACHER_NODES[domain]
except KeyError:
# Unknown NuCypher Domain
if not teacher_uris:
emitter.message(f"No default teacher nodes exist for the specified network: {domain}")
else:
# Prefer the injected teacher URI, then use the hardcoded seednodes.
teacher_uris.append(seednode_uris)
for uri in teacher_uris:
teacher_node = Ursula.from_teacher_uri(teacher_uri=uri,
min_stake=min_stake,
federated_only=federated_only,
network_middleware=network_middleware)
teacher_nodes.append(teacher_node)
if not teacher_nodes:
emitter.message(f'WARNING - No Bootnodes Available')
return teacher_nodes
|
def load_seednodes(emitter,
min_stake: int,
federated_only: bool,
network_domains: set,
network_middleware: RestMiddleware = None,
teacher_uris: list = None
) -> List[Ursula]:
# Set domains
if network_domains is None:
from nucypher.config.node import CharacterConfiguration
network_domains = {CharacterConfiguration.DEFAULT_DOMAIN, }
teacher_nodes = list() # Ursula
if teacher_uris is None:
teacher_uris = list()
for domain in network_domains:
try:
# Known NuCypher Domain
seednode_uris = TEACHER_NODES[domain]
except KeyError:
# Unknown NuCypher Domain
if not teacher_uris:
emitter.message(f"No default teacher nodes exist for the specified network: {domain}")
else:
# Prefer the injected teacher URI, then use the hardcoded seednodes.
teacher_uris.extend(seednode_uris)
for uri in teacher_uris:
teacher_node = Ursula.from_teacher_uri(teacher_uri=uri,
min_stake=min_stake,
federated_only=federated_only,
network_middleware=network_middleware)
teacher_nodes.append(teacher_node)
if not teacher_nodes:
emitter.message(f'WARNING - No Bootnodes Available')
return teacher_nodes
|
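The functional change in this pair is `teacher_uris.append(seednode_uris)` becoming `teacher_uris.extend(seednode_uris)`. A small illustration of why that matters when `seednode_uris` is itself a list (the URIs below are made up):

```python
seednode_uris = ["https://seed-1:9151", "https://seed-2:9151"]

with_append = ["https://injected:9151"]
with_append.append(seednode_uris)   # nests the whole list as a single element
# -> ['https://injected:9151', ['https://seed-1:9151', 'https://seed-2:9151']]

with_extend = ["https://injected:9151"]
with_extend.extend(seednode_uris)   # adds each URI individually
# -> ['https://injected:9151', 'https://seed-1:9151', 'https://seed-2:9151']
```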
46,524 |
def next_epoch_with_attestations(state,
fill_cur_epoch,
fill_prev_epoch):
post_state = deepcopy(state)
blocks = []
for slot in range(spec.SLOTS_PER_EPOCH):
block = build_empty_block_for_next_slot(post_state)
if fill_cur_epoch:
slot_to_attest = post_state.slot - spec.MIN_ATTESTATION_INCLUSION_DELAY + 1
if slot_to_attest >= get_epoch_start_slot(get_current_epoch(post_state)):
cur_attestation = get_valid_attestation(post_state, slot_to_attest)
fill_aggregate_attestation(post_state, cur_attestation)
block.body.attestations.append(cur_attestation)
if fill_prev_epoch:
slot_to_attest = post_state.slot - spec.SLOTS_PER_EPOCH + 1
prev_attestation = get_valid_attestation(post_state, slot_to_attest)
fill_aggregate_attestation(post_state, prev_attestation)
block.body.attestations.append(prev_attestation)
state_transition(post_state, block)
blocks.append(block)
return state, blocks, post_state
|
def next_epoch_with_attestations(state,
fill_cur_epoch,
fill_prev_epoch):
post_state = deepcopy(state)
blocks = []
for _ in range(spec.SLOTS_PER_EPOCH):
block = build_empty_block_for_next_slot(post_state)
if fill_cur_epoch:
slot_to_attest = post_state.slot - spec.MIN_ATTESTATION_INCLUSION_DELAY + 1
if slot_to_attest >= get_epoch_start_slot(get_current_epoch(post_state)):
cur_attestation = get_valid_attestation(post_state, slot_to_attest)
fill_aggregate_attestation(post_state, cur_attestation)
block.body.attestations.append(cur_attestation)
if fill_prev_epoch:
slot_to_attest = post_state.slot - spec.SLOTS_PER_EPOCH + 1
prev_attestation = get_valid_attestation(post_state, slot_to_attest)
fill_aggregate_attestation(post_state, prev_attestation)
block.body.attestations.append(prev_attestation)
state_transition(post_state, block)
blocks.append(block)
return state, blocks, post_state
|
4,122 |
def open_file():
p: cython.pointer(FILE) = fopen("spam.txt", "r")
if p is cython.NULL:
PyErr_SetFromErrnoWithFilenameObject(OSError, "spam.txt")
...
|
def open_file():
p = fopen("spam.txt", "r") # The type of "p" is "FILE*", as returned by fopen().
if p is cython.NULL:
PyErr_SetFromErrnoWithFilenameObject(OSError, "spam.txt")
...
|
52,143 |
def update_err(obj_id, cibadm_opt, xml, rc):
if cibadm_opt == '-U':
task = "update"
elif cibadm_opt == '-D':
task = "delete"
elif cibadm_opt == '-P':
task = "patch"
else:
task = "replace"
logger.error("could not {} {} (rc={})".format(task, obj_id, rc))
if rc == 54:
logger.info("Permission denied.")
elif task == "patch":
logger.info("offending xml diff: {}".format(xml))
else:
logger.info("offending xml: {}".format(xml))
# Above is a set of wrapped log messages for specific scenarios
|
def update_err(obj_id, cibadm_opt, xml, rc):
task_table = {"-U": "update", "-D": "delete", "-P": "patch"}
task = task_table.get(cibadm_opt, "replace")
logger.error("could not {} {} (rc={})".format(task, obj_id, rc))
if rc == 54:
logger.info("Permission denied.")
elif task == "patch":
logger.info("offending xml diff: {}".format(xml))
else:
logger.info("offending xml: {}".format(xml))
# Above is a set of wrapped log messages for specific scenarios
|
36,247 |
def sam(
adata: AnnData,
max_iter: int = 10,
num_norm_avg: int = 50,
k: int = 20,
distance: str = 'correlation',
standardization: Optional[str] = 'Normalizer',
weight_pcs: bool = True,
npcs: Optional[int] = None,
n_genes: Optional[int] = None,
projection: Optional[str] = 'umap',
inplace: bool = True,
verbose: bool = True,
) -> Optional[AnnData]:
"""Self-Assembling Manifolds single-cell RNA sequencing analysis tool.
SAM iteratively rescales the input gene expression matrix to emphasize
genes that are spatially variable along the intrinsic manifold of the data.
It outputs the gene weights, nearest neighbor matrix, and a 2D projection.
The AnnData input should contain unstandardized, non-negative values.
Preferably, the data should be log-normalized and no genes should be filtered out.
Parameters
----------
k - int, optional, default 20
The number of nearest neighbors to identify for each cell.
distance : string, optional, default 'correlation'
The distance metric to use when identifying nearest neighbors.
Can be any of the distance metrics supported by sklearn's 'pdist'.
max_iter - int, optional, default 10
The maximum number of iterations SAM will run.
projection - str, optional, default 'umap'
If 'tsne', generates a t-SNE embedding. If 'umap', generates a UMAP
embedding. Otherwise, no embedding will be generated.
standardization - str, optional, default 'Normalizer'
If 'Normalizer', use sklearn.preprocessing.Normalizer, which
normalizes expression data prior to PCA such that each cell has
unit L2 norm. If 'StandardScaler', use
sklearn.preprocessing.StandardScaler, which normalizes expression
data prior to PCA such that each gene has zero mean and unit
variance. Otherwise, do not normalize the expression data. We
recommend using 'StandardScaler' for large datasets with many
expected cell types and 'Normalizer' otherwise.
num_norm_avg - int, optional, default 50
The top 'num_norm_avg' dispersions are averaged to determine the
normalization factor when calculating the weights. This prevents
genes with large spatial dispersions from skewing the distribution
of weights.
weight_pcs - bool, optional, default True
If True, scale the principal components by their eigenvalues. In
datasets with many expected cell types, setting this to False might
improve the resolution as these cell types might be encoded by low-
variance principal components.
npcs - int, optional, default None,
Determines the number of top principal components selected at each
iteration of the SAM algorithm. If None, this number is chosen
automatically based on the size of the dataset. If weight_pcs is
set to True, this parameter primarily affects the runtime of the SAM
algorithm (more PCs = longer runtime).
n_genes - int, optional, default None:
Determines the number of top SAM-weighted genes to use at each iteration
of the SAM algorithm. If None, this number is chosen automatically
based on the size of the dataset. This parameter primarily affects
the runtime of the SAM algorithm (more genes = longer runtime).
inplace - bool, optional, default True:
Set fields in `adata` if True. Otherwise, returns a copy.
verbose - bool, optional, default True:
If True, displays SAM log statements.
Returns
-------
sam - SAM
The SAM object
adata - AnnData
`.var['weights']`
SAM weights for each gene.
`.var['spatial_dispersions']`
Spatial dispersions for each gene (these are used to compute the
SAM weights)
`.var['mask_genes']`
If preprocessed with SAM, this boolean vector indicates which genes
were filtered out (=False).
`.uns['preprocess_args']`
Dictionary of parameters used for preprocessing.
`.uns['run_args']`
Dictionary of parameters used for running SAM.
`.uns['pca_obj']`
The sklearn.decomposition.PCA object.
`.uns['X_processed']`
The standardized and SAM-weighted data fed into PCA.
`.uns['neighbors']`
A dictionary with key 'connectivities' containing the kNN adjacency
matrix output by SAM. If built-in scanpy dimensionality reduction
methods are to be used using the SAM-output AnnData, users
should recompute the neighbors using `.obs['X_pca']` with
`scanpy.pp.neighbors`.
`.uns['ranked_genes']`
Gene IDs ranked in descending order by their SAM weights.
`.obsm['X_pca']`
The principal components output by SAM.
`.obsm['X_umap']`
The UMAP projection output by SAM.
`.layers['X_disp']`
The expression matrix used for nearest-neighbor averaging.
`.layers['X_knn_avg']`
The nearest-neighbor-averaged expression data used for computing the
spatial dispersions of genes.
Example
-------
>>> import scanpy.external as sce
>>> import scanpy as sc
*** Running SAM ***
Assuming we are given an AnnData object called `adata`, we can run the SAM
algorithm as follows:
>>> sam,adata = sce.tl.SAM(adata,inplace=True)
The input AnnData object should contain unstandardized, non-negative
expression values. Preferably, the data should be log-normalized and no
genes should be filtered out.
Please see the documentation for a description of all available parameters.
For more detailed tutorials, please visit the original Github repository:
https://github.com/atarashansky/self-assembling-manifold/tree/master/tutorial
*** Plotting ***
To visualize the output, we can use the built-in `scatter` function (this
assumes that `matplotlib` is installed.)
>>> sam.scatter(projection = 'X_umap')
`scatter` accepts all keyword arguments used in the
`matplotlib.pyplot.scatter` function. Please visit the plotting tutorials
for more information:
https://github.com/atarashansky/self-assembling-manifold/tree/master/tutorial/SAM_Plotting
*** SAMGUI ***
SAM comes with the SAMGUI module, a graphical-user interface written with
`Plotly` and `ipythonwidgets` for interactively exploring and annotating
the scRNAseq data and running SAM.
Dependencies can be installed with Anaconda by following the instructions in
the self-assembling-manifold Github README:
https://github.com/atarashansky/self-assembling-manifold
In a Jupyter notebook, execute the following to launch the interface:
>>> from SAMGUI import SAMGUI
>>> sam_gui = SAMGUI(sam) # sam is your SAM object
>>> sam_gui.SamPlot
This can also be enabled in Jupyter Lab by following the instructions in the
self-assembling-manifold README.
"""
logg.info('Self-assembling manifold')
try:
from SAM import SAM
except ImportError:
raise ImportError(
'\nplease install sam-algorithm: \n\n'
'\tgit clone git://github.com/atarashansky/self-assembling-manifold.git\n'
'\tcd self-assembling-manifold\n'
'\tpip install .'
)
s = SAM(counts=adata, inplace=inplace)
logg.info('Running SAM')
s.run(
max_iter=max_iter,
num_norm_avg=num_norm_avg,
k=k,
distance=distance,
preprocessing=standardization,
weight_PCs=weight_pcs,
npcs=npcs,
n_genes=n_genes,
projection=projection,
verbose=verbose,
)
return (s, adata) if inplace else (s, s.adata)
|
def sam(
adata: AnnData,
max_iter: int = 10,
num_norm_avg: int = 50,
k: int = 20,
distance: str = 'correlation',
standardization: Optional[str] = 'Normalizer',
weight_pcs: bool = True,
npcs: Optional[int] = None,
n_genes: Optional[int] = None,
projection: Literal['umap', 'tsne'] = 'umap',
inplace: bool = True,
verbose: bool = True,
) -> Optional[AnnData]:
"""Self-Assembling Manifolds single-cell RNA sequencing analysis tool.
SAM iteratively rescales the input gene expression matrix to emphasize
genes that are spatially variable along the intrinsic manifold of the data.
It outputs the gene weights, nearest neighbor matrix, and a 2D projection.
The AnnData input should contain unstandardized, non-negative values.
Preferably, the data should be log-normalized and no genes should be filtered out.
Parameters
----------
k - int, optional, default 20
The number of nearest neighbors to identify for each cell.
distance : string, optional, default 'correlation'
The distance metric to use when identifying nearest neighbors.
Can be any of the distance metrics supported by sklearn's 'pdist'.
max_iter - int, optional, default 10
The maximum number of iterations SAM will run.
projection - str, optional, default 'umap'
If 'tsne', generates a t-SNE embedding. If 'umap', generates a UMAP
embedding. Otherwise, no embedding will be generated.
standardization - str, optional, default 'Normalizer'
If 'Normalizer', use sklearn.preprocessing.Normalizer, which
normalizes expression data prior to PCA such that each cell has
unit L2 norm. If 'StandardScaler', use
sklearn.preprocessing.StandardScaler, which normalizes expression
data prior to PCA such that each gene has zero mean and unit
variance. Otherwise, do not normalize the expression data. We
recommend using 'StandardScaler' for large datasets with many
expected cell types and 'Normalizer' otherwise.
num_norm_avg - int, optional, default 50
The top 'num_norm_avg' dispersions are averaged to determine the
normalization factor when calculating the weights. This prevents
genes with large spatial dispersions from skewing the distribution
of weights.
weight_pcs - bool, optional, default True
If True, scale the principal components by their eigenvalues. In
datasets with many expected cell types, setting this to False might
improve the resolution as these cell types might be encoded by low-
variance principal components.
npcs - int, optional, default None,
Determines the number of top principal components selected at each
iteration of the SAM algorithm. If None, this number is chosen
automatically based on the size of the dataset. If weight_pcs is
set to True, this parameter primarily affects the runtime of the SAM
algorithm (more PCs = longer runtime).
n_genes - int, optional, default None:
Determines the number of top SAM-weighted genes to use at each iteration
of the SAM algorithm. If None, this number is chosen automatically
based on the size of the dataset. This parameter primarily affects
the runtime of the SAM algorithm (more genes = longer runtime).
inplace - bool, optional, default True:
Set fields in `adata` if True. Otherwise, returns a copy.
verbose - bool, optional, default True:
If True, displays SAM log statements.
Returns
-------
sam - SAM
The SAM object
adata - AnnData
`.var['weights']`
SAM weights for each gene.
`.var['spatial_dispersions']`
Spatial dispersions for each gene (these are used to compute the
SAM weights)
`.var['mask_genes']`
If preprocessed with SAM, this boolean vector indicates which genes
were filtered out (=False).
`.uns['preprocess_args']`
Dictionary of parameters used for preprocessing.
`.uns['run_args']`
Dictionary of parameters used for running SAM.
`.uns['pca_obj']`
The sklearn.decomposition.PCA object.
`.uns['X_processed']`
The standardized and SAM-weighted data fed into PCA.
`.uns['neighbors']`
A dictionary with key 'connectivities' containing the kNN adjacency
matrix output by SAM. If built-in scanpy dimensionality reduction
methods are to be used using the SAM-output AnnData, users
should recompute the neighbors using `.obs['X_pca']` with
`scanpy.pp.neighbors`.
`.uns['ranked_genes']`
Gene IDs ranked in descending order by their SAM weights.
`.obsm['X_pca']`
The principal components output by SAM.
`.obsm['X_umap']`
The UMAP projection output by SAM.
`.layers['X_disp']`
The expression matrix used for nearest-neighbor averaging.
`.layers['X_knn_avg']`
The nearest-neighbor-averaged expression data used for computing the
spatial dispersions of genes.
Example
-------
>>> import scanpy.external as sce
>>> import scanpy as sc
*** Running SAM ***
Assuming we are given an AnnData object called `adata`, we can run the SAM
algorithm as follows:
>>> sam,adata = sce.tl.SAM(adata,inplace=True)
The input AnnData object should contain unstandardized, non-negative
expression values. Preferably, the data should be log-normalized and no
genes should be filtered out.
Please see the documentation for a description of all available parameters.
For more detailed tutorials, please visit the original Github repository:
https://github.com/atarashansky/self-assembling-manifold/tree/master/tutorial
*** Plotting ***
To visualize the output, we can use the built-in `scatter` function (this
assumes that `matplotlib` is installed.)
>>> sam.scatter(projection = 'X_umap')
`scatter` accepts all keyword arguments used in the
`matplotlib.pyplot.scatter` function. Please visit the plotting tutorials
for more information:
https://github.com/atarashansky/self-assembling-manifold/tree/master/tutorial/SAM_Plotting
*** SAMGUI ***
SAM comes with the SAMGUI module, a graphical-user interface written with
`Plotly` and `ipythonwidgets` for interactively exploring and annotating
the scRNAseq data and running SAM.
Dependencies can be installed with Anaconda by following the instructions in
the self-assembling-manifold Github README:
https://github.com/atarashansky/self-assembling-manifold
In a Jupyter notebook, execute the following to launch the interface:
>>> from SAMGUI import SAMGUI
>>> sam_gui = SAMGUI(sam) # sam is your SAM object
>>> sam_gui.SamPlot
This can also be enabled in Jupyter Lab by following the instructions in the
self-assembling-manifold README.
"""
logg.info('Self-assembling manifold')
try:
from SAM import SAM
except ImportError:
raise ImportError(
'\nplease install sam-algorithm: \n\n'
'\tgit clone git://github.com/atarashansky/self-assembling-manifold.git\n'
'\tcd self-assembling-manifold\n'
'\tpip install .'
)
s = SAM(counts=adata, inplace=inplace)
logg.info('Running SAM')
s.run(
max_iter=max_iter,
num_norm_avg=num_norm_avg,
k=k,
distance=distance,
preprocessing=standardization,
weight_PCs=weight_pcs,
npcs=npcs,
n_genes=n_genes,
projection=projection,
verbose=verbose,
)
return (s, adata) if inplace else (s, s.adata)
|
27,998 |
def perform_analysis(args, skip_handler, context, actions, metadata_tool,
compile_cmd_count):
"""
Perform static analysis via the given (or if not, all) analyzers,
in the given analysis context for the supplied build actions.
Additionally, insert statistical information into the metadata dict.
"""
ctu_reanalyze_on_failure = 'ctu_reanalyze_on_failure' in args and \
args.ctu_reanalyze_on_failure
if ctu_reanalyze_on_failure:
LOG.warning("Usage of a DEPRECATED FLAG!\n"
"The --ctu-reanalyze-on-failure flag will be removed "
"in the upcoming releases!")
analyzers = args.analyzers if 'analyzers' in args \
else analyzer_types.supported_analyzers
analyzers, _ = analyzer_types.check_supported_analyzers(
analyzers, context)
ctu_collect = False
ctu_analyze = False
ctu_dir = ''
if 'ctu_phases' in args:
ctu_dir = os.path.join(args.output_path, 'ctu-dir')
args.ctu_dir = ctu_dir
if ClangSA.ANALYZER_NAME not in analyzers:
LOG.error("CTU can only be used with the clang static analyzer.")
return
ctu_collect = args.ctu_phases[0]
ctu_analyze = args.ctu_phases[1]
if 'stats_enabled' in args and args.stats_enabled:
if ClangSA.ANALYZER_NAME not in analyzers:
LOG.debug("Statistics can only be used with "
"the Clang Static Analyzer.")
return
config_map = analyzer_types.build_config_handlers(args, context, analyzers)
available_checkers = set()
# Add profile names to the checkers list so we will not warn
# if a profile is enabled but there is no checker with that name.
available_checkers.update(context.available_profiles.keys())
# Collect all the available checkers from the enabled analyzers.
for analyzer in config_map.items():
_, analyzer_cfg = analyzer
for analyzer_checker in analyzer_cfg.checks().items():
checker_name, _ = analyzer_checker
available_checkers.add(checker_name)
if 'ordered_checkers' in args:
missing_checkers = checkers.available(args.ordered_checkers,
available_checkers)
if missing_checkers:
LOG.warning("No checker(s) with these names was found:\n%s",
'\n'.join(missing_checkers))
LOG.warning("Please review the checker names.\n"
"In the next release the analysis will not start "
"with invalid checker names.")
if 'stats_enabled' in args:
config_map[ClangSA.ANALYZER_NAME].set_checker_enabled(
SpecialReturnValueCollector.checker_analyze)
config_map[ClangSA.ANALYZER_NAME].set_checker_enabled(
ReturnValueCollector.checker_analyze)
# Statistics collector checkers must be explicitly disabled
# as they trash the output.
if "clangsa" in analyzers:
config_map[ClangSA.ANALYZER_NAME].set_checker_enabled(
SpecialReturnValueCollector.checker_collect, False)
config_map[ClangSA.ANALYZER_NAME].set_checker_enabled(
ReturnValueCollector.checker_collect, False)
check_env = env.extend(context.path_env_extra,
context.ld_lib_path_extra)
# CHECK if any checkers are enabled and only execute that analyzer
# where at least one checker is enabled.
for analyzer in config_map.items():
analyzer_name, analyzer_cfg = analyzer
if not analyzer_cfg.any_checker_enabled():
LOG.warning(f"Disabling {analyzer_name} no checkers were enabled.")
analyzers = [a for a in analyzers if a != analyzer_name]
actions = prepare_actions(actions, analyzers)
# Save some metadata information.
for analyzer in analyzers:
metadata_info = {
'checkers': {},
'analyzer_statistics': {
"failed": 0,
"failed_sources": [],
"successful": 0,
"version": None}}
for check, data in config_map[analyzer].checks().items():
state, _ = data
metadata_info['checkers'].update({
check: state == CheckerState.enabled})
version = config_map[analyzer].get_version(check_env)
metadata_info['analyzer_statistics']['version'] = version
metadata_tool['analyzers'][analyzer] = metadata_info
if 'makefile' in args and args.makefile:
statistics_data = __get_statistics_data(args)
ctu_data = None
if ctu_collect or statistics_data:
ctu_data = __get_ctu_data(config_map, ctu_dir)
makefile_creator = MakeFileCreator(analyzers, args.output_path,
config_map, context, skip_handler,
ctu_collect, statistics_data,
ctu_data)
makefile_creator.create(actions)
return
if ctu_collect:
shutil.rmtree(ctu_dir, ignore_errors=True)
elif ctu_analyze and not os.path.exists(ctu_dir):
LOG.error("CTU directory: '%s' does not exist.", ctu_dir)
return
start_time = time.time()
# Use Manager to create data objects which can be
# safely shared between processes.
manager = SyncManager()
manager.start(__mgr_init)
actions_map = create_actions_map(actions, manager)
# Setting to not None value will enable statistical analysis features.
statistics_data = __get_statistics_data(args)
if statistics_data:
statistics_data = manager.dict(statistics_data)
if ctu_collect or statistics_data:
ctu_data = None
if ctu_collect or ctu_analyze:
ctu_data = manager.dict(__get_ctu_data(config_map, ctu_dir))
pre_analyze = [a for a in actions
if a.analyzer_type == ClangSA.ANALYZER_NAME]
pre_anal_skip_handler = None
# Skip list is applied only in pre-analysis
# if --ctu-collect or --stats-collect was called explicitly
if ((ctu_collect and not ctu_analyze)
or ("stats_output" in args and args.stats_output)):
pre_anal_skip_handler = skip_handler
clangsa_config = config_map.get(ClangSA.ANALYZER_NAME)
if clangsa_config is not None:
pre_analysis_manager.run_pre_analysis(pre_analyze,
context,
clangsa_config,
args.jobs,
pre_anal_skip_handler,
ctu_data,
statistics_data,
manager)
else:
LOG.error("Can not run pre analysis without clang "
"static analyzer configuration.")
if 'stats_output' in args and args.stats_output:
return
if 'stats_dir' in args and args.stats_dir:
statistics_data = manager.dict({'stats_out_dir': args.stats_dir})
if ctu_analyze or statistics_data or (not ctu_analyze and not ctu_collect):
LOG.info("Starting static analysis ...")
analysis_manager.start_workers(actions_map, actions, context,
config_map, args.jobs,
args.output_path,
skip_handler,
metadata_tool,
'quiet' in args,
'capture_analysis_output' in args,
args.timeout if 'timeout' in args
else None,
ctu_reanalyze_on_failure,
statistics_data,
manager,
compile_cmd_count)
LOG.info("Analysis finished.")
LOG.info("To view results in the terminal use the "
"\"CodeChecker parse\" command.")
LOG.info("To store results use the \"CodeChecker store\" command.")
LOG.info("See --help and the user guide for further options about"
" parsing and storing the reports.")
LOG.info("----=================----")
end_time = time.time()
LOG.info("Analysis length: %s sec.", end_time - start_time)
metadata_tool['timestamps'] = {'begin': start_time,
'end': end_time}
if ctu_collect and ctu_analyze:
shutil.rmtree(ctu_dir, ignore_errors=True)
manager.shutdown()
|
def perform_analysis(args, skip_handler, context, actions, metadata_tool,
compile_cmd_count):
"""
Perform static analysis via the given (or if not, all) analyzers,
in the given analysis context for the supplied build actions.
Additionally, insert statistical information into the metadata dict.
"""
ctu_reanalyze_on_failure = 'ctu_reanalyze_on_failure' in args and \
args.ctu_reanalyze_on_failure
if ctu_reanalyze_on_failure:
LOG.warning("Usage of a DEPRECATED FLAG!\n"
"The --ctu-reanalyze-on-failure flag will be removed "
"in the upcoming releases!")
analyzers = args.analyzers if 'analyzers' in args \
else analyzer_types.supported_analyzers
analyzers, _ = analyzer_types.check_supported_analyzers(
analyzers, context)
ctu_collect = False
ctu_analyze = False
ctu_dir = ''
if 'ctu_phases' in args:
ctu_dir = os.path.join(args.output_path, 'ctu-dir')
args.ctu_dir = ctu_dir
if ClangSA.ANALYZER_NAME not in analyzers:
LOG.error("CTU can only be used with the clang static analyzer.")
return
ctu_collect = args.ctu_phases[0]
ctu_analyze = args.ctu_phases[1]
if 'stats_enabled' in args and args.stats_enabled:
if ClangSA.ANALYZER_NAME not in analyzers:
LOG.debug("Statistics can only be used with "
"the Clang Static Analyzer.")
return
config_map = analyzer_types.build_config_handlers(args, context, analyzers)
available_checkers = set()
# Add profile names to the checkers list so we will not warn
# if a profile is enabled but there is no checker with that name.
available_checkers.update(context.available_profiles.keys())
# Collect all the available checkers from the enabled analyzers.
for analyzer in config_map.items():
_, analyzer_cfg = analyzer
for analyzer_checker in analyzer_cfg.checks().items():
checker_name, _ = analyzer_checker
available_checkers.add(checker_name)
if 'ordered_checkers' in args:
missing_checkers = checkers.available(args.ordered_checkers,
available_checkers)
if missing_checkers:
LOG.warning("No checker(s) with these names was found:\n%s",
'\n'.join(missing_checkers))
LOG.warning("Please review the checker names.\n"
"In the next release the analysis will not start "
"with invalid checker names.")
if 'stats_enabled' in args:
config_map[ClangSA.ANALYZER_NAME].set_checker_enabled(
SpecialReturnValueCollector.checker_analyze)
config_map[ClangSA.ANALYZER_NAME].set_checker_enabled(
ReturnValueCollector.checker_analyze)
# Statistics collector checkers must be explicitly disabled
# as they trash the output.
if "clangsa" in analyzers:
config_map[ClangSA.ANALYZER_NAME].set_checker_enabled(
SpecialReturnValueCollector.checker_collect, False)
config_map[ClangSA.ANALYZER_NAME].set_checker_enabled(
ReturnValueCollector.checker_collect, False)
check_env = env.extend(context.path_env_extra,
context.ld_lib_path_extra)
# CHECK if any checkers are enabled and only execute that analyzer
# where at least one checker is enabled.
for analyzer in config_map.items():
analyzer_name, analyzer_cfg = analyzer
if not analyzer_cfg.any_checker_enabled():
LOG.warning(f"Disabling {analyzer_name}: no checkers were enabled.")
analyzers = [a for a in analyzers if a != analyzer_name]
actions = prepare_actions(actions, analyzers)
# Save some metadata information.
for analyzer in analyzers:
metadata_info = {
'checkers': {},
'analyzer_statistics': {
"failed": 0,
"failed_sources": [],
"successful": 0,
"version": None}}
for check, data in config_map[analyzer].checks().items():
state, _ = data
metadata_info['checkers'].update({
check: state == CheckerState.enabled})
version = config_map[analyzer].get_version(check_env)
metadata_info['analyzer_statistics']['version'] = version
metadata_tool['analyzers'][analyzer] = metadata_info
if 'makefile' in args and args.makefile:
statistics_data = __get_statistics_data(args)
ctu_data = None
if ctu_collect or statistics_data:
ctu_data = __get_ctu_data(config_map, ctu_dir)
makefile_creator = MakeFileCreator(analyzers, args.output_path,
config_map, context, skip_handler,
ctu_collect, statistics_data,
ctu_data)
makefile_creator.create(actions)
return
if ctu_collect:
shutil.rmtree(ctu_dir, ignore_errors=True)
elif ctu_analyze and not os.path.exists(ctu_dir):
LOG.error("CTU directory: '%s' does not exist.", ctu_dir)
return
start_time = time.time()
# Use Manager to create data objects which can be
# safely shared between processes.
manager = SyncManager()
manager.start(__mgr_init)
actions_map = create_actions_map(actions, manager)
# Setting to not None value will enable statistical analysis features.
statistics_data = __get_statistics_data(args)
if statistics_data:
statistics_data = manager.dict(statistics_data)
if ctu_collect or statistics_data:
ctu_data = None
if ctu_collect or ctu_analyze:
ctu_data = manager.dict(__get_ctu_data(config_map, ctu_dir))
pre_analyze = [a for a in actions
if a.analyzer_type == ClangSA.ANALYZER_NAME]
pre_anal_skip_handler = None
# Skip list is applied only in pre-analysis
# if --ctu-collect or --stats-collect was called explicitly
if ((ctu_collect and not ctu_analyze)
or ("stats_output" in args and args.stats_output)):
pre_anal_skip_handler = skip_handler
clangsa_config = config_map.get(ClangSA.ANALYZER_NAME)
if clangsa_config is not None:
pre_analysis_manager.run_pre_analysis(pre_analyze,
context,
clangsa_config,
args.jobs,
pre_anal_skip_handler,
ctu_data,
statistics_data,
manager)
else:
LOG.error("Can not run pre analysis without clang "
"static analyzer configuration.")
if 'stats_output' in args and args.stats_output:
return
if 'stats_dir' in args and args.stats_dir:
statistics_data = manager.dict({'stats_out_dir': args.stats_dir})
if ctu_analyze or statistics_data or (not ctu_analyze and not ctu_collect):
LOG.info("Starting static analysis ...")
analysis_manager.start_workers(actions_map, actions, context,
config_map, args.jobs,
args.output_path,
skip_handler,
metadata_tool,
'quiet' in args,
'capture_analysis_output' in args,
args.timeout if 'timeout' in args
else None,
ctu_reanalyze_on_failure,
statistics_data,
manager,
compile_cmd_count)
LOG.info("Analysis finished.")
LOG.info("To view results in the terminal use the "
"\"CodeChecker parse\" command.")
LOG.info("To store results use the \"CodeChecker store\" command.")
LOG.info("See --help and the user guide for further options about"
" parsing and storing the reports.")
LOG.info("----=================----")
end_time = time.time()
LOG.info("Analysis length: %s sec.", end_time - start_time)
metadata_tool['timestamps'] = {'begin': start_time,
'end': end_time}
if ctu_collect and ctu_analyze:
shutil.rmtree(ctu_dir, ignore_errors=True)
manager.shutdown()
|
45,743 |
def forecast(
precip,
velocity,
timesteps,
precip_thr=None,
n_cascade_levels=6,
extrap_method="semilagrangian",
decomp_method="fft",
bandpass_filter_method="gaussian",
ar_order=2,
conditional=False,
probmatching_method="cdf",
num_workers=1,
fft_method="numpy",
domain="spatial",
extrap_kwargs=None,
filter_kwargs=None,
measure_time=False,
):
"""
Generate a nowcast by using the Spectral Prognosis (S-PROG) method.
Parameters
----------
precip: array-like
Array of shape (ar_order+1,m,n) containing the input precipitation fields
ordered by timestamp from oldest to newest. The time steps between
the inputs are assumed to be regular.
velocity: array-like
Array of shape (2,m,n) containing the x- and y-components of the
advection field.
The velocities are assumed to represent one time step between the
inputs. All values are required to be finite.
timesteps: int or list of floats
Number of time steps to forecast or a list of time steps for which the
forecasts are computed (relative to the input time step). The elements
of the list are required to be in ascending order.
precip_thr: float, required
The threshold value for minimum observable precipitation intensity.
n_cascade_levels: int, optional
The number of cascade levels to use.
extrap_method: str, optional
Name of the extrapolation method to use. See the documentation of
pysteps.extrapolation.interface.
decomp_method: {'fft'}, optional
Name of the cascade decomposition method to use. See the documentation
of pysteps.cascade.interface.
bandpass_filter_method: {'gaussian', 'uniform'}, optional
Name of the bandpass filter method to use with the cascade decomposition.
See the documentation of pysteps.cascade.interface.
ar_order: int, optional
The order of the autoregressive model to use. Must be >= 1.
conditional: bool, optional
If set to True, compute the statistics of the precipitation field
conditionally by excluding pixels where the values are
below the threshold precip_thr.
probmatching_method: {'cdf','mean',None}, optional
Method for matching the conditional statistics of the forecast field
(areas with precipitation intensity above the threshold precip_thr) with
those of the most recently observed one. 'cdf'=map the forecast CDF to the
observed one, 'mean'=adjust only the mean value,
None=no matching applied.
num_workers: int, optional
The number of workers to use for parallel computation. Applicable if dask
is enabled or pyFFTW is used for computing the FFT.
When num_workers>1, it is advisable to disable OpenMP by setting
the environment variable OMP_NUM_THREADS to 1.
This avoids slowdown caused by too many simultaneous threads.
fft_method: str, optional
A string defining the FFT method to use (see utils.fft.get_method).
Defaults to 'numpy' for compatibility reasons. If pyFFTW is installed,
the recommended method is 'pyfftw'.
domain: {"spatial", "spectral"}
If "spatial", all computations are done in the spatial domain (the
classical S-PROG model). If "spectral", the AR(2) models are applied
directly in the spectral domain to reduce memory footprint and improve
performance :cite:`PCH2019a`.
extrap_kwargs: dict, optional
Optional dictionary containing keyword arguments for the extrapolation
method. See the documentation of pysteps.extrapolation.
filter_kwargs: dict, optional
Optional dictionary containing keyword arguments for the filter method.
See the documentation of pysteps.cascade.bandpass_filters.py.
measure_time: bool
If set to True, measure, print and return the computation time.
Returns
-------
out: ndarray
A three-dimensional array of shape (num_timesteps,m,n) containing a time
series of forecast precipitation fields. The time series starts from
t0+timestep, where timestep is taken from the input precipitation fields
precip. If measure_time is True, the return value is a three-element
tuple containing the nowcast array, the initialization time of the
nowcast generator and the time used in the main loop (seconds).
See also
--------
pysteps.extrapolation.interface, pysteps.cascade.interface
References
----------
:cite:`Seed2003`, :cite:`PCH2019a`
"""
_check_inputs(precip, velocity, timesteps, ar_order)
if extrap_kwargs is None:
extrap_kwargs = dict()
if filter_kwargs is None:
filter_kwargs = dict()
if np.any(~np.isfinite(velocity)):
raise ValueError("velocity contains non-finite values")
if precip_thr is None:
raise ValueError("precip_thr required but not specified")
print("Computing S-PROG nowcast")
print("------------------------")
print("")
print("Inputs")
print("------")
print(f"input dimensions: {precip.shape[1]}x{precip.shape[2]}")
print("")
print("Methods")
print("-------")
print(f"extrapolation: {extrap_method}")
print(f"bandpass filter: {bandpass_filter_method}")
print(f"decomposition: {decomp_method}")
print("conditional statistics: {}".format("yes" if conditional else "no"))
print(f"probability matching: {probmatching_method}")
print(f"FFT method: {fft_method}")
print(f"domain: {domain}")
print("")
print("Parameters")
print("----------")
if isinstance(timesteps, int):
print(f"number of time steps: {timesteps}")
else:
print(f"time steps: {timesteps}")
print(f"parallel threads: {num_workers}")
print(f"number of cascade levels: {n_cascade_levels}")
print(f"order of the AR(p) model: {ar_order}")
print(f"precip. intensity threshold: {precip_thr}")
if measure_time:
starttime_init = time.time()
fft = utils.get_method(fft_method, shape=precip.shape[1:], n_threads=num_workers)
m, n = precip.shape[1:]
# initialize the band-pass filter
filter_method = cascade.get_method(bandpass_filter_method)
filter = filter_method((m, n), n_cascade_levels, **filter_kwargs)
decomp_method, recomp_method = cascade.get_method(decomp_method)
extrapolator_method = extrapolation.get_method(extrap_method)
precip = precip[-(ar_order + 1) :, :, :].copy()
precip_min = np.nanmin(precip)
# determine the domain mask from non-finite values
domain_mask = np.logical_or.reduce(
[~np.isfinite(precip[i, :]) for i in range(precip.shape[0])]
)
# determine the precipitation threshold mask
if conditional:
mask_thr = np.logical_and.reduce(
[precip[i, :, :] >= precip_thr for i in range(precip.shape[0])]
)
else:
mask_thr = None
# initialize the extrapolator
x_values, y_values = np.meshgrid(
np.arange(precip.shape[2]), np.arange(precip.shape[1])
)
xy_coords = np.stack([x_values, y_values])
extrap_kwargs = extrap_kwargs.copy()
extrap_kwargs["xy_coords"] = xy_coords
extrap_kwargs["allow_nonfinite_values"] = (
True if np.any(~np.isfinite(precip)) else False
)
# advect the previous precipitation fields to the same position with the
# most recent one (i.e. transform them into the Lagrangian coordinates)
res = list()
def f(precip, i):
return extrapolator_method(
precip[i, :], velocity, ar_order - i, "min", **extrap_kwargs
)[-1]
for i in range(ar_order):
if not DASK_IMPORTED:
precip[i, :, :] = f(precip, i)
else:
res.append(dask.delayed(f)(precip, i))
if DASK_IMPORTED:
num_workers_ = len(res) if num_workers > len(res) else num_workers
precip = np.stack(
list(dask.compute(*res, num_workers=num_workers_)) + [precip[-1, :, :]]
)
# replace non-finite values with the minimum value
precip = precip.copy()
for i in range(precip.shape[0]):
precip[i, ~np.isfinite(precip[i, :])] = np.nanmin(precip[i, :])
# compute the cascade decompositions of the input precipitation fields
precip_d = []
for i in range(ar_order + 1):
precip_ = decomp_method(
precip[i, :, :],
filter,
mask=mask_thr,
fft_method=fft,
output_domain=domain,
normalize=True,
compute_stats=True,
compact_output=True,
)
precip_d.append(precip_)
# rearrange the cascade levels into a four-dimensional array of shape
# (n_cascade_levels,ar_order+1,m,n) for the autoregressive model
precip_c = nowcast_utils.stack_cascades(
precip_d, n_cascade_levels, convert_to_full_arrays=True
)
# compute lag-l temporal autocorrelation coefficients for each cascade level
gamma = np.empty((n_cascade_levels, ar_order))
for i in range(n_cascade_levels):
if domain == "spatial":
gamma[i, :] = correlation.temporal_autocorrelation(
precip_c[i], mask=mask_thr
)
else:
gamma[i, :] = correlation.temporal_autocorrelation(
precip_c[i], domain="spectral", x_shape=precip.shape[1:]
)
precip_c = nowcast_utils.stack_cascades(
precip_d, n_cascade_levels, convert_to_full_arrays=False
)
precip_d = precip_d[-1]
nowcast_utils.print_corrcoefs(gamma)
if ar_order == 2:
# adjust the lag-2 correlation coefficient to ensure that the AR(p)
# process is stationary
for i in range(n_cascade_levels):
gamma[i, 1] = autoregression.adjust_lag2_corrcoef2(gamma[i, 0], gamma[i, 1])
# estimate the parameters of the AR(p) model from the autocorrelation
# coefficients
phi = np.empty((n_cascade_levels, ar_order + 1))
for i in range(n_cascade_levels):
phi[i, :] = autoregression.estimate_ar_params_yw(gamma[i, :])
nowcast_utils.print_ar_params(phi)
# discard all except the p-1 last cascades because they are not needed for
# the AR(p) model
precip_c = [precip_c[i][-ar_order:] for i in range(n_cascade_levels)]
if probmatching_method == "mean":
mu_0 = np.mean(precip[-1, :, :][precip[-1, :, :] >= precip_thr])
else:
mu_0 = None
# compute precipitation mask and wet area ratio
mask_p = precip[-1, :, :] >= precip_thr
war = 1.0 * np.sum(mask_p) / (precip.shape[1] * precip.shape[2])
if measure_time:
init_time = time.time() - starttime_init
precip = precip[-1, :, :]
print("Starting nowcast computation.")
precip_f = []
state = {"precip_c": precip_c, "precip_d": precip_d}
params = {
"domain": domain,
"domain_mask": domain_mask,
"fft": fft,
"mu_0": mu_0,
"n_cascade_levels": n_cascade_levels,
"phi": phi,
"precip_0": precip,
"precip_min": precip_min,
"probmatching_method": probmatching_method,
"recomp_method": recomp_method,
"war": war,
}
precip_f = nowcast_main_loop(
precip,
velocity,
state,
timesteps,
extrap_method,
_update,
extrap_kwargs=extrap_kwargs,
params=params,
measure_time=measure_time,
)
if measure_time:
precip_f, mainloop_time = precip_f
precip_f = np.stack(precip_f)
if measure_time:
return precip_f, init_time, mainloop_time
else:
return precip_f
|
def forecast(
precip,
velocity,
timesteps,
precip_thr=None,
n_cascade_levels=6,
extrap_method="semilagrangian",
decomp_method="fft",
bandpass_filter_method="gaussian",
ar_order=2,
conditional=False,
probmatching_method="cdf",
num_workers=1,
fft_method="numpy",
domain="spatial",
extrap_kwargs=None,
filter_kwargs=None,
measure_time=False,
):
"""
Generate a nowcast by using the Spectral Prognosis (S-PROG) method.
Parameters
----------
precip: array-like
Array of shape (ar_order+1,m,n) containing the input precipitation fields
ordered by timestamp from oldest to newest. The time steps between
the inputs are assumed to be regular.
velocity: array-like
Array of shape (2,m,n) containing the x- and y-components of the
advection field.
The velocities are assumed to represent one time step between the
inputs. All values are required to be finite.
timesteps: int or list of floats
Number of time steps to forecast or a list of time steps for which the
forecasts are computed (relative to the input time step). The elements
of the list are required to be in ascending order.
precip_thr: float, required
The threshold value for minimum observable precipitation intensity.
n_cascade_levels: int, optional
The number of cascade levels to use.
extrap_method: str, optional
Name of the extrapolation method to use. See the documentation of
pysteps.extrapolation.interface.
decomp_method: {'fft'}, optional
Name of the cascade decomposition method to use. See the documentation
of pysteps.cascade.interface.
bandpass_filter_method: {'gaussian', 'uniform'}, optional
Name of the bandpass filter method to use with the cascade decomposition.
See the documentation of pysteps.cascade.interface.
ar_order: int, optional
The order of the autoregressive model to use. Must be >= 1.
conditional: bool, optional
If set to True, compute the statistics of the precipitation field
conditionally by excluding pixels where the values are
below the threshold precip_thr.
probmatching_method: {'cdf','mean',None}, optional
Method for matching the conditional statistics of the forecast field
(areas with precipitation intensity above the threshold precip_thr) with
those of the most recently observed one. 'cdf'=map the forecast CDF to the
observed one, 'mean'=adjust only the mean value,
None=no matching applied.
num_workers: int, optional
The number of workers to use for parallel computation. Applicable if dask
is enabled or pyFFTW is used for computing the FFT.
When num_workers>1, it is advisable to disable OpenMP by setting
the environment variable OMP_NUM_THREADS to 1.
This avoids slowdown caused by too many simultaneous threads.
fft_method: str, optional
A string defining the FFT method to use (see utils.fft.get_method).
Defaults to 'numpy' for compatibility reasons. If pyFFTW is installed,
the recommended method is 'pyfftw'.
domain: {"spatial", "spectral"}
If "spatial", all computations are done in the spatial domain (the
classical S-PROG model). If "spectral", the AR(2) models are applied
directly in the spectral domain to reduce memory footprint and improve
performance :cite:`PCH2019a`.
extrap_kwargs: dict, optional
Optional dictionary containing keyword arguments for the extrapolation
method. See the documentation of pysteps.extrapolation.
filter_kwargs: dict, optional
Optional dictionary containing keyword arguments for the filter method.
See the documentation of pysteps.cascade.bandpass_filters.py.
measure_time: bool
If set to True, measure, print and return the computation time.
Returns
-------
out: ndarray
A three-dimensional array of shape (num_timesteps,m,n) containing a time
series of forecast precipitation fields. The time series starts from
t0+timestep, where timestep is taken from the input precipitation fields
precip. If measure_time is True, the return value is a three-element
tuple containing the nowcast array, the initialization time of the
nowcast generator and the time used in the main loop (seconds).
See also
--------
pysteps.extrapolation.interface, pysteps.cascade.interface
References
----------
:cite:`Seed2003`, :cite:`PCH2019a`
"""
_check_inputs(precip, velocity, timesteps, ar_order)
if extrap_kwargs is None:
extrap_kwargs = dict()
if filter_kwargs is None:
filter_kwargs = dict()
if np.any(~np.isfinite(velocity)):
raise ValueError("velocity contains non-finite values")
if precip_thr is None:
raise ValueError("precip_thr required but not specified")
print("Computing S-PROG nowcast")
print("------------------------")
print("")
print("Inputs")
print("------")
print(f"input dimensions: {precip.shape[1]}x{precip.shape[2]}")
print("")
print("Methods")
print("-------")
print(f"extrapolation: {extrap_method}")
print(f"bandpass filter: {bandpass_filter_method}")
print(f"decomposition: {decomp_method}")
print("conditional statistics: {}".format("yes" if conditional else "no"))
print(f"probability matching: {probmatching_method}")
print(f"FFT method: {fft_method}")
print(f"domain: {domain}")
print("")
print("Parameters")
print("----------")
if isinstance(timesteps, int):
print(f"number of time steps: {timesteps}")
else:
print(f"time steps: {timesteps}")
print(f"parallel threads: {num_workers}")
print(f"number of cascade levels: {n_cascade_levels}")
print(f"order of the AR(p) model: {ar_order}")
print(f"precip. intensity threshold: {precip_thr}")
if measure_time:
starttime_init = time.time()
fft = utils.get_method(fft_method, shape=precip.shape[1:], n_threads=num_workers)
m, n = precip.shape[1:]
# initialize the band-pass filter
filter_method = cascade.get_method(bandpass_filter_method)
filter = filter_method((m, n), n_cascade_levels, **filter_kwargs)
decomp_method, recomp_method = cascade.get_method(decomp_method)
extrapolator_method = extrapolation.get_method(extrap_method)
precip = precip[-(ar_order + 1) :, :, :].copy()
precip_min = np.nanmin(precip)
# determine the domain mask from non-finite values
domain_mask = np.logical_or.reduce(
[~np.isfinite(precip[i, :]) for i in range(precip.shape[0])]
)
# determine the precipitation threshold mask
if conditional:
mask_thr = np.logical_and.reduce(
[precip[i, :, :] >= precip_thr for i in range(precip.shape[0])]
)
else:
mask_thr = None
# initialize the extrapolator
x_values, y_values = np.meshgrid(
np.arange(precip.shape[2]), np.arange(precip.shape[1])
)
xy_coords = np.stack([x_values, y_values])
extrap_kwargs = extrap_kwargs.copy()
extrap_kwargs["xy_coords"] = xy_coords
extrap_kwargs["allow_nonfinite_values"] = (
True if np.any(~np.isfinite(precip)) else False
)
    # advect the previous precipitation fields to the same position as the
    # most recent one (i.e. transform them into Lagrangian coordinates)
res = list()
def f(precip, i):
return extrapolator_method(
precip[i, :], velocity, ar_order - i, "min", **extrap_kwargs
)[-1]
for i in range(ar_order):
if not DASK_IMPORTED:
precip[i, :, :] = f(precip, i)
else:
res.append(dask.delayed(f)(precip, i))
if DASK_IMPORTED:
num_workers_ = len(res) if num_workers > len(res) else num_workers
precip = np.stack(
list(dask.compute(*res, num_workers=num_workers_)) + [precip[-1, :, :]]
)
# replace non-finite values with the minimum value
precip = precip.copy()
for i in range(precip.shape[0]):
precip[i, ~np.isfinite(precip[i, :])] = np.nanmin(precip[i, :])
# compute the cascade decompositions of the input precipitation fields
precip_d = []
for i in range(ar_order + 1):
precip_ = decomp_method(
precip[i, :, :],
filter,
mask=mask_thr,
fft_method=fft,
output_domain=domain,
normalize=True,
compute_stats=True,
compact_output=True,
)
precip_d.append(precip_)
# rearrange the cascade levels into a four-dimensional array of shape
# (n_cascade_levels,ar_order+1,m,n) for the autoregressive model
precip_c = nowcast_utils.stack_cascades(
precip_d, n_cascade_levels, convert_to_full_arrays=True
)
# compute lag-l temporal autocorrelation coefficients for each cascade level
gamma = np.empty((n_cascade_levels, ar_order))
for i in range(n_cascade_levels):
if domain == "spatial":
gamma[i, :] = correlation.temporal_autocorrelation(
precip_c[i], mask=mask_thr
)
else:
gamma[i, :] = correlation.temporal_autocorrelation(
precip_c[i], domain="spectral", x_shape=precip.shape[1:]
)
precip_c = nowcast_utils.stack_cascades(
precip_d, n_cascade_levels, convert_to_full_arrays=False
)
precip_d = precip_d[-1]
nowcast_utils.print_corrcoefs(gamma)
if ar_order == 2:
# adjust the lag-2 correlation coefficient to ensure that the AR(p)
# process is stationary
for i in range(n_cascade_levels):
gamma[i, 1] = autoregression.adjust_lag2_corrcoef2(gamma[i, 0], gamma[i, 1])
# estimate the parameters of the AR(p) model from the autocorrelation
# coefficients
phi = np.empty((n_cascade_levels, ar_order + 1))
for i in range(n_cascade_levels):
phi[i, :] = autoregression.estimate_ar_params_yw(gamma[i, :])
nowcast_utils.print_ar_params(phi)
    # discard all except the last p cascades because they are not needed for
    # the AR(p) model
precip_c = [precip_c[i][-ar_order:] for i in range(n_cascade_levels)]
if probmatching_method == "mean":
mu_0 = np.mean(precip[-1, :, :][precip[-1, :, :] >= precip_thr])
else:
mu_0 = None
# compute precipitation mask and wet area ratio
mask_p = precip[-1, :, :] >= precip_thr
war = 1.0 * np.sum(mask_p) / (precip.shape[1] * precip.shape[2])
if measure_time:
init_time = time.time() - starttime_init
precip = precip[-1, :, :]
print("Starting nowcast computation.")
precip_forecast = []
state = {"precip_c": precip_c, "precip_d": precip_d}
params = {
"domain": domain,
"domain_mask": domain_mask,
"fft": fft,
"mu_0": mu_0,
"n_cascade_levels": n_cascade_levels,
"phi": phi,
"precip_0": precip,
"precip_min": precip_min,
"probmatching_method": probmatching_method,
"recomp_method": recomp_method,
"war": war,
}
precip_f = nowcast_main_loop(
precip,
velocity,
state,
timesteps,
extrap_method,
_update,
extrap_kwargs=extrap_kwargs,
params=params,
measure_time=measure_time,
)
if measure_time:
precip_f, mainloop_time = precip_f
precip_f = np.stack(precip_f)
if measure_time:
return precip_f, init_time, mainloop_time
else:
return precip_f
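# Usage sketch: a minimal, hedged example of calling forecast() above with
# synthetic data, based only on the documented signature: precip has shape
# (ar_order+1, m, n) and velocity has shape (2, m, n). A real application
# would transform rain rates (e.g. to dB) and estimate the motion field
# (e.g. with pysteps.motion) instead of using random values and a zero field.
import numpy as np
precip_obs = np.random.exponential(scale=0.5, size=(3, 200, 200))  # ar_order=2 -> 3 inputs
velocity_field = np.zeros((2, 200, 200))  # finite, stationary advection field
nowcast = forecast(precip_obs, velocity_field, timesteps=6, precip_thr=0.1)
print(nowcast.shape)  # expected: (6, 200, 200)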
|
7,521 |
def get_body(body, time, location=None, ephemeris=None):
"""
Get a `~astropy.coordinates.SkyCoord` for a solar system body as observed
from a location on Earth in the `~astropy.coordinates.GCRS` reference
system.
Parameters
----------
body : str or other
The solar system body for which to calculate positions. Can also be a
kernel specifier (list of 2-tuples) if the ``ephemeris`` is a JPL
kernel.
time : `~astropy.time.Time`
Time of observation.
location : `~astropy.coordinates.EarthLocation`, optional
Location of observer on the Earth. If not given, will be taken from
``time`` (if not present, a geocentric observer will be assumed).
ephemeris : str, optional
Ephemeris to use. If not given, use the one set with
``astropy.coordinates.solar_system_ephemeris.set`` (which is
set to 'builtin' by default).
Returns
-------
skycoord : `~astropy.coordinates.SkyCoord`
GCRS Coordinate for the body
Notes
-----
The coordinate returned is the apparent position, corrected for the
light-travel time to the object.
"""
if location is None:
location = time.location
if location is not None:
obsgeoloc, obsgeovel = location.get_gcrs_posvel(time)
else:
obsgeoloc, obsgeovel = None, None
cartrep = _get_apparent_body_position(body, time, ephemeris, obsgeoloc)
icrs = ICRS(cartrep)
gcrs = icrs.transform_to(GCRS(obstime=time,
obsgeoloc=obsgeoloc,
obsgeovel=obsgeovel))
return SkyCoord(gcrs)
|
def get_body(body, time, location=None, ephemeris=None):
"""
Get a `~astropy.coordinates.SkyCoord` for a solar system body as observed
from a location on Earth in the `~astropy.coordinates.GCRS` reference
system.
Parameters
----------
body : str or other
The solar system body for which to calculate positions. Can also be a
kernel specifier (list of 2-tuples) if the ``ephemeris`` is a JPL
kernel.
time : `~astropy.time.Time`
Time of observation.
location : `~astropy.coordinates.EarthLocation`, optional
Location of observer on the Earth. If not given, will be taken from
``time`` (if not present, a geocentric observer will be assumed).
ephemeris : str, optional
Ephemeris to use. If not given, use the one set with
``astropy.coordinates.solar_system_ephemeris.set`` (which is
set to 'builtin' by default).
Returns
-------
skycoord : `~astropy.coordinates.SkyCoord`
GCRS Coordinate for the body
Notes
-----
The coordinate returned is the apparent position, corrected for the
light-travel time to the location on Earth.
"""
if location is None:
location = time.location
if location is not None:
obsgeoloc, obsgeovel = location.get_gcrs_posvel(time)
else:
obsgeoloc, obsgeovel = None, None
cartrep = _get_apparent_body_position(body, time, ephemeris, obsgeoloc)
icrs = ICRS(cartrep)
gcrs = icrs.transform_to(GCRS(obstime=time,
obsgeoloc=obsgeoloc,
obsgeovel=obsgeovel))
return SkyCoord(gcrs)
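# Usage sketch: a minimal, hedged example of the get_body() API documented
# above, using only public astropy calls; the time and site coordinates are
# arbitrary illustration values.
from astropy.time import Time
from astropy.coordinates import EarthLocation
obstime = Time("2021-06-21T12:00:00")
site = EarthLocation.from_geodetic(lon=10.0, lat=50.0, height=100.0)
moon = get_body("moon", obstime, location=site)  # apparent GCRS position of the Moon
print(moon.ra, moon.dec)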
|
43,684 |
def bit_driver(wires, n):
r"""Returns the bit-driver cost Hamiltonian component.
This Hamiltonian is defined as:
.. math:: H \ = \ (-1)^{n + 1} \displaystyle\sum_{i} Z_i
where :math:`Z_i` is the Pauli-Z operator acting on the
:math:`i`-th wire and :math:`n \ \in \ \{0, \ 1\}`. This Hamiltonian is often used as a term when
constructing larger QAOA cost Hamiltonians.
Args:
wires (Iterable or Wires): The wires on which the returned Hamiltonian acts
n (int): Either :math:`0` or :math:`1`. Determines whether the Hamiltonian assigns
lower energies to bitstrings with more :math:`0`s or :math:`1`s, respectively.
Returns:
.Hamiltonian
**Example**
>>> wires = range(3)
    >>> hamiltonian = qaoa.bit_driver(wires, 1)
>>> print(hamiltonian)
(1.0) [Z0] + (1.0) [Z1] + (1.0) [Z2]
"""
if n == 0:
coeffs = [-1 for _ in wires]
elif n == 1:
coeffs = [1 for _ in wires]
else:
raise ValueError("'state' must be either 0 or 1, got {}".format(n))
ops = [qml.PauliZ(w) for w in wires]
return qml.Hamiltonian(coeffs, ops)
|
def bit_driver(wires, n):
r"""Returns the bit-driver cost Hamiltonian component.
This Hamiltonian is defined as:
.. math:: H \ = \ (-1)^{n + 1} \displaystyle\sum_{i} Z_i
where :math:`Z_i` is the Pauli-Z operator acting on the
:math:`i`-th wire and :math:`n \ \in \ \{0, \ 1\}`. This Hamiltonian is often used as a term when
constructing larger QAOA cost Hamiltonians.
Args:
wires (Iterable or Wires): The wires on which the returned Hamiltonian acts
n (int): Either :math:`0` or :math:`1`. Determines whether the Hamiltonian assigns
lower energies to bitstrings with more :math:`0`s or :math:`1`s, respectively.
Returns:
.Hamiltonian
**Example**
>>> wires = range(3)
    >>> hamiltonian = qaoa.bit_driver(wires, 1)
>>> print(hamiltonian)
(1.0) [Z0] + (1.0) [Z1] + (1.0) [Z2]
"""
if n == 0:
coeffs = [-1 for _ in wires]
elif n == 1:
coeffs = [1 for _ in wires]
else:
raise ValueError("'n' must be either 0 or 1, got {}".format(n))
ops = [qml.PauliZ(w) for w in wires]
return qml.Hamiltonian(coeffs, ops)
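# Usage sketch: a hedged illustration of the docstring's remark that the
# bit-driver term is combined into larger QAOA cost Hamiltonians. It assumes
# a PennyLane version in which qml.Hamiltonian supports addition; the edge
# term below is an arbitrary example.
wires = range(3)
edge_term = qml.Hamiltonian([0.5], [qml.PauliZ(0) @ qml.PauliZ(1)])
cost_h = edge_term + bit_driver(wires, 1)  # favour bitstrings with many 1s
print(cost_h)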
|
29,260 |
def lookup_pr(owner, repo, pull_number):
"""Lookup a PR using the GitHub API.
Args:
owner: str. Owner of the repository the PR is in.
repo: str. Repository the PR is in.
pull_number: str. PR number.
Returns:
dict. JSON object returned by the GitHub API v3. This is an
empty dictionary if the response code from the GitHub API is not
200.
"""
request = urlrequest.Request(
GITHUB_API_PR_ENDPOINT % (owner, repo, pull_number),
None,
{'Accept': 'application/vnd.github.v3+json'})
response = utils.url_open(request)
if response.getcode() != 200:
return {}
pr = json.load(response)
response.close()
return pr
|
def lookup_pr(owner, repo, pull_number):
"""Lookup a PR using the GitHub API.
Args:
owner: str. Owner of the repository the PR is in.
repo: str. Repository the PR is in.
pull_number: str. PR number.
Returns:
dict. JSON object returned by the GitHub API v3. This is an
empty dictionary if the response code from the GitHub API is not
200.
"""
request = urllib.request.Request(
GITHUB_API_PR_ENDPOINT % (owner, repo, pull_number),
None,
{'Accept': 'application/vnd.github.v3+json'})
response = utils.url_open(request)
if response.getcode() != 200:
return {}
pr = json.load(response)
response.close()
return pr
|
41,504 |
def tmu_tilde(mu, data, pdf, init_pars, par_bounds):
r"""
    The test statistic, :math:`\tilde{t}_{\mu}`, for establishing two-sided
    intervals on the strength parameter, :math:`\mu`, for models with a
    bounded POI.
Args:
mu (Number or Tensor): The signal strength parameter
data (Tensor): The data to be considered
pdf (~pyhf.pdf.Model): The HistFactory statistical model used in the likelihood ratio calculation
init_pars (`list`): Values to initialize the model parameters at for the fit
par_bounds (`list` of `list`\s or `tuple`\s): The extrema of values the model parameters are allowed to reach in the fit
Returns:
        Float: The calculated test statistic, :math:`\tilde{t}_{\mu}`
"""
if pdf.config.poi_index is None:
raise UnspecifiedPOI(
'No POI is defined. A POI is required for profile likelihood based test statistics.'
)
if par_bounds[pdf.config.poi_index][0] != 0:
log.warning(
'tmu tilde test statistic used for fit configuration with POI not bounded at zero. Use tmu.'
)
return _tmu_like(mu, data, pdf, init_pars, par_bounds)
|
def tmu_tilde(mu, data, pdf, init_pars, par_bounds):
r"""
    The test statistic, :math:`\tilde{t}_{\mu}`, for establishing two-sided
    intervals on the strength parameter, :math:`\mu`, for models with a
    bounded POI.
Args:
mu (Number or Tensor): The signal strength parameter
data (Tensor): The data to be considered
pdf (~pyhf.pdf.Model): The statistical model adhering to the schema model.json
init_pars (`list`): Values to initialize the model parameters at for the fit
par_bounds (`list` of `list`\s or `tuple`\s): The extrema of values the model parameters are allowed to reach in the fit
Returns:
        Float: The calculated test statistic, :math:`\tilde{t}_{\mu}`
"""
if pdf.config.poi_index is None:
raise UnspecifiedPOI(
'No POI is defined. A POI is required for profile likelihood based test statistics.'
)
if par_bounds[pdf.config.poi_index][0] != 0:
log.warning(
'tmu tilde test statistic used for fit configuration with POI not bounded at zero. Use tmu.'
)
return _tmu_like(mu, data, pdf, init_pars, par_bounds)
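# Usage sketch: a hedged example of calling tmu_tilde() with a single-bin
# model built via pyhf.simplemodels; the numbers are arbitrary and newer pyhf
# releases name the helper uncorrelated_background instead of hepdata_like.
import pyhf
model = pyhf.simplemodels.hepdata_like(
    signal_data=[12.0], bkg_data=[50.0], bkg_uncerts=[3.0]
)
observations = [51.0]
data = observations + model.config.auxdata
init_pars = model.config.suggested_init()
par_bounds = model.config.suggested_bounds()
print(tmu_tilde(1.0, data, model, init_pars, par_bounds))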
|
2,290 |
def _make_array_out(X_out, index, get_feature_names_out, *,
array_out="default"):
"""Construct array container based on global configuration.
Parameters
----------
X_out: {ndarray, sparse matrix} of shape (n_samples, n_features_out)
Output data to be wrapped.
index: array-like of shape (n_samples,)
Index of output data.
    get_feature_names_out: callable
Returns the feature names out. If the callable returns None, then
the feature names will be ["X0", "X1", ...].
array_out : {"default", "pandas"}, default="default"
Specify the output array type. If "pandas", a pandas DataFrame is
returned. If "default", an array-like without feature names is
returned.
    Returns
    -------
array_out: {ndarray, sparse matrix, dataframe} of shape \
(n_samples, n_features_out)
Wrapped array with feature names.
"""
if array_out not in {'default', 'pandas'}:
raise ValueError("array_out must be 'default' or 'pandas'")
if array_out == "default":
return X_out
feature_names_out = get_feature_names_out()
if feature_names_out is None:
feature_names_out = [f'X{i}' for i in range(X_out.shape[1])]
# array_out == "pandas"
import pandas as pd
if sp_sparse.issparse(X_out):
make_dataframe = pd.DataFrame.sparse.from_spmatrix
else:
make_dataframe = pd.DataFrame
return make_dataframe(X_out, columns=feature_names_out, index=index)
|
def _make_array_out(X_out, *, index, get_feature_names_out,
array_out="default"):
"""Construct array container based on global configuration.
Parameters
----------
X_out: {ndarray, sparse matrix} of shape (n_samples, n_features_out)
Output data to be wrapped.
index: array-like of shape (n_samples,)
Index of output data.
    get_feature_names_out: callable
Returns the feature names out. If the callable returns None, then
the feature names will be ["X0", "X1", ...].
array_out : {"default", "pandas"}, default="default"
Specify the output array type. If "pandas", a pandas DataFrame is
returned. If "default", an array-like without feature names is
returned.
    Returns
    -------
array_out: {ndarray, sparse matrix, dataframe} of shape \
(n_samples, n_features_out)
Wrapped array with feature names.
"""
if array_out not in {'default', 'pandas'}:
raise ValueError("array_out must be 'default' or 'pandas'")
if array_out == "default":
return X_out
feature_names_out = get_feature_names_out()
if feature_names_out is None:
feature_names_out = [f'X{i}' for i in range(X_out.shape[1])]
# array_out == "pandas"
import pandas as pd
if sp_sparse.issparse(X_out):
make_dataframe = pd.DataFrame.sparse.from_spmatrix
else:
make_dataframe = pd.DataFrame
return make_dataframe(X_out, columns=feature_names_out, index=index)
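# Usage sketch: a hedged example of the wrapper above on a dense ndarray; the
# index and feature names are arbitrary. With array_out="pandas" a DataFrame
# is returned, with the default setting the input comes back unchanged.
import numpy as np
X = np.arange(6).reshape(3, 2)
df = _make_array_out(
    X,
    index=[10, 11, 12],
    get_feature_names_out=lambda: ["width", "height"],
    array_out="pandas",
)
print(df.columns.tolist())  # ['width', 'height']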
|
23,831 |
def test_deactivate_location():
conanfile = textwrap.dedent(r"""
from conans import ConanFile
from conan.tools.env import Environment
class Pkg(ConanFile):
def package_info(self):
self.buildenv_info.define("FOO", "BAR")
""")
client = TestClient()
client.save({"pkg.py": conanfile})
client.run("create pkg.py pkg/1.0@")
conanfile_txt = textwrap.dedent(r"""
[requires]
pkg/1.0
[generators]
VirtualBuildEnv
""")
client.save({"conanfile.txt": conanfile_txt})
client.run("install conanfile.txt --install-folder=myfolder -s build_type=Release -s arch=x86_64")
if platform.system() == "Windows":
cmd = "./myfolder/conanbuild.bat"
else:
cmd = '. ./myfolder/conanbuild.sh'
subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True,
cwd=client.current_folder).communicate()
assert not os.path.exists(os.path.join(client.current_folder,
"deactivate_conanbuildenv-release-x86_64.sh"))
assert os.path.exists(os.path.join(client.current_folder, "myfolder",
"deactivate_conanbuildenv-release-x86_64.sh"))
|
def test_deactivate_location():
conanfile = textwrap.dedent(r"""
from conans import ConanFile
from conan.tools.env import Environment
class Pkg(ConanFile):
def package_info(self):
self.buildenv_info.define("FOO", "BAR")
""")
client = TestClient()
client.save({"pkg.py": conanfile})
client.run("create pkg.py pkg/1.0@")
client.run("install pkg/1.0@ -g VirtualBuildEnv --install-folder=myfolder -s build_type=Release -s arch=x86_64")
if platform.system() == "Windows":
cmd = "./myfolder/conanbuild.bat"
else:
cmd = '. ./myfolder/conanbuild.sh'
subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True,
cwd=client.current_folder).communicate()
assert not os.path.exists(os.path.join(client.current_folder,
"deactivate_conanbuildenv-release-x86_64.sh"))
assert os.path.exists(os.path.join(client.current_folder, "myfolder",
"deactivate_conanbuildenv-release-x86_64.sh"))
|
52,685 |
def _count_class_sample(y):
unique, counts = np.unique(y, return_counts=True)
if is_dask_container(unique):
unique, counts = unique.compute(), counts.compute()
return dict(zip(unique, counts))
|
def _count_class_sample(y):
unique, counts = np.unique(y, return_counts=True)
if is_dask_container(unique):
unique, counts = dask.compute(unique, counts)
return dict(zip(unique, counts))
|
10,917 |
def update_build_option(key, value):
"""
Update build option with specified name to given value.
WARNING: Use this with care, the build options are not expected to be changed during an EasyBuild session!
"""
    # BuildOptions() is a (singleton) frozen dict, so this is less straightforward than it seems...
build_options = BuildOptions()
orig_value = build_options._FrozenDict__dict[key]
build_options._FrozenDict__dict[key] = value
_log.warning("Build option '%s' was updated to: %s", key, build_option(key))
# Return original value, so they can be restored later if needed
return orig_value
|
def update_build_option(key, value):
"""
Update build option with specified name to given value.
WARNING: Use this with care, the build options are not expected to be changed during an EasyBuild session!
"""
    # BuildOptions() is a (singleton) frozen dict, so this is less straightforward than it seems...
build_options = BuildOptions()
orig_value = build_options._FrozenDict__dict[key]
build_options._FrozenDict__dict[key] = value
_log.warning("Build option '%s' was updated to: %s", key, build_option(key))
# Return original value, so it can be restored later if needed
return orig_value
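# Usage sketch: a hedged example of the update/restore pattern suggested by
# the docstring and return value; 'debug' stands in for whichever EasyBuild
# build option key is relevant in practice.
orig_debug = update_build_option('debug', True)
try:
    pass  # ... run code that needs the tweaked build option ...
finally:
    update_build_option('debug', orig_debug)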
|
25,625 |
def _print_summary_output_string(module: tf.Module, fmt: str = None) -> str:
fmt = fmt if fmt is not None else summary_fmt()
column_names = ['name', 'class', 'transform', 'trainable', 'shape', 'dtype', 'value']
def get_name(v):
return v.__class__.__name__
def get_transform(v):
if hasattr(v, "transform") and v.transform is not None:
return v.transform.__class__.__name__
return None
merged_leaf_components = _merge_leaf_components(leaf_components(module))
column_values = [[
path,
get_name(variable),
get_transform(variable),
variable.trainable,
variable.shape,
variable.dtype.name,
_str_tensor_value(variable.numpy())
] for path, variable in merged_leaf_components.items()]
return tabulate(column_values, headers=column_names, tablefmt=fmt)
|
def tabulate_module_summary(module: tf.Module, tablefmt: str = None) -> str:
    fmt = tablefmt if tablefmt is not None else summary_fmt()
column_names = ['name', 'class', 'transform', 'trainable', 'shape', 'dtype', 'value']
def get_name(v):
return v.__class__.__name__
def get_transform(v):
if hasattr(v, "transform") and v.transform is not None:
return v.transform.__class__.__name__
return None
merged_leaf_components = _merge_leaf_components(leaf_components(module))
column_values = [[
path,
get_name(variable),
get_transform(variable),
variable.trainable,
variable.shape,
variable.dtype.name,
_str_tensor_value(variable.numpy())
] for path, variable in merged_leaf_components.items()]
return tabulate(column_values, headers=column_names, tablefmt=fmt)
|
23,183 |
def assert_dask_dtypes(ddf, res, numeric_equal=True):
"""Check that the dask metadata matches the result.
If `numeric_equal`, integer and floating dtypes compare equal. This is
useful due to the implicit conversion of integer to floating upon
encountering missingness, which is hard to infer statically."""
eq_type_sets = [{"O", "S", "U", "a"}] # treat object and strings alike
if numeric_equal:
eq_type_sets.append({"i", "f", "u"})
def eq_dtypes(a, b):
return any(
a.kind in eq_types and b.kind in eq_types for eq_types in eq_type_sets
) or (a == b)
if not is_dask_collection(res) and is_dataframe_like(res):
for _, a, b in pd.concat([ddf._meta.dtypes, res.dtypes], axis=1).itertuples():
assert eq_dtypes(a, b)
elif not is_dask_collection(res) and (is_index_like(res) or is_series_like(res)):
a = ddf._meta.dtype
b = res.dtype
assert eq_dtypes(a, b)
else:
if hasattr(ddf._meta, "dtype"):
a = ddf._meta.dtype
if not hasattr(res, "dtype"):
assert np.isscalar(res)
b = np.dtype(type(res))
else:
b = res.dtype
assert eq_dtypes(a, b)
else:
assert type(ddf._meta) == type(res)
|
def assert_dask_dtypes(ddf, res, numeric_equal=True):
"""Check that the dask metadata matches the result.
If `numeric_equal`, integer and floating dtypes compare equal. This is
useful due to the implicit conversion of integer to floating upon
encountering missingness, which is hard to infer statically."""
eq_type_sets = [{"O", "S", "U", "a"}] # treat object and strings alike
if numeric_equal:
eq_type_sets.append({"i", "f", "u"})
def eq_dtypes(a, b):
return any(
a.kind in eq_types and b.kind in eq_types for eq_types in eq_type_sets
) or (a == b)
if not is_dask_collection(res) and is_dataframe_like(res):
for a, b in pd.concat([ddf._meta.dtypes, res.dtypes], axis=1).itertuples(index=False):
assert eq_dtypes(a, b)
elif not is_dask_collection(res) and (is_index_like(res) or is_series_like(res)):
a = ddf._meta.dtype
b = res.dtype
assert eq_dtypes(a, b)
else:
if hasattr(ddf._meta, "dtype"):
a = ddf._meta.dtype
if not hasattr(res, "dtype"):
assert np.isscalar(res)
b = np.dtype(type(res))
else:
b = res.dtype
assert eq_dtypes(a, b)
else:
assert type(ddf._meta) == type(res)
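# Usage sketch: a hedged example of the helper above, checking the dask
# metadata of a small partitioned DataFrame against its computed result.
import pandas as pd
import dask.dataframe as dd
pdf = pd.DataFrame({"x": [1, 2, 3], "y": [0.1, 0.2, 0.3]})
ddf = dd.from_pandas(pdf, npartitions=2)
assert_dask_dtypes(ddf, ddf.compute())  # passes: metadata dtypes match the result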
|
50,659 |
def expand(vevent, href=''):
"""
Constructs a list of start and end dates for all recurring instances of the
event defined in vevent.
It considers RRULE as well as RDATE and EXDATE properties. In case of
    unsupported recurrence rules an UnsupportedRecurrence exception is thrown.
If the vevent contains a RECURRENCE-ID property, no expansion is done,
the function still returns a tuple of start and end (date)times.
:param vevent: vevent to be expanded
:type vevent: icalendar.cal.Event
:param href: the href of the vevent, used for more informative logging and
nothing else
:type href: str
:returns: list of start and end (date)times of the expanded event
:rtype: list(tuple(datetime, datetime))
"""
    # we do this now and then never care about the "real" end time again
if 'DURATION' in vevent:
duration = vevent['DURATION'].dt
else:
duration = vevent['DTEND'].dt - vevent['DTSTART'].dt
# if this vevent has a RECURRENCE_ID property, no expansion will be
# performed
expand = not bool(vevent.get('RECURRENCE-ID'))
events_tz = getattr(vevent['DTSTART'].dt, 'tzinfo', None)
allday = not isinstance(vevent['DTSTART'].dt, dt.datetime)
def sanitize_datetime(date):
if allday and isinstance(date, dt.datetime):
date = date.date()
if events_tz is not None:
date = events_tz.localize(date)
return date
rrule_param = vevent.get('RRULE')
if expand and rrule_param is not None:
vevent = sanitize_rrule(vevent)
        # DST causes problems while expanding the rrule, therefore we transform
# everything to naive datetime objects and transform back after
# expanding
# See https://github.com/dateutil/dateutil/issues/102
dtstart = vevent['DTSTART'].dt
if events_tz:
dtstart = dtstart.replace(tzinfo=None)
rrule = dateutil.rrule.rrulestr(
rrule_param.to_ical().decode(),
dtstart=dtstart,
ignoretz=True,
)
if rrule._until is None:
# rrule really doesn't like to calculate all recurrences until
# eternity, so we only do it until 2037, because a) I'm not sure
# if python can deal with larger datetime values yet and b) pytz
# doesn't know any larger transition times
rrule._until = dt.datetime(2037, 12, 31)
else:
if events_tz and 'Z' in rrule_param.to_ical().decode():
rrule._until = pytz.UTC.localize(
rrule._until).astimezone(events_tz).replace(tzinfo=None)
# rrule._until and dtstart could be dt.date or dt.datetime. They
# need to be the same for comparison
testuntil = rrule._until
if (type(dtstart) == dt.date and type(testuntil) == dt.datetime):
testuntil = testuntil.date()
teststart = dtstart
if (type(testuntil) == dt.date and type(teststart) == dt.datetime):
teststart = teststart.date()
if testuntil < teststart:
logger.warning(
'{}: Unsupported recurrence. UNTIL is before DTSTART.\n'
'This event will not be available in khal.'.format(href))
return False
if rrule.count() == 0:
logger.warning(
'{}: Recurrence defined but will never occur.\n'
'This event will not be available in khal.'.format(href))
return False
rrule = map(sanitize_datetime, rrule)
logger.debug(f'calculating recurrence dates for {href}, this might take some time.')
# RRULE and RDATE may specify the same date twice, it is recommended by
# the RFC to consider this as only one instance
dtstartl = set(rrule)
if not dtstartl:
raise UnsupportedRecurrence()
else:
dtstartl = {vevent['DTSTART'].dt}
def get_dates(vevent, key):
# TODO replace with get_all_properties
dates = vevent.get(key)
if dates is None:
return
if not isinstance(dates, list):
dates = [dates]
dates = (leaf.dt for tree in dates for leaf in tree.dts)
dates = localize_strip_tz(dates, events_tz)
return map(sanitize_datetime, dates)
    # include explicitly specified recurrence dates
if expand:
dtstartl.update(get_dates(vevent, 'RDATE') or ())
# remove excluded dates
if expand:
for date in get_dates(vevent, 'EXDATE') or ():
try:
dtstartl.remove(date)
except KeyError:
logger.warning(
'In event {}, excluded instance starting at {} not found, '
'event might be invalid.'.format(href, date))
dtstartend = [(start, start + duration) for start in dtstartl]
# not necessary, but I prefer deterministic output
dtstartend.sort()
return dtstartend
|
def expand(vevent, href=''):
"""
Constructs a list of start and end dates for all recurring instances of the
event defined in vevent.
It considers RRULE as well as RDATE and EXDATE properties. In case of
    unsupported recurrence rules an UnsupportedRecurrence exception is thrown.
If the vevent contains a RECURRENCE-ID property, no expansion is done,
the function still returns a tuple of start and end (date)times.
:param vevent: vevent to be expanded
:type vevent: icalendar.cal.Event
:param href: the href of the vevent, used for more informative logging and
nothing else
:type href: str
:returns: list of start and end (date)times of the expanded event
:rtype: list(tuple(datetime, datetime))
"""
    # we do this now and then never care about the "real" end time again
if 'DURATION' in vevent:
duration = vevent['DURATION'].dt
else:
duration = vevent['DTEND'].dt - vevent['DTSTART'].dt
# if this vevent has a RECURRENCE_ID property, no expansion will be
# performed
expand = not bool(vevent.get('RECURRENCE-ID'))
events_tz = getattr(vevent['DTSTART'].dt, 'tzinfo', None)
allday = not isinstance(vevent['DTSTART'].dt, dt.datetime)
def sanitize_datetime(date):
if allday and isinstance(date, dt.datetime):
date = date.date()
if events_tz is not None:
date = events_tz.localize(date)
return date
rrule_param = vevent.get('RRULE')
if expand and rrule_param is not None:
vevent = sanitize_rrule(vevent)
        # DST causes problems while expanding the rrule, therefore we transform
# everything to naive datetime objects and transform back after
# expanding
# See https://github.com/dateutil/dateutil/issues/102
dtstart = vevent['DTSTART'].dt
if events_tz:
dtstart = dtstart.replace(tzinfo=None)
rrule = dateutil.rrule.rrulestr(
rrule_param.to_ical().decode(),
dtstart=dtstart,
ignoretz=True,
)
if rrule._until is None:
# rrule really doesn't like to calculate all recurrences until
# eternity, so we only do it until 2037, because a) I'm not sure
# if python can deal with larger datetime values yet and b) pytz
# doesn't know any larger transition times
rrule._until = dt.datetime(2037, 12, 31)
else:
if events_tz and 'Z' in rrule_param.to_ical().decode():
rrule._until = pytz.UTC.localize(
rrule._until).astimezone(events_tz).replace(tzinfo=None)
# rrule._until and dtstart could be dt.date or dt.datetime. They
# need to be the same for comparison
testuntil = rrule._until
if (type(dtstart) == dt.date and type(testuntil) == dt.datetime):
testuntil = testuntil.date()
teststart = dtstart
if (type(testuntil) == dt.date and type(teststart) == dt.datetime):
teststart = teststart.date()
if testuntil < teststart:
logger.warning(
f'{href}: Unsupported recurrence. UNTIL is before DTSTART.\n'
'This event will not be available in khal.')
return False
if rrule.count() == 0:
logger.warning(
'{}: Recurrence defined but will never occur.\n'
'This event will not be available in khal.'.format(href))
return False
rrule = map(sanitize_datetime, rrule)
logger.debug(f'calculating recurrence dates for {href}, this might take some time.')
# RRULE and RDATE may specify the same date twice, it is recommended by
# the RFC to consider this as only one instance
dtstartl = set(rrule)
if not dtstartl:
raise UnsupportedRecurrence()
else:
dtstartl = {vevent['DTSTART'].dt}
def get_dates(vevent, key):
# TODO replace with get_all_properties
dates = vevent.get(key)
if dates is None:
return
if not isinstance(dates, list):
dates = [dates]
dates = (leaf.dt for tree in dates for leaf in tree.dts)
dates = localize_strip_tz(dates, events_tz)
return map(sanitize_datetime, dates)
    # include explicitly specified recurrence dates
if expand:
dtstartl.update(get_dates(vevent, 'RDATE') or ())
# remove excluded dates
if expand:
for date in get_dates(vevent, 'EXDATE') or ():
try:
dtstartl.remove(date)
except KeyError:
logger.warning(
'In event {}, excluded instance starting at {} not found, '
'event might be invalid.'.format(href, date))
dtstartend = [(start, start + duration) for start in dtstartl]
# not necessary, but I prefer deterministic output
dtstartend.sort()
return dtstartend
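# Usage sketch: a hedged example of expand() on a small recurring event; it
# assumes the icalendar package and the surrounding module context (logger,
# sanitize_rrule, ...) are available.
import icalendar
ics = (
    "BEGIN:VEVENT\r\n"
    "UID:example-recurrence@example.com\r\n"
    "DTSTART:20240101T100000\r\n"
    "DTEND:20240101T110000\r\n"
    "RRULE:FREQ=DAILY;COUNT=3\r\n"
    "END:VEVENT\r\n"
)
vevent = icalendar.Event.from_ical(ics)
for start, end in expand(vevent, href='example.ics'):
    print(start, end)  # three one-hour occurrences on consecutive days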
|
57,929 |
def list_user_policies(args, aws_client):
client = aws_client.aws_session(
service=SERVICE,
role_arn=args.get('roleArn'),
role_session_name=args.get('roleSessionName'),
role_session_duration=args.get('roleSessionDuration'),
)
user_name = args.get('userName', "")
marker = args.get('marker', None)
limit, is_manual, page_size = get_limit(args)
kwargs = {
'UserName': user_name,
'MaxItems': limit
}
if marker:
kwargs.update({'Marker': marker})
response = client.list_user_policies(**kwargs)
data = response.get('PolicyNames', [])
marker = response.get('Marker', None)
if is_manual and page_size and len(data) > page_size:
data = data[-1 * args.get('page_size'):]
policy_data = []
for policy in data:
policy_data.append({
'UserName': user_name,
'PolicyName': policy,
})
ec = {'AWS.IAM.UserPolicies(val.PolicyName && val.UserName && val.PolicyName === obj.PolicyName && '
'val.UserName === obj.UserName)': policy_data,
'AWS.IAM.Users(val.UserName === \'{}\').InlinePoliciesMarker'.format(user_name): marker}
human_readable = tableToMarkdown('AWS IAM Policies for user {}'.format(user_name),
headers=["PolicyNames"],
headerTransform=pascalToSpace,
t=data)
return_outputs(human_readable, ec)
|
def list_user_policies(args, aws_client):
client = aws_client.aws_session(
service=SERVICE,
role_arn=args.get('roleArn'),
role_session_name=args.get('roleSessionName'),
role_session_duration=args.get('roleSessionDuration'),
)
user_name = args.get('userName', "")
marker = args.get('marker', None)
limit, is_manual, page_size = get_limit(args)
kwargs = {
'UserName': user_name,
'MaxItems': limit
}
if marker:
kwargs.update({'Marker': marker})
response = client.list_user_policies(**kwargs)
data = response.get('PolicyNames', [])
marker = response.get('Marker', None)
if is_manual and page_size and len(data) > page_size:
data = data[-1 * args.get('page_size'):]
policy_data = [{
'UserName': user_name,
'PolicyName': policy,
} for policy in data]
ec = {'AWS.IAM.UserPolicies(val.PolicyName && val.UserName && val.PolicyName === obj.PolicyName && '
'val.UserName === obj.UserName)': policy_data,
'AWS.IAM.Users(val.UserName === \'{}\').InlinePoliciesMarker'.format(user_name): marker}
human_readable = tableToMarkdown('AWS IAM Policies for user {}'.format(user_name),
headers=["PolicyNames"],
headerTransform=pascalToSpace,
t=data)
return_outputs(human_readable, ec)
|
56,808 |
def _two_factor_required(view_func, domain, couch_user, request):
if (ENTERPRISE_SSO.enabled_for_request(request)
and is_request_using_sso(request)):
# SSO authenticated users manage two-factor auth on the Identity Provider
# level, so CommCare HQ does not attempt 2FA with them. This is one of
# the reasons we require that domains establish TrustedIdentityProvider
# relationships.
return False
exempt = getattr(view_func, 'two_factor_exempt', False)
if exempt:
return False
if not couch_user:
return False
return (
# If a user is a superuser, then there is no two_factor_disabled loophole allowed.
# If you lose your phone, you have to give up superuser privileges
# until you have two factor set up again.
settings.REQUIRE_TWO_FACTOR_FOR_SUPERUSERS and couch_user.is_superuser
) or (
# For other policies requiring two factor auth,
# allow the two_factor_disabled loophole for people who have lost their phones
# and need time to set up two factor auth again.
(domain.two_factor_auth or TWO_FACTOR_SUPERUSER_ROLLOUT.enabled(couch_user.username))
and not couch_user.two_factor_disabled
)
|
def _two_factor_required(view_func, request):
domain = request.project
couch_user = request.couch_user
if (ENTERPRISE_SSO.enabled_for_request(request)
and is_request_using_sso(request)):
# SSO authenticated users manage two-factor auth on the Identity Provider
# level, so CommCare HQ does not attempt 2FA with them. This is one of
# the reasons we require that domains establish TrustedIdentityProvider
# relationships.
return False
exempt = getattr(view_func, 'two_factor_exempt', False)
if exempt:
return False
if not couch_user:
return False
return (
# If a user is a superuser, then there is no two_factor_disabled loophole allowed.
# If you lose your phone, you have to give up superuser privileges
# until you have two factor set up again.
settings.REQUIRE_TWO_FACTOR_FOR_SUPERUSERS and couch_user.is_superuser
) or (
# For other policies requiring two factor auth,
# allow the two_factor_disabled loophole for people who have lost their phones
# and need time to set up two factor auth again.
(domain.two_factor_auth or TWO_FACTOR_SUPERUSER_ROLLOUT.enabled(couch_user.username))
and not couch_user.two_factor_disabled
)
|
30,363 |
def verify_map_equals(values_map1, values_map2, equality_map):
if not equality_map or len(equality_map) == 0:
return True
if not values_map1 or len(values_map1) == 0 or not values_map2 or len(values_map2) == 0:
return False
for key in equality_map:
if key not in values_map1 or key not in values_map2:
return False
value1 = values_map1[key]
value2 = values_map2[key]
if isinstance(value1, basestring) and isinstance(value2, basestring):
is_values_equals = is_text_equal_by_x_different_words(values_map1[key], values_map2[key], equality_map[key])
if not is_values_equals:
return False
elif isinstance(value1, list) and isinstance(value2, list):
try:
return set(value1) == set(value2)
except Exception:
return value1 == value2
else:
return value1 == value2
return True
|
def verify_map_equals(values_map1, values_map2, equality_map):
if not equality_map:
return True
if not values_map1 or len(values_map1) == 0 or not values_map2 or len(values_map2) == 0:
return False
for key in equality_map:
if key not in values_map1 or key not in values_map2:
return False
value1 = values_map1[key]
value2 = values_map2[key]
if isinstance(value1, basestring) and isinstance(value2, basestring):
is_values_equals = is_text_equal_by_x_different_words(values_map1[key], values_map2[key], equality_map[key])
if not is_values_equals:
return False
elif isinstance(value1, list) and isinstance(value2, list):
try:
return set(value1) == set(value2)
except Exception:
return value1 == value2
else:
return value1 == value2
return True
|
14,570 |
def eye(
timebase,
is_alive_flag,
ipc_pub_url,
ipc_sub_url,
ipc_push_url,
user_dir,
version,
eye_id,
overwrite_cap_settings=None,
hide_ui=False,
hwm=None,
):
"""reads eye video and detects the pupil.
Creates a window, gl context.
Grabs images from a capture.
Streams Pupil coordinates.
Reacts to notifications:
``set_detection_mapping_mode``: Sets detection method
``eye_process.should_stop``: Stops the eye process
``recording.started``: Starts recording eye video
``recording.stopped``: Stops recording eye video
``frame_publishing.started``: Starts frame publishing
``frame_publishing.stopped``: Stops frame publishing
``start_eye_plugin``: Start plugins in eye process
Emits notifications:
``eye_process.started``: Eye process started
``eye_process.stopped``: Eye process stopped
Emits data:
``pupil.<eye id>``: Pupil data for eye with id ``<eye id>``
``frame.eye.<eye id>``: Eye frames with id ``<eye id>``
"""
    # We defer the imports because of multiprocessing.
    # Otherwise the world process and each eye process would also load the other's imports.
import zmq
import zmq_tools
zmq_ctx = zmq.Context()
ipc_socket = zmq_tools.Msg_Dispatcher(zmq_ctx, ipc_push_url)
pupil_socket = zmq_tools.Msg_Streamer(zmq_ctx, ipc_pub_url, hwm)
notify_sub = zmq_tools.Msg_Receiver(zmq_ctx, ipc_sub_url, topics=("notify",))
# logging setup
import logging
logging.getLogger("OpenGL").setLevel(logging.ERROR)
logger = logging.getLogger()
logger.handlers = []
logger.setLevel(logging.NOTSET)
logger.addHandler(zmq_tools.ZMQ_handler(zmq_ctx, ipc_push_url))
# create logger for the context of this function
logger = logging.getLogger(__name__)
if is_alive_flag.value:
        # indicates that this is a duplicated eye process startup
logger.warning("Aborting redundant eye process startup")
return
with Is_Alive_Manager(is_alive_flag, ipc_socket, eye_id, logger):
# general imports
import traceback
import numpy as np
import cv2
# display
import glfw
from pyglui import ui, graph, cygl
from pyglui.cygl.utils import draw_points, RGBA, draw_polyline
from pyglui.cygl.utils import Named_Texture
from gl_utils import basic_gl_setup, adjust_gl_view, clear_gl_screen
from gl_utils import make_coord_system_pixel_based
from gl_utils import make_coord_system_norm_based
from gl_utils import is_window_visible, glViewport
# monitoring
import psutil
# Plug-ins
from plugin import Plugin_List
# helpers/utils
from uvc import get_time_monotonic
from file_methods import Persistent_Dict
from version_utils import VersionFormat
from methods import normalize, denormalize, timer
from av_writer import JPEG_Writer, MPEG_Writer, NonMonotonicTimestampError
from ndsi import H264Writer
from video_capture import source_classes, manager_classes
from roi import Roi
from background_helper import IPC_Logging_Task_Proxy
from pupil_detector_plugins import available_detector_plugins
from pupil_detector_plugins.manager import PupilDetectorManager
IPC_Logging_Task_Proxy.push_url = ipc_push_url
def interrupt_handler(sig, frame):
import traceback
trace = traceback.format_stack(f=frame)
logger.debug(f"Caught signal {sig} in:\n" + "".join(trace))
# NOTE: Interrupt is handled in world/service/player which are responsible for
# shutting down the eye process properly
signal.signal(signal.SIGINT, interrupt_handler)
# UI Platform tweaks
if platform.system() == "Linux":
scroll_factor = 10.0
window_position_default = (600, 300 * eye_id + 30)
elif platform.system() == "Windows":
scroll_factor = 10.0
window_position_default = (600, 90 + 300 * eye_id)
else:
scroll_factor = 1.0
window_position_default = (600, 300 * eye_id)
icon_bar_width = 50
window_size = None
hdpi_factor = 1.0
# g_pool holds variables for this process
g_pool = SimpleNamespace()
        # make some constants available
g_pool.user_dir = user_dir
g_pool.version = version
g_pool.app = "capture"
g_pool.eye_id = eye_id
g_pool.process = f"eye{eye_id}"
g_pool.timebase = timebase
g_pool.camera_render_size = None
g_pool.ipc_pub = ipc_socket
def get_timestamp():
return get_time_monotonic() - g_pool.timebase.value
g_pool.get_timestamp = get_timestamp
g_pool.get_now = get_time_monotonic
default_detector_cls, available_detectors = available_detector_plugins()
plugins = (
manager_classes
+ source_classes
+ available_detectors
+ [PupilDetectorManager, Roi]
)
g_pool.plugin_by_name = {p.__name__: p for p in plugins}
preferred_names = [
f"Pupil Cam3 ID{eye_id}",
f"Pupil Cam2 ID{eye_id}",
f"Pupil Cam1 ID{eye_id}",
]
if eye_id == 0:
preferred_names += ["HD-6000"]
default_capture_name = "UVC_Source"
default_capture_settings = {
"preferred_names": preferred_names,
"frame_size": (320, 240),
"frame_rate": 120,
}
default_plugins = [
# TODO: extend with plugins
(default_capture_name, default_capture_settings),
("UVC_Manager", {}),
("NDSI_Manager", {}),
("HMD_Streaming_Manager", {}),
("File_Manager", {}),
# Detector needs to be loaded first to set `g_pool.pupil_detector`
(default_detector_cls.__name__, {}),
("PupilDetectorManager", {}),
("Roi", {}),
]
# Callback functions
def on_resize(window, w, h):
nonlocal window_size
nonlocal hdpi_factor
active_window = glfw.glfwGetCurrentContext()
glfw.glfwMakeContextCurrent(window)
hdpi_factor = glfw.getHDPIFactor(window)
g_pool.gui.scale = g_pool.gui_user_scale * hdpi_factor
window_size = w, h
g_pool.camera_render_size = w - int(icon_bar_width * g_pool.gui.scale), h
g_pool.gui.update_window(w, h)
g_pool.gui.collect_menus()
for g in g_pool.graphs:
g.scale = hdpi_factor
g.adjust_window_size(w, h)
adjust_gl_view(w, h)
glfw.glfwMakeContextCurrent(active_window)
def on_window_key(window, key, scancode, action, mods):
g_pool.gui.update_key(key, scancode, action, mods)
def on_window_char(window, char):
g_pool.gui.update_char(char)
def on_iconify(window, iconified):
g_pool.iconified = iconified
def on_window_mouse_button(window, button, action, mods):
g_pool.gui.update_button(button, action, mods)
def on_pos(window, x, y):
x, y = x * hdpi_factor, y * hdpi_factor
g_pool.gui.update_mouse(x, y)
pos = x, y
pos = normalize(pos, g_pool.camera_render_size)
if g_pool.flip:
pos = 1 - pos[0], 1 - pos[1]
# Position in img pixels
pos = denormalize(pos, g_pool.capture.frame_size)
for p in g_pool.plugins:
p.on_pos(pos)
def on_scroll(window, x, y):
g_pool.gui.update_scroll(x, y * scroll_factor)
def on_drop(window, count, paths):
paths = [paths[x].decode("utf-8") for x in range(count)]
for plugin in g_pool.plugins:
if plugin.on_drop(paths):
break
# load session persistent settings
session_settings = Persistent_Dict(
os.path.join(g_pool.user_dir, "user_settings_eye{}".format(eye_id))
)
if VersionFormat(session_settings.get("version", "0.0")) != g_pool.version:
logger.info(
"Session setting are from a different version of this app. I will not use those."
)
session_settings.clear()
g_pool.iconified = False
g_pool.capture = None
g_pool.flip = session_settings.get("flip", False)
g_pool.display_mode = session_settings.get("display_mode", "camera_image")
g_pool.display_mode_info_text = {
"camera_image": "Raw eye camera image. This uses the least amount of CPU power",
"roi": "Click and drag on the blue circles to adjust the region of interest. The region should be as small as possible, but large enough to capture all pupil movements.",
"algorithm": "Algorithm display mode overlays a visualization of the pupil detection parameters on top of the eye video. Adjust parameters within the Pupil Detection menu below.",
}
def set_display_mode_info(val):
g_pool.display_mode = val
g_pool.display_mode_info.text = g_pool.display_mode_info_text[val]
def toggle_general_settings(collapsed):
# this is the menu toggle logic.
# Only one menu can be open.
# If no menu is open the menubar should collapse.
g_pool.menubar.collapsed = collapsed
for m in g_pool.menubar.elements:
m.collapsed = True
general_settings.collapsed = collapsed
# Initialize glfw
glfw.glfwInit()
if hide_ui:
glfw.glfwWindowHint(glfw.GLFW_VISIBLE, 0) # hide window
title = "Pupil Capture - eye {}".format(eye_id)
width, height = session_settings.get("window_size", (640 + icon_bar_width, 480))
main_window = glfw.glfwCreateWindow(width, height, title, None, None)
window_pos = session_settings.get("window_position", window_position_default)
glfw.glfwSetWindowPos(main_window, window_pos[0], window_pos[1])
glfw.glfwMakeContextCurrent(main_window)
cygl.utils.init()
# UI callback functions
def set_scale(new_scale):
g_pool.gui_user_scale = new_scale
on_resize(main_window, *glfw.glfwGetFramebufferSize(main_window))
# gl_state settings
basic_gl_setup()
g_pool.image_tex = Named_Texture()
g_pool.image_tex.update_from_ndarray(np.ones((1, 1), dtype=np.uint8) + 125)
# setup GUI
g_pool.gui = ui.UI()
g_pool.gui_user_scale = session_settings.get("gui_scale", 1.0)
g_pool.menubar = ui.Scrolling_Menu(
"Settings", pos=(-500, 0), size=(-icon_bar_width, 0), header_pos="left"
)
g_pool.iconbar = ui.Scrolling_Menu(
"Icons", pos=(-icon_bar_width, 0), size=(0, 0), header_pos="hidden"
)
g_pool.gui.append(g_pool.menubar)
g_pool.gui.append(g_pool.iconbar)
general_settings = ui.Growing_Menu("General", header_pos="headline")
general_settings.append(
ui.Selector(
"gui_user_scale",
g_pool,
setter=set_scale,
selection=[0.8, 0.9, 1.0, 1.1, 1.2],
label="Interface Size",
)
)
def set_window_size():
f_width, f_height = g_pool.capture.frame_size
f_width *= 2
f_height *= 2
f_width += int(icon_bar_width * g_pool.gui.scale)
glfw.glfwSetWindowSize(main_window, f_width, f_height)
general_settings.append(ui.Button("Reset window size", set_window_size))
g_pool.hwm = pupil_socket.get_hwm()
def update_hwm(new_hwm):
g_pool.hwm = new_hwm
pupil_socket.set_hwm(new_hwm)
general_settings.append(ui.Text_Input("hwm", g_pool, setter=update_hwm, label="ZMQ High Water Mark"))
general_settings.append(ui.Switch("flip", g_pool, label="Flip image display"))
general_settings.append(
ui.Selector(
"display_mode",
g_pool,
setter=set_display_mode_info,
selection=["camera_image", "roi", "algorithm"],
labels=["Camera Image", "ROI", "Algorithm"],
label="Mode",
)
)
g_pool.display_mode_info = ui.Info_Text(
g_pool.display_mode_info_text[g_pool.display_mode]
)
general_settings.append(g_pool.display_mode_info)
g_pool.menubar.append(general_settings)
icon = ui.Icon(
"collapsed",
general_settings,
label=chr(0xE8B8),
on_val=False,
off_val=True,
setter=toggle_general_settings,
label_font="pupil_icons",
)
icon.tooltip = "General Settings"
g_pool.iconbar.append(icon)
toggle_general_settings(False)
plugins_to_load = session_settings.get("loaded_plugins", default_plugins)
if overwrite_cap_settings:
# Ensure that overwrite_cap_settings takes preference over source plugins
# with incorrect settings that were loaded from session settings.
plugins_to_load.append(overwrite_cap_settings)
g_pool.plugins = Plugin_List(g_pool, plugins_to_load)
if not g_pool.capture:
# Make sure we always have a capture running. Important if there was no
# capture stored in session settings.
g_pool.plugins.add(
g_pool.plugin_by_name[default_capture_name], default_capture_settings
)
g_pool.writer = None
# Register callbacks main_window
glfw.glfwSetFramebufferSizeCallback(main_window, on_resize)
glfw.glfwSetWindowIconifyCallback(main_window, on_iconify)
glfw.glfwSetKeyCallback(main_window, on_window_key)
glfw.glfwSetCharCallback(main_window, on_window_char)
glfw.glfwSetMouseButtonCallback(main_window, on_window_mouse_button)
glfw.glfwSetCursorPosCallback(main_window, on_pos)
glfw.glfwSetScrollCallback(main_window, on_scroll)
glfw.glfwSetDropCallback(main_window, on_drop)
# load last gui configuration
g_pool.gui.configuration = session_settings.get("ui_config", {})
# set up performance graphs
pid = os.getpid()
ps = psutil.Process(pid)
ts = g_pool.get_timestamp()
cpu_graph = graph.Bar_Graph()
cpu_graph.pos = (20, 50)
cpu_graph.update_fn = ps.cpu_percent
cpu_graph.update_rate = 5
cpu_graph.label = "CPU %0.1f"
fps_graph = graph.Bar_Graph()
fps_graph.pos = (140, 50)
fps_graph.update_rate = 5
fps_graph.label = "%0.0f FPS"
g_pool.graphs = [cpu_graph, fps_graph]
# set the last saved window size
on_resize(main_window, *glfw.glfwGetFramebufferSize(main_window))
should_publish_frames = False
frame_publish_format = "jpeg"
frame_publish_format_recent_warning = False
# create a timer to control window update frequency
window_update_timer = timer(1 / 60)
def window_should_update():
return next(window_update_timer)
logger.warning("Process started.")
frame = None
# Event loop
while not glfw.glfwWindowShouldClose(main_window):
if notify_sub.new_data:
t, notification = notify_sub.recv()
subject = notification["subject"]
if subject.startswith("eye_process.should_stop"):
if notification["eye_id"] == eye_id:
break
elif subject == "recording.started":
if notification["record_eye"] and g_pool.capture.online:
record_path = notification["rec_path"]
raw_mode = notification["compression"]
start_time_synced = notification["start_time_synced"]
logger.info("Will save eye video to: {}".format(record_path))
video_path = os.path.join(
record_path, "eye{}.mp4".format(eye_id)
)
if raw_mode and frame and g_pool.capture.jpeg_support:
g_pool.writer = JPEG_Writer(video_path, start_time_synced)
elif hasattr(g_pool.capture._recent_frame, "h264_buffer"):
g_pool.writer = H264Writer(
video_path,
g_pool.capture.frame_size[0],
g_pool.capture.frame_size[1],
g_pool.capture.frame_rate,
)
else:
g_pool.writer = MPEG_Writer(video_path, start_time_synced)
elif subject == "recording.stopped":
if g_pool.writer:
logger.info("Done recording.")
try:
g_pool.writer.release()
except RuntimeError:
logger.error("No eye video recorded")
g_pool.writer = None
elif subject.startswith("meta.should_doc"):
ipc_socket.notify(
{
"subject": "meta.doc",
"actor": "eye{}".format(eye_id),
"doc": eye.__doc__,
}
)
elif subject.startswith("frame_publishing.started"):
should_publish_frames = True
frame_publish_format = notification.get("format", "jpeg")
elif subject.startswith("frame_publishing.stopped"):
should_publish_frames = False
frame_publish_format = "jpeg"
elif (
subject.startswith("start_eye_plugin")
and notification["target"] == g_pool.process
):
try:
g_pool.plugins.add(
g_pool.plugin_by_name[notification["name"]],
notification.get("args", {}),
)
except KeyError as err:
logger.error(f"Attempt to load unknown plugin: {err}")
elif subject.startswith("eye_stream.set_zmq_option.hwm"):
if notification["eye_id"] == eye_id:
update_hwm(notification['hwm'])
for plugin in g_pool.plugins:
plugin.on_notify(notification)
event = {}
for plugin in g_pool.plugins:
plugin.recent_events(event)
frame = event.get("frame")
if frame:
if should_publish_frames:
try:
if frame_publish_format == "jpeg":
data = frame.jpeg_buffer
elif frame_publish_format == "yuv":
data = frame.yuv_buffer
elif frame_publish_format == "bgr":
data = frame.bgr
elif frame_publish_format == "gray":
data = frame.gray
assert data is not None
except (AttributeError, AssertionError, NameError):
if not frame_publish_format_recent_warning:
frame_publish_format_recent_warning = True
logger.warning(
'{}s are not compatible with format "{}"'.format(
type(frame), frame_publish_format
)
)
else:
frame_publish_format_recent_warning = False
pupil_socket.send(
{
"topic": "frame.eye.{}".format(eye_id),
"width": frame.width,
"height": frame.height,
"index": frame.index,
"timestamp": frame.timestamp,
"format": frame_publish_format,
"__raw_data__": [data],
}
)
t = frame.timestamp
dt, ts = t - ts, t
try:
fps_graph.add(1.0 / dt)
except ZeroDivisionError:
pass
if g_pool.writer:
try:
g_pool.writer.write_video_frame(frame)
except NonMonotonicTimestampError as e:
logger.error(
"Recorder received non-monotonic timestamp!"
" Stopping the recording!"
)
logger.debug(str(e))
ipc_socket.notify({"subject": "recording.should_stop"})
ipc_socket.notify(
{"subject": "recording.should_stop", "remote_notify": "all"}
)
result = event.get("pupil_detection_result", None)
if result is not None:
pupil_socket.send(result)
cpu_graph.update()
# GL drawing
if window_should_update():
if is_window_visible(main_window):
glfw.glfwMakeContextCurrent(main_window)
clear_gl_screen()
glViewport(0, 0, *g_pool.camera_render_size)
for p in g_pool.plugins:
p.gl_display()
glViewport(0, 0, *window_size)
# render graphs
fps_graph.draw()
cpu_graph.draw()
# render GUI
try:
clipboard = glfw.glfwGetClipboardString(main_window).decode()
except AttributeError: # clipboard is None, might happen on startup
clipboard = ""
g_pool.gui.update_clipboard(clipboard)
user_input = g_pool.gui.update()
if user_input.clipboard != clipboard:
# only write to clipboard if content changed
glfw.glfwSetClipboardString(
main_window, user_input.clipboard.encode()
)
for button, action, mods in user_input.buttons:
x, y = glfw.glfwGetCursorPos(main_window)
pos = x * hdpi_factor, y * hdpi_factor
pos = normalize(pos, g_pool.camera_render_size)
if g_pool.flip:
pos = 1 - pos[0], 1 - pos[1]
# Position in img pixels
pos = denormalize(pos, g_pool.capture.frame_size)
for plugin in g_pool.plugins:
if plugin.on_click(pos, button, action):
break
for key, scancode, action, mods in user_input.keys:
for plugin in g_pool.plugins:
if plugin.on_key(key, scancode, action, mods):
break
for char_ in user_input.chars:
for plugin in g_pool.plugins:
if plugin.on_char(char_):
break
# update screen
glfw.glfwSwapBuffers(main_window)
glfw.glfwPollEvents()
# END while running
# in case eye recording was still running: save & close
if g_pool.writer:
logger.info("Done recording eye.")
g_pool.writer.release()
g_pool.writer = None
session_settings["loaded_plugins"] = g_pool.plugins.get_initializers()
# save session persistent settings
session_settings["gui_scale"] = g_pool.gui_user_scale
session_settings["flip"] = g_pool.flip
session_settings["display_mode"] = g_pool.display_mode
session_settings["ui_config"] = g_pool.gui.configuration
session_settings["version"] = str(g_pool.version)
if not hide_ui:
glfw.glfwRestoreWindow(main_window) # need to do this for windows os
session_settings["window_position"] = glfw.glfwGetWindowPos(main_window)
session_window_size = glfw.glfwGetWindowSize(main_window)
if 0 not in session_window_size:
session_settings["window_size"] = session_window_size
session_settings.close()
for plugin in g_pool.plugins:
plugin.alive = False
g_pool.plugins.clean()
glfw.glfwDestroyWindow(main_window)
g_pool.gui.terminate()
glfw.glfwTerminate()
logger.info("Process shutting down.")
|
def eye(
timebase,
is_alive_flag,
ipc_pub_url,
ipc_sub_url,
ipc_push_url,
user_dir,
version,
eye_id,
overwrite_cap_settings=None,
hide_ui=False,
hwm=None,
):
"""reads eye video and detects the pupil.
Creates a window, gl context.
Grabs images from a capture.
Streams Pupil coordinates.
Reacts to notifications:
``set_detection_mapping_mode``: Sets detection method
``eye_process.should_stop``: Stops the eye process
``recording.started``: Starts recording eye video
``recording.stopped``: Stops recording eye video
``frame_publishing.started``: Starts frame publishing
``frame_publishing.stopped``: Stops frame publishing
``start_eye_plugin``: Start plugins in eye process
Emits notifications:
``eye_process.started``: Eye process started
``eye_process.stopped``: Eye process stopped
Emits data:
``pupil.<eye id>``: Pupil data for eye with id ``<eye id>``
``frame.eye.<eye id>``: Eye frames with id ``<eye id>``
"""
# We defer the imports because of multiprocessing.
# Otherwise the world process and each eye process would also load each other's imports.
import zmq
import zmq_tools
zmq_ctx = zmq.Context()
ipc_socket = zmq_tools.Msg_Dispatcher(zmq_ctx, ipc_push_url)
pupil_socket = zmq_tools.Msg_Streamer(zmq_ctx, ipc_pub_url, hwm)
notify_sub = zmq_tools.Msg_Receiver(zmq_ctx, ipc_sub_url, topics=("notify",))
# logging setup
import logging
logging.getLogger("OpenGL").setLevel(logging.ERROR)
logger = logging.getLogger()
logger.handlers = []
logger.setLevel(logging.NOTSET)
logger.addHandler(zmq_tools.ZMQ_handler(zmq_ctx, ipc_push_url))
# create logger for the context of this function
logger = logging.getLogger(__name__)
if is_alive_flag.value:
# indicates eye process that this is a duplicated startup
logger.warning("Aborting redundant eye process startup")
return
with Is_Alive_Manager(is_alive_flag, ipc_socket, eye_id, logger):
# general imports
import traceback
import numpy as np
import cv2
# display
import glfw
from pyglui import ui, graph, cygl
from pyglui.cygl.utils import draw_points, RGBA, draw_polyline
from pyglui.cygl.utils import Named_Texture
from gl_utils import basic_gl_setup, adjust_gl_view, clear_gl_screen
from gl_utils import make_coord_system_pixel_based
from gl_utils import make_coord_system_norm_based
from gl_utils import is_window_visible, glViewport
# monitoring
import psutil
# Plug-ins
from plugin import Plugin_List
# helpers/utils
from uvc import get_time_monotonic
from file_methods import Persistent_Dict
from version_utils import VersionFormat
from methods import normalize, denormalize, timer
from av_writer import JPEG_Writer, MPEG_Writer, NonMonotonicTimestampError
from ndsi import H264Writer
from video_capture import source_classes, manager_classes
from roi import Roi
from background_helper import IPC_Logging_Task_Proxy
from pupil_detector_plugins import available_detector_plugins
from pupil_detector_plugins.manager import PupilDetectorManager
IPC_Logging_Task_Proxy.push_url = ipc_push_url
def interrupt_handler(sig, frame):
import traceback
trace = traceback.format_stack(f=frame)
logger.debug(f"Caught signal {sig} in:\n" + "".join(trace))
# NOTE: Interrupt is handled in world/service/player which are responsible for
# shutting down the eye process properly
signal.signal(signal.SIGINT, interrupt_handler)
# UI Platform tweaks
if platform.system() == "Linux":
scroll_factor = 10.0
window_position_default = (600, 300 * eye_id + 30)
elif platform.system() == "Windows":
scroll_factor = 10.0
window_position_default = (600, 90 + 300 * eye_id)
else:
scroll_factor = 1.0
window_position_default = (600, 300 * eye_id)
icon_bar_width = 50
window_size = None
hdpi_factor = 1.0
# g_pool holds variables for this process
g_pool = SimpleNamespace()
# make some constants available
g_pool.user_dir = user_dir
g_pool.version = version
g_pool.app = "capture"
g_pool.eye_id = eye_id
g_pool.process = f"eye{eye_id}"
g_pool.timebase = timebase
g_pool.camera_render_size = None
g_pool.ipc_pub = ipc_socket
def get_timestamp():
return get_time_monotonic() - g_pool.timebase.value
g_pool.get_timestamp = get_timestamp
g_pool.get_now = get_time_monotonic
default_detector_cls, available_detectors = available_detector_plugins()
plugins = (
manager_classes
+ source_classes
+ available_detectors
+ [PupilDetectorManager, Roi]
)
g_pool.plugin_by_name = {p.__name__: p for p in plugins}
preferred_names = [
f"Pupil Cam3 ID{eye_id}",
f"Pupil Cam2 ID{eye_id}",
f"Pupil Cam1 ID{eye_id}",
]
if eye_id == 0:
preferred_names += ["HD-6000"]
default_capture_name = "UVC_Source"
default_capture_settings = {
"preferred_names": preferred_names,
"frame_size": (320, 240),
"frame_rate": 120,
}
default_plugins = [
# TODO: extend with plugins
(default_capture_name, default_capture_settings),
("UVC_Manager", {}),
("NDSI_Manager", {}),
("HMD_Streaming_Manager", {}),
("File_Manager", {}),
# Detector needs to be loaded first to set `g_pool.pupil_detector`
(default_detector_cls.__name__, {}),
("PupilDetectorManager", {}),
("Roi", {}),
]
# Callback functions
def on_resize(window, w, h):
nonlocal window_size
nonlocal hdpi_factor
active_window = glfw.glfwGetCurrentContext()
glfw.glfwMakeContextCurrent(window)
hdpi_factor = glfw.getHDPIFactor(window)
g_pool.gui.scale = g_pool.gui_user_scale * hdpi_factor
window_size = w, h
g_pool.camera_render_size = w - int(icon_bar_width * g_pool.gui.scale), h
g_pool.gui.update_window(w, h)
g_pool.gui.collect_menus()
for g in g_pool.graphs:
g.scale = hdpi_factor
g.adjust_window_size(w, h)
adjust_gl_view(w, h)
glfw.glfwMakeContextCurrent(active_window)
def on_window_key(window, key, scancode, action, mods):
g_pool.gui.update_key(key, scancode, action, mods)
def on_window_char(window, char):
g_pool.gui.update_char(char)
def on_iconify(window, iconified):
g_pool.iconified = iconified
def on_window_mouse_button(window, button, action, mods):
g_pool.gui.update_button(button, action, mods)
def on_pos(window, x, y):
x, y = x * hdpi_factor, y * hdpi_factor
g_pool.gui.update_mouse(x, y)
pos = x, y
pos = normalize(pos, g_pool.camera_render_size)
if g_pool.flip:
pos = 1 - pos[0], 1 - pos[1]
# Position in img pixels
pos = denormalize(pos, g_pool.capture.frame_size)
for p in g_pool.plugins:
p.on_pos(pos)
def on_scroll(window, x, y):
g_pool.gui.update_scroll(x, y * scroll_factor)
def on_drop(window, count, paths):
paths = [paths[x].decode("utf-8") for x in range(count)]
for plugin in g_pool.plugins:
if plugin.on_drop(paths):
break
# load session persistent settings
session_settings = Persistent_Dict(
os.path.join(g_pool.user_dir, "user_settings_eye{}".format(eye_id))
)
if VersionFormat(session_settings.get("version", "0.0")) != g_pool.version:
logger.info(
"Session settings are from a different version of this app. I will not use those."
)
session_settings.clear()
g_pool.iconified = False
g_pool.capture = None
g_pool.flip = session_settings.get("flip", False)
g_pool.display_mode = session_settings.get("display_mode", "camera_image")
g_pool.display_mode_info_text = {
"camera_image": "Raw eye camera image. This uses the least amount of CPU power",
"roi": "Click and drag on the blue circles to adjust the region of interest. The region should be as small as possible, but large enough to capture all pupil movements.",
"algorithm": "Algorithm display mode overlays a visualization of the pupil detection parameters on top of the eye video. Adjust parameters within the Pupil Detection menu below.",
}
def set_display_mode_info(val):
g_pool.display_mode = val
g_pool.display_mode_info.text = g_pool.display_mode_info_text[val]
def toggle_general_settings(collapsed):
# this is the menu toggle logic.
# Only one menu can be open.
# If no menu is open the menubar should collapse.
g_pool.menubar.collapsed = collapsed
for m in g_pool.menubar.elements:
m.collapsed = True
general_settings.collapsed = collapsed
# Initialize glfw
glfw.glfwInit()
if hide_ui:
glfw.glfwWindowHint(glfw.GLFW_VISIBLE, 0) # hide window
title = "Pupil Capture - eye {}".format(eye_id)
width, height = session_settings.get("window_size", (640 + icon_bar_width, 480))
main_window = glfw.glfwCreateWindow(width, height, title, None, None)
window_pos = session_settings.get("window_position", window_position_default)
glfw.glfwSetWindowPos(main_window, window_pos[0], window_pos[1])
glfw.glfwMakeContextCurrent(main_window)
cygl.utils.init()
# UI callback functions
def set_scale(new_scale):
g_pool.gui_user_scale = new_scale
on_resize(main_window, *glfw.glfwGetFramebufferSize(main_window))
# gl_state settings
basic_gl_setup()
g_pool.image_tex = Named_Texture()
g_pool.image_tex.update_from_ndarray(np.ones((1, 1), dtype=np.uint8) + 125)
# setup GUI
g_pool.gui = ui.UI()
g_pool.gui_user_scale = session_settings.get("gui_scale", 1.0)
g_pool.menubar = ui.Scrolling_Menu(
"Settings", pos=(-500, 0), size=(-icon_bar_width, 0), header_pos="left"
)
g_pool.iconbar = ui.Scrolling_Menu(
"Icons", pos=(-icon_bar_width, 0), size=(0, 0), header_pos="hidden"
)
g_pool.gui.append(g_pool.menubar)
g_pool.gui.append(g_pool.iconbar)
general_settings = ui.Growing_Menu("General", header_pos="headline")
general_settings.append(
ui.Selector(
"gui_user_scale",
g_pool,
setter=set_scale,
selection=[0.8, 0.9, 1.0, 1.1, 1.2],
label="Interface Size",
)
)
def set_window_size():
f_width, f_height = g_pool.capture.frame_size
f_width *= 2
f_height *= 2
f_width += int(icon_bar_width * g_pool.gui.scale)
glfw.glfwSetWindowSize(main_window, f_width, f_height)
general_settings.append(ui.Button("Reset window size", set_window_size))
g_pool.hwm = pupil_socket.get_hwm()
def update_hwm(new_hwm):
g_pool.hwm = new_hwm
pupil_socket.set_hwm(new_hwm)
general_settings.append(ui.Text_Input("hwm", g_pool, setter=update_hwm, label="ZMQ High Water Mark"))
general_settings.append(ui.Switch("flip", g_pool, label="Flip image display"))
general_settings.append(
ui.Selector(
"display_mode",
g_pool,
setter=set_display_mode_info,
selection=["camera_image", "roi", "algorithm"],
labels=["Camera Image", "ROI", "Algorithm"],
label="Mode",
)
)
g_pool.display_mode_info = ui.Info_Text(
g_pool.display_mode_info_text[g_pool.display_mode]
)
general_settings.append(g_pool.display_mode_info)
g_pool.menubar.append(general_settings)
icon = ui.Icon(
"collapsed",
general_settings,
label=chr(0xE8B8),
on_val=False,
off_val=True,
setter=toggle_general_settings,
label_font="pupil_icons",
)
icon.tooltip = "General Settings"
g_pool.iconbar.append(icon)
toggle_general_settings(False)
plugins_to_load = session_settings.get("loaded_plugins", default_plugins)
if overwrite_cap_settings:
# Ensure that overwrite_cap_settings takes preference over source plugins
# with incorrect settings that were loaded from session settings.
plugins_to_load.append(overwrite_cap_settings)
g_pool.plugins = Plugin_List(g_pool, plugins_to_load)
if not g_pool.capture:
# Make sure we always have a capture running. Important if there was no
# capture stored in session settings.
g_pool.plugins.add(
g_pool.plugin_by_name[default_capture_name], default_capture_settings
)
g_pool.writer = None
# Register callbacks for main_window
glfw.glfwSetFramebufferSizeCallback(main_window, on_resize)
glfw.glfwSetWindowIconifyCallback(main_window, on_iconify)
glfw.glfwSetKeyCallback(main_window, on_window_key)
glfw.glfwSetCharCallback(main_window, on_window_char)
glfw.glfwSetMouseButtonCallback(main_window, on_window_mouse_button)
glfw.glfwSetCursorPosCallback(main_window, on_pos)
glfw.glfwSetScrollCallback(main_window, on_scroll)
glfw.glfwSetDropCallback(main_window, on_drop)
# load last gui configuration
g_pool.gui.configuration = session_settings.get("ui_config", {})
# set up performance graphs
pid = os.getpid()
ps = psutil.Process(pid)
ts = g_pool.get_timestamp()
cpu_graph = graph.Bar_Graph()
cpu_graph.pos = (20, 50)
cpu_graph.update_fn = ps.cpu_percent
cpu_graph.update_rate = 5
cpu_graph.label = "CPU %0.1f"
fps_graph = graph.Bar_Graph()
fps_graph.pos = (140, 50)
fps_graph.update_rate = 5
fps_graph.label = "%0.0f FPS"
g_pool.graphs = [cpu_graph, fps_graph]
# set the last saved window size
on_resize(main_window, *glfw.glfwGetFramebufferSize(main_window))
should_publish_frames = False
frame_publish_format = "jpeg"
frame_publish_format_recent_warning = False
# create a timer to control window update frequency
window_update_timer = timer(1 / 60)
def window_should_update():
return next(window_update_timer)
logger.warning("Process started.")
frame = None
# Event loop
while not glfw.glfwWindowShouldClose(main_window):
if notify_sub.new_data:
t, notification = notify_sub.recv()
subject = notification["subject"]
if subject.startswith("eye_process.should_stop"):
if notification["eye_id"] == eye_id:
break
elif subject == "recording.started":
if notification["record_eye"] and g_pool.capture.online:
record_path = notification["rec_path"]
raw_mode = notification["compression"]
start_time_synced = notification["start_time_synced"]
logger.info("Will save eye video to: {}".format(record_path))
video_path = os.path.join(
record_path, "eye{}.mp4".format(eye_id)
)
if raw_mode and frame and g_pool.capture.jpeg_support:
g_pool.writer = JPEG_Writer(video_path, start_time_synced)
elif hasattr(g_pool.capture._recent_frame, "h264_buffer"):
g_pool.writer = H264Writer(
video_path,
g_pool.capture.frame_size[0],
g_pool.capture.frame_size[1],
g_pool.capture.frame_rate,
)
else:
g_pool.writer = MPEG_Writer(video_path, start_time_synced)
elif subject == "recording.stopped":
if g_pool.writer:
logger.info("Done recording.")
try:
g_pool.writer.release()
except RuntimeError:
logger.error("No eye video recorded")
g_pool.writer = None
elif subject.startswith("meta.should_doc"):
ipc_socket.notify(
{
"subject": "meta.doc",
"actor": "eye{}".format(eye_id),
"doc": eye.__doc__,
}
)
elif subject.startswith("frame_publishing.started"):
should_publish_frames = True
frame_publish_format = notification.get("format", "jpeg")
elif subject.startswith("frame_publishing.stopped"):
should_publish_frames = False
frame_publish_format = "jpeg"
elif (
subject.startswith("start_eye_plugin")
and notification["target"] == g_pool.process
):
try:
g_pool.plugins.add(
g_pool.plugin_by_name[notification["name"]],
notification.get("args", {}),
)
except KeyError as err:
logger.error(f"Attempt to load unknown plugin: {err}")
elif subject.startswith("eye_stream.set_zmq_option.hwm"):
if notification["eye_id"] == eye_id:
update_hwm(notification['hwm'])
for plugin in g_pool.plugins:
plugin.on_notify(notification)
event = {}
for plugin in g_pool.plugins:
plugin.recent_events(event)
frame = event.get("frame")
if frame:
if should_publish_frames:
try:
if frame_publish_format == "jpeg":
data = frame.jpeg_buffer
elif frame_publish_format == "yuv":
data = frame.yuv_buffer
elif frame_publish_format == "bgr":
data = frame.bgr
elif frame_publish_format == "gray":
data = frame.gray
assert data is not None
except (AttributeError, AssertionError, NameError):
if not frame_publish_format_recent_warning:
frame_publish_format_recent_warning = True
logger.warning(
'{}s are not compatible with format "{}"'.format(
type(frame), frame_publish_format
)
)
else:
frame_publish_format_recent_warning = False
pupil_socket.send(
{
"topic": "frame.eye.{}".format(eye_id),
"width": frame.width,
"height": frame.height,
"index": frame.index,
"timestamp": frame.timestamp,
"format": frame_publish_format,
"__raw_data__": [data],
}
)
t = frame.timestamp
dt, ts = t - ts, t
try:
fps_graph.add(1.0 / dt)
except ZeroDivisionError:
pass
if g_pool.writer:
try:
g_pool.writer.write_video_frame(frame)
except NonMonotonicTimestampError as e:
logger.error(
"Recorder received non-monotonic timestamp!"
" Stopping the recording!"
)
logger.debug(str(e))
ipc_socket.notify({"subject": "recording.should_stop"})
ipc_socket.notify(
{"subject": "recording.should_stop", "remote_notify": "all"}
)
result = event.get("pupil_detection_result", None)
if result is not None:
pupil_socket.send(result)
cpu_graph.update()
# GL drawing
if window_should_update():
if is_window_visible(main_window):
glfw.glfwMakeContextCurrent(main_window)
clear_gl_screen()
glViewport(0, 0, *g_pool.camera_render_size)
for p in g_pool.plugins:
p.gl_display()
glViewport(0, 0, *window_size)
# render graphs
fps_graph.draw()
cpu_graph.draw()
# render GUI
try:
clipboard = glfw.glfwGetClipboardString(main_window).decode()
except AttributeError: # clipboard is None, might happen on startup
clipboard = ""
g_pool.gui.update_clipboard(clipboard)
user_input = g_pool.gui.update()
if user_input.clipboard != clipboard:
# only write to clipboard if content changed
glfw.glfwSetClipboardString(
main_window, user_input.clipboard.encode()
)
for button, action, mods in user_input.buttons:
x, y = glfw.glfwGetCursorPos(main_window)
pos = x * hdpi_factor, y * hdpi_factor
pos = normalize(pos, g_pool.camera_render_size)
if g_pool.flip:
pos = 1 - pos[0], 1 - pos[1]
# Position in img pixels
pos = denormalize(pos, g_pool.capture.frame_size)
for plugin in g_pool.plugins:
if plugin.on_click(pos, button, action):
break
for key, scancode, action, mods in user_input.keys:
for plugin in g_pool.plugins:
if plugin.on_key(key, scancode, action, mods):
break
for char_ in user_input.chars:
for plugin in g_pool.plugins:
if plugin.on_char(char_):
break
# update screen
glfw.glfwSwapBuffers(main_window)
glfw.glfwPollEvents()
# END while running
# in case eye recording was still running: save & close
if g_pool.writer:
logger.info("Done recording eye.")
g_pool.writer.release()
g_pool.writer = None
session_settings["loaded_plugins"] = g_pool.plugins.get_initializers()
# save session persistent settings
session_settings["gui_scale"] = g_pool.gui_user_scale
session_settings["flip"] = g_pool.flip
session_settings["display_mode"] = g_pool.display_mode
session_settings["ui_config"] = g_pool.gui.configuration
session_settings["version"] = str(g_pool.version)
if not hide_ui:
glfw.glfwRestoreWindow(main_window) # need to do this for windows os
session_settings["window_position"] = glfw.glfwGetWindowPos(main_window)
session_window_size = glfw.glfwGetWindowSize(main_window)
if 0 not in session_window_size:
session_settings["window_size"] = session_window_size
session_settings.close()
for plugin in g_pool.plugins:
plugin.alive = False
g_pool.plugins.clean()
glfw.glfwDestroyWindow(main_window)
g_pool.gui.terminate()
glfw.glfwTerminate()
logger.info("Process shutting down.")
|
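The notification subjects handled in the event loop of eye() above are normally triggered from other processes over the IPC backbone. Below is a minimal sketch of such a sender, reusing only the zmq_tools.Msg_Dispatcher constructor and notify() call that eye() itself uses; the push URL, the payloads and the eye id are illustrative placeholders, not values taken from this code.

import zmq
import zmq_tools

zmq_ctx = zmq.Context()
# Hypothetical IPC push URL; in a real deployment this comes from the world process.
ipc_socket = zmq_tools.Msg_Dispatcher(zmq_ctx, "tcp://127.0.0.1:50020")

# Ask eye process 0 to start publishing gray frames on the "frame.eye.0" topic.
ipc_socket.notify({"subject": "frame_publishing.started", "format": "gray"})

# Later: stop publishing and shut the eye process down.
ipc_socket.notify({"subject": "frame_publishing.stopped"})
ipc_socket.notify({"subject": "eye_process.should_stop", "eye_id": 0})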
7,313 |
def crop(image, bounding_box, axis=None):
"""Cropping images from a bounding box.
Bounding_box (which is a 2-tuple (min_val, max_val) for each axis)
and (optional) axis for corresponding axis order to bounding_box.
Parameters
----------
image : ndarray
Input array.
bounding_box : list of 2-tuple (min_val, max_val) where min_val < max_val
Bounding box for each axis.
axis : tuple, optional
Axis order for cropping.
If provided, must have the same length as bounding_box.
Default: None
Returns
-------
out : ndarray
Cropped array.
Examples
--------
>>> from skimage import data
>>> from skimage.util.crop import crop
>>> img = data.camera()
>>> img.shape
(512, 512)
>>> cropped_img = crop(img, [(0, 100)])
>>> cropped_img.shape
(100, 512)
>>> cropped_img = crop(img, [(0, 100), (0, 100)])
>>> cropped_img.shape
(100, 100)
>>> cropped_img = crop(img, [(0, 100), (0, 75)], axis=[1, 0])
>>> cropped_img.shape
(75, 100)
"""
# an empty or None bounding_box means there is nothing to crop
if not bounding_box:
return image
# check that the input image is a numpy array
if not isinstance(image, np.ndarray):
raise ValueError("data must be numpy array")
# if not axis provided,
# consider sequential cropping on axis
if not axis:
axis = list(range(len(bounding_box)))
else:
if len(axis) != len(set(axis)):
raise ValueError("axis must be unique")
if len(axis) != len(bounding_box):
raise ValueError("axis and bounding_box must have same length")
if not all(isinstance(a, int) for a in axis):
raise ValueError("axis must be integer")
if not all(a >= 0 for a in axis):
raise ValueError("axis must be positive")
if not all(a < image.ndim for a in axis):
raise ValueError("axis must be less than image.ndim")
bbox_with_axis = list(zip(bounding_box, axis))
# sort by axis index in decreasing order, so boxes can be popped in ascending axis order below
bbox_with_axis.sort(key=lambda x: x[1], reverse=True)
full_bbox_data = []
for idx in range(image.ndim):
if bbox_with_axis and bbox_with_axis[-1][1] == idx:
bbox, _ = bbox_with_axis.pop()
axis_min, axis_max = bbox
if axis_min > axis_max:
raise ValueError(
"In bounding_box, tuple should be sorted (min_val, max_val)")
if axis_min < 0:
raise ValueError("In bounding_box, values must be positive")
if axis_max < 0:
raise ValueError("In bounding_box, values must be positive")
if axis_min > image.shape[idx]:
raise ValueError("Invalid bounding_box!")
if axis_max > image.shape[idx]:
raise ValueError("Invalid bounding_box!")
full_bbox_data.append(range(*bbox))
else:
full_bbox_data.append(range(image.shape[idx]))
return image[np.ix_(*full_bbox_data)]
|
def crop(image, bounding_box, axis=None):
"""Crop an image from a bounding box.
Bounding_box (which is a 2-tuple (min_val, max_val) for each axis)
and (optional) axis for corresponding axis order to bounding_box.
Parameters
----------
image : ndarray
Input array.
bounding_box : list of 2-tuple (min_val, max_val) where min_val < max_val
Bounding box for each axis.
axis : tuple, optional
Axis order for cropping.
If provided, must have the same length as bounding_box.
Default: None
Returns
-------
out : ndarray
Cropped array.
Examples
--------
>>> from skimage import data
>>> from skimage.util.crop import crop
>>> img = data.camera()
>>> img.shape
(512, 512)
>>> cropped_img = crop(img, [(0, 100)])
>>> cropped_img.shape
(100, 512)
>>> cropped_img = crop(img, [(0, 100), (0, 100)])
>>> cropped_img.shape
(100, 100)
>>> cropped_img = crop(img, [(0, 100), (0, 75)], axis=[1, 0])
>>> cropped_img.shape
(75, 100)
"""
# an empty or None bounding_box means there is nothing to crop
if not bounding_box:
return image
# check that the input image is a numpy array
if not isinstance(image, np.ndarray):
raise ValueError("data must be numpy array")
# if not axis provided,
# consider sequential cropping on axis
if not axis:
axis = list(range(len(bounding_box)))
else:
if len(axis) != len(set(axis)):
raise ValueError("axis must be unique")
if len(axis) != len(bounding_box):
raise ValueError("axis and bounding_box must have same length")
if not all(isinstance(a, int) for a in axis):
raise ValueError("axis must be integer")
if not all(a >= 0 for a in axis):
raise ValueError("axis must be positive")
if not all(a < image.ndim for a in axis):
raise ValueError("axis must be less than image.ndim")
bbox_with_axis = list(zip(bounding_box, axis))
# sort by axis index in decreasing order, so boxes can be popped in ascending axis order below
bbox_with_axis.sort(key=lambda x: x[1], reverse=True)
full_bbox_data = []
for idx in range(image.ndim):
if bbox_with_axis and bbox_with_axis[-1][1] == idx:
bbox, _ = bbox_with_axis.pop()
axis_min, axis_max = bbox
if axis_min > axis_max:
raise ValueError(
"In bounding_box, tuple should be sorted (min_val, max_val)")
if axis_min < 0:
raise ValueError("In bounding_box, values must be positive")
if axis_max < 0:
raise ValueError("In bounding_box, values must be positive")
if axis_min > image.shape[idx]:
raise ValueError("Invalid bounding_box!")
if axis_max > image.shape[idx]:
raise ValueError("Invalid bounding_box!")
full_bbox_data.append(range(*bbox))
else:
full_bbox_data.append(range(image.shape[idx]))
return image[np.ix_(*full_bbox_data)]
|
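As a quick illustration of the crop() semantics documented in the record above, here is a minimal sketch with a toy numpy array; it assumes crop() as defined in the record is importable, and the expected shapes follow directly from the docstring examples.

import numpy as np
# from the_module_above import crop   # assumption: crop() as defined above is importable

img = np.arange(20 * 10).reshape(20, 10)

# Default axis order: crop rows 0..5 on axis 0 only, equivalent to img[:5, :].
assert crop(img, [(0, 5)]).shape == (5, 10)

# Explicit axis order: bounding boxes given for (axis 1, axis 0).
out = crop(img, [(0, 4), (0, 8)], axis=[1, 0])
assert out.shape == (8, 4)
assert np.array_equal(out, img[:8, :4])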
56,675 |
def build_data2(w, editions, authors, ia, duplicates):
"""
Construct the Solr document to insert into Solr for the given work
:param dict w: Work to get data for
:param list[dict] editions: Editions of work
:param list[dict] authors: Authors of work
:param dict[str, dict[str, set[str]]] ia: boxid/collection of each associated IA id
(ex: `{foobar: {boxid: {"foo"}, collection: {"lendinglibrary"}}}`)
:param duplicates: FIXME unused
:rtype: dict
"""
resolve_redirects = False
assert w['type']['key'] == '/type/work'
# Some works are missing a title, but have titles on their editions
w['title'] = next(itertools.chain(
(book['title'] for book in itertools.chain([w], editions) if book.get('title')),
[None]
))
if not w['title']:
logger.error('Work missing title %s' % w['key'])
return
p = SolrProcessor(resolve_redirects)
identifiers = defaultdict(list)
editions = p.process_editions(w, editions, ia, identifiers)
has_fulltext = any(e.get('ocaid', None) for e in editions)
subjects = p.get_subject_counts(w, editions, has_fulltext)
def add_field(doc, name, value):
doc[name] = value
def add_field_list(doc, name, field_list):
doc[name] = list(field_list)
doc = p.build_data(w, editions, subjects, has_fulltext)
work_cover_id = next(itertools.chain(
(cover_id for cover_id in w.get('covers', []) if cover_id != -1),
[None]
))
cover_edition = pick_cover_edition(editions, work_cover_id)
if cover_edition:
cover_edition_key = re_edition_key.match(cover_edition['key']).group(1)
add_field(doc, 'cover_edition_key', cover_edition_key)
main_cover_id = work_cover_id or (
next(cover_id for cover_id in cover_edition['covers'] if cover_id != -1)
if cover_edition else None)
if main_cover_id:
assert isinstance(main_cover_id, int)
add_field(doc, 'cover_i', main_cover_id)
k = 'first_sentence'
fs = set( e[k]['value'] if isinstance(e[k], dict) else e[k] for e in editions if e.get(k, None))
add_field_list(doc, k, fs)
publishers = set()
for e in editions:
publishers.update('Sine nomine' if is_sine_nomine(i) else i for i in e.get('publishers', []))
add_field_list(doc, 'publisher', publishers)
# add_field_list(doc, 'publisher_facet', publishers)
lang = set()
ia_loaded_id = set()
ia_box_id = set()
for e in editions:
for l in e.get('languages', []):
m = re_lang_key.match(l['key'] if isinstance(l, dict) else l)
lang.add(m.group(1))
if e.get('ia_loaded_id'):
if isinstance(e['ia_loaded_id'], six.string_types):
ia_loaded_id.add(e['ia_loaded_id'])
else:
try:
assert isinstance(e['ia_loaded_id'], list) and isinstance(e['ia_loaded_id'][0], six.string_types)
except AssertionError:
logger.error("AssertionError: ia=%s, ia_loaded_id=%s", e.get("ia"), e['ia_loaded_id'])
raise
ia_loaded_id.update(e['ia_loaded_id'])
if e.get('ia_box_id'):
if isinstance(e['ia_box_id'], six.string_types):
ia_box_id.add(e['ia_box_id'])
else:
try:
assert isinstance(e['ia_box_id'], list) and isinstance(e['ia_box_id'][0], six.string_types)
except AssertionError:
logger.error("AssertionError: %s", e['key'])
raise
ia_box_id.update(e['ia_box_id'])
if lang:
add_field_list(doc, 'language', lang)
#if lending_edition or in_library_edition:
# add_field(doc, "borrowed_b", is_borrowed(lending_edition or in_library_edition))
author_keys = [re_author_key.match(a['key']).group(1) for a in authors]
author_names = [a.get('name', '') for a in authors]
add_field_list(doc, 'author_key', author_keys)
add_field_list(doc, 'author_name', author_names)
alt_names = set()
for a in authors:
if 'alternate_names' in a:
alt_names.update(a['alternate_names'])
add_field_list(doc, 'author_alternative_name', alt_names)
add_field_list(doc, 'author_facet', (' '.join(v) for v in zip(author_keys, author_names)))
#if subjects:
# add_field(doc, 'fiction', subjects['fiction'])
for k in 'person', 'place', 'subject', 'time':
if k not in subjects:
continue
subjects_k_keys = list(subjects[k])
add_field_list(doc, k, subjects_k_keys)
add_field_list(doc, k + '_facet', subjects_k_keys)
subject_keys = [str_to_key(s) for s in subjects_k_keys]
add_field_list(doc, k + '_key', subject_keys)
for k in sorted(identifiers):
add_field_list(doc, 'id_' + k, identifiers[k])
if ia_loaded_id:
add_field_list(doc, 'ia_loaded_id', ia_loaded_id)
if ia_box_id:
add_field_list(doc, 'ia_box_id', ia_box_id)
return doc
|
def build_data2(w, editions, authors, ia, duplicates):
"""
Construct the Solr document to insert into Solr for the given work
:param dict w: Work to get data for
:param list[dict] editions: Editions of work
:param list[dict] authors: Authors of work
:param dict[str, dict[str, set[str]]] ia: boxid/collection of each associated IA id
(ex: `{foobar: {boxid: {"foo"}, collection: {"lendinglibrary"}}}`)
:param duplicates: FIXME unused
:rtype: dict
"""
resolve_redirects = False
assert w['type']['key'] == '/type/work'
# Some works are missing a title, but have titles on their editions
w['title'] = next(itertools.chain(
(book['title'] for book in itertools.chain([w], editions) if book.get('title')),
['__None__']
))
if w['title'] == '__None__':
logger.warn('Work missing title %s' % w['key'])
p = SolrProcessor(resolve_redirects)
identifiers = defaultdict(list)
editions = p.process_editions(w, editions, ia, identifiers)
has_fulltext = any(e.get('ocaid', None) for e in editions)
subjects = p.get_subject_counts(w, editions, has_fulltext)
def add_field(doc, name, value):
doc[name] = value
def add_field_list(doc, name, field_list):
doc[name] = list(field_list)
doc = p.build_data(w, editions, subjects, has_fulltext)
work_cover_id = next(itertools.chain(
(cover_id for cover_id in w.get('covers', []) if cover_id != -1),
[None]
))
cover_edition = pick_cover_edition(editions, work_cover_id)
if cover_edition:
cover_edition_key = re_edition_key.match(cover_edition['key']).group(1)
add_field(doc, 'cover_edition_key', cover_edition_key)
main_cover_id = work_cover_id or (
next(cover_id for cover_id in cover_edition['covers'] if cover_id != -1)
if cover_edition else None)
if main_cover_id:
assert isinstance(main_cover_id, int)
add_field(doc, 'cover_i', main_cover_id)
k = 'first_sentence'
fs = set( e[k]['value'] if isinstance(e[k], dict) else e[k] for e in editions if e.get(k, None))
add_field_list(doc, k, fs)
publishers = set()
for e in editions:
publishers.update('Sine nomine' if is_sine_nomine(i) else i for i in e.get('publishers', []))
add_field_list(doc, 'publisher', publishers)
# add_field_list(doc, 'publisher_facet', publishers)
lang = set()
ia_loaded_id = set()
ia_box_id = set()
for e in editions:
for l in e.get('languages', []):
m = re_lang_key.match(l['key'] if isinstance(l, dict) else l)
lang.add(m.group(1))
if e.get('ia_loaded_id'):
if isinstance(e['ia_loaded_id'], six.string_types):
ia_loaded_id.add(e['ia_loaded_id'])
else:
try:
assert isinstance(e['ia_loaded_id'], list) and isinstance(e['ia_loaded_id'][0], six.string_types)
except AssertionError:
logger.error("AssertionError: ia=%s, ia_loaded_id=%s", e.get("ia"), e['ia_loaded_id'])
raise
ia_loaded_id.update(e['ia_loaded_id'])
if e.get('ia_box_id'):
if isinstance(e['ia_box_id'], six.string_types):
ia_box_id.add(e['ia_box_id'])
else:
try:
assert isinstance(e['ia_box_id'], list) and isinstance(e['ia_box_id'][0], six.string_types)
except AssertionError:
logger.error("AssertionError: %s", e['key'])
raise
ia_box_id.update(e['ia_box_id'])
if lang:
add_field_list(doc, 'language', lang)
#if lending_edition or in_library_edition:
# add_field(doc, "borrowed_b", is_borrowed(lending_edition or in_library_edition))
author_keys = [re_author_key.match(a['key']).group(1) for a in authors]
author_names = [a.get('name', '') for a in authors]
add_field_list(doc, 'author_key', author_keys)
add_field_list(doc, 'author_name', author_names)
alt_names = set()
for a in authors:
if 'alternate_names' in a:
alt_names.update(a['alternate_names'])
add_field_list(doc, 'author_alternative_name', alt_names)
add_field_list(doc, 'author_facet', (' '.join(v) for v in zip(author_keys, author_names)))
#if subjects:
# add_field(doc, 'fiction', subjects['fiction'])
for k in 'person', 'place', 'subject', 'time':
if k not in subjects:
continue
subjects_k_keys = list(subjects[k])
add_field_list(doc, k, subjects_k_keys)
add_field_list(doc, k + '_facet', subjects_k_keys)
subject_keys = [str_to_key(s) for s in subjects_k_keys]
add_field_list(doc, k + '_key', subject_keys)
for k in sorted(identifiers):
add_field_list(doc, 'id_' + k, identifiers[k])
if ia_loaded_id:
add_field_list(doc, 'ia_loaded_id', ia_loaded_id)
if ia_box_id:
add_field_list(doc, 'ia_box_id', ia_box_id)
return doc
|
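One detail of build_data2() above that is easy to miss is the author_facet field, which packs the author key and display name into a single space-separated token. A small hedged sketch with made-up author records shows the resulting values; the re_author_key pattern here is an assumption that only mirrors the shape of the one used in the source.

import re

re_author_key = re.compile(r'^/authors/(OL\d+A)$')  # assumed pattern shape, for illustration only

authors = [
    {'key': '/authors/OL123A', 'name': 'Ursula K. Le Guin'},
    {'key': '/authors/OL456A', 'name': 'Stanislaw Lem'},
]
author_keys = [re_author_key.match(a['key']).group(1) for a in authors]
author_names = [a.get('name', '') for a in authors]
author_facet = [' '.join(v) for v in zip(author_keys, author_names)]
print(author_facet)  # ['OL123A Ursula K. Le Guin', 'OL456A Stanislaw Lem']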
4,299 |
def parse_nedf_header(filename):
"""
Read the header information from the first 10kB of an .nedf file
Parameters
----------
filename : str
Path to the .nedf file
Returns
-------
info : dict
A dictionary with information from the header
dt : numpy.dtype
structure of the binary EEG+accelerometer+trigger data in the file
"""
info = {}
# nedf files have some accelerometer channels sampled at 100Hz and
# several other channels sampled at 500Hz.
# The layout is
# (100HzCh1S1, 100HzCh2S1, 100HzCh3S1),
# ((500HzCh1S1, 500HzCh2S1, …, 500HzChnS1),…,
# (500HzCh1S2, 500HzCh2S2, …, 500HzChnS2), …
# (500HzCh1S5, 500HzCh2S5, …, 500HzChnS5)),
# (100HzCh1S2, 100HzCh2S2, 100HzCh3S2) and so on
# dtype for the binary data block
dt = []
# dtype for a single EEG sample
datadt = []
with open(filename, 'rb') as f:
header = f.read(10240)
headerend = header.find(b'\0')
if headerend == -1:
raise RuntimeError('End of header null not found')
headerxml = ElementTree.fromstring(header[:headerend])
nedfversion = headerxml.findtext('NEDFversion', '')
if nedfversion not in ['1.3', '1.4']:
print('Unexpected NEDFversion, hope this works anyway')
if headerxml.findtext('AdditionalChannelStatus', 'OFF') != 'OFF':
raise RuntimeError('Unknown additional channel, aborting.')
n_acc = int(headerxml.findtext('NumberOfChannelsOfAccelerometer', 0))
if n_acc:
# expect one sample of u16 accelerometer data per block
dt.append(('acc', '>u2', (n_acc,)))
eegset = headerxml.find('EEGSettings')
if eegset is None:
raise RuntimeError('No EEG channels found')
nchantotal = int(eegset.find('TotalNumberOfChannels').text)
info['nchan'] = nchantotal
info['sfreq'] = int(eegset.find('EEGSamplingRate').text)
info['ch_names'] = [e.text for e in eegset.find('EEGMontage')]
# expect nchantotal uint24s
datadt.append(('eeg', 'B', (nchantotal, 3)))
info['units'] = eegset.find('EEGUnits')
if headerxml.find('STIMSettings'):
# 2* -> two stim samples per eeg sample
datadt.append(('stim', 'B', (2, nchantotal, 3)))
if 'AdditionalChannelStatus' in headerxml:
raise RuntimeError('Unexpected AdditionalChannelStatus')
if headerxml.findtext('stepDetails/DeviceClass', '') == 'STARSTIM':
print('Found Starstim, not sure how to handle this')
# Trigger data: 4 bytes in newer versions, 1 byte in older versions
trigger_type = '>i4' if headerxml.findtext('NEDFversion') else 'B'
datadt.append(('trig', trigger_type))
# 5 data samples per block
dt.append(('data', np.dtype(datadt), (5,)))
date = headerxml.findtext('StepDetails/StartDate_firstEEGTimestamp', '')
info['meas_date'] = datetime.datetime.utcfromtimestamp(int(date) / 1000)
return info, np.dtype(dt)
|
def parse_nedf_header(filename):
"""
Read the header information from the first 10kB of an .nedf file
Parameters
----------
filename : str
Path to the .nedf file
Returns
-------
info : dict
A dictionary with information from the header
dt : numpy.dtype
Structure of the binary EEG/accelerometer/trigger data in the file.
"""
info = {}
# nedf files have some accelerometer channels sampled at 100Hz and
# several other channels sampled at 500Hz.
# The layout is
# (100HzCh1S1, 100HzCh2S1, 100HzCh3S1),
# ((500HzCh1S1, 500HzCh2S1, …, 500HzChnS1),…,
# (500HzCh1S2, 500HzCh2S2, …, 500HzChnS2), …
# (500HzCh1S5, 500HzCh2S5, …, 500HzChnS5)),
# (100HzCh1S2, 100HzCh2S2, 100HzCh3S2) and so on
# dtype for the binary data block
dt = []
# dtype for a single EEG sample
datadt = []
with open(filename, 'rb') as f:
header = f.read(10240)
headerend = header.find(b'\0')
if headerend == -1:
raise RuntimeError('End of header null not found')
headerxml = ElementTree.fromstring(header[:headerend])
nedfversion = headerxml.findtext('NEDFversion', '')
if nedfversion not in ['1.3', '1.4']:
print('Unexpected NEDFversion, hope this works anyway')
if headerxml.findtext('AdditionalChannelStatus', 'OFF') != 'OFF':
raise RuntimeError('Unknown additional channel, aborting.')
n_acc = int(headerxml.findtext('NumberOfChannelsOfAccelerometer', 0))
if n_acc:
# expect one sample of u16 accelerometer data per block
dt.append(('acc', '>u2', (n_acc,)))
eegset = headerxml.find('EEGSettings')
if eegset is None:
raise RuntimeError('No EEG channels found')
nchantotal = int(eegset.find('TotalNumberOfChannels').text)
info['nchan'] = nchantotal
info['sfreq'] = int(eegset.find('EEGSamplingRate').text)
info['ch_names'] = [e.text for e in eegset.find('EEGMontage')]
# expect nchantotal uint24s
datadt.append(('eeg', 'B', (nchantotal, 3)))
info['units'] = eegset.find('EEGUnits')
if headerxml.find('STIMSettings'):
# 2* -> two stim samples per eeg sample
datadt.append(('stim', 'B', (2, nchantotal, 3)))
if 'AdditionalChannelStatus' in headerxml:
raise RuntimeError('Unexpected AdditionalChannelStatus')
if headerxml.findtext('stepDetails/DeviceClass', '') == 'STARSTIM':
print('Found Starstim, not sure how to handle this')
# Trigger data: 4 bytes in newer versions, 1 byte in older versions
trigger_type = '>i4' if headerxml.findtext('NEDFversion') else 'B'
datadt.append(('trig', trigger_type))
# 5 data samples per block
dt.append(('data', np.dtype(datadt), (5,)))
date = headerxml.findtext('StepDetails/StartDate_firstEEGTimestamp', '')
info['meas_date'] = datetime.datetime.utcfromtimestamp(int(date) / 1000)
return info, np.dtype(dt)
|
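To make the binary block layout built by parse_nedf_header() above more tangible, the sketch below constructs the same kind of numpy structured dtype for a hypothetical recording with 8 EEG channels, a 3-axis accelerometer and a 4-byte trigger, and checks the resulting block size; the channel counts are made up for illustration.

import numpy as np

nchantotal, n_acc = 8, 3
datadt = [('eeg', 'B', (nchantotal, 3)),   # each EEG sample: nchantotal uint24 values (3 raw bytes)
          ('trig', '>i4')]                 # 4-byte trigger, as in newer NEDF versions
dt = np.dtype([('acc', '>u2', (n_acc,)),           # one accelerometer sample per block
               ('data', np.dtype(datadt), (5,))])  # five EEG samples per block

# Block size: 3*2 + 5*(8*3 + 4) = 146 bytes.
assert dt.itemsize == 146
block = np.frombuffer(bytes(dt.itemsize), dtype=dt)  # one all-zero block
assert block['data']['eeg'].shape == (1, 5, 8, 3)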
18,959 |
def setup_parser(subparser):
subparser.epilog = """\
for further documentation regarding the spec syntax, see:
spack help --spec
"""
arguments.add_common_arguments(
subparser, ['long', 'very_long', 'install_status'])
subparser.add_argument(
'-y', '--yaml', action='store_const', dest='format', default=None,
const='yaml', help='print concrete spec as YAML')
subparser.add_argument(
'-j', '--json', action='store_const', dest='format', default=None,
const='json', help='print concrete spec as JSON')
subparser.add_argument(
'-c', '--cover', action='store',
default='nodes', choices=['nodes', 'edges', 'paths'],
help='how extensively to traverse the DAG (default: nodes)')
subparser.add_argument(
'-N', '--namespaces', action='store_true', default=False,
help='show fully qualified package names')
subparser.add_argument(
'--hash-type', default="build_hash",
choices=['build_hash', 'full_hash', 'dag_hash'],
help='generate spec with a particular hash type.')
subparser.add_argument(
'--test', default=None,
choices=['all', 'root'],
help=""" If 'all' is chosen, concretize with test dependencies for all packages.
If 'root' is chosen, concretiz with test dependencies only for the root
spec(s). If nothing is chosen, don't add test dependencies for any packages."""
)
subparser.add_argument(
'-t', '--types', action='store_true', default=False,
help='show dependency types')
arguments.add_common_arguments(subparser, ['specs'])
|
def setup_parser(subparser):
subparser.epilog = """\
for further documentation regarding the spec syntax, see:
spack help --spec
"""
arguments.add_common_arguments(
subparser, ['long', 'very_long', 'install_status'])
subparser.add_argument(
'-y', '--yaml', action='store_const', dest='format', default=None,
const='yaml', help='print concrete spec as YAML')
subparser.add_argument(
'-j', '--json', action='store_const', dest='format', default=None,
const='json', help='print concrete spec as JSON')
subparser.add_argument(
'-c', '--cover', action='store',
default='nodes', choices=['nodes', 'edges', 'paths'],
help='how extensively to traverse the DAG (default: nodes)')
subparser.add_argument(
'-N', '--namespaces', action='store_true', default=False,
help='show fully qualified package names')
subparser.add_argument(
'--hash-type', default="build_hash",
choices=['build_hash', 'full_hash', 'dag_hash'],
help='generate spec with a particular hash type.')
subparser.add_argument(
'--test', default=None,
choices=['all', 'root'],
help=""" If 'all' is chosen, concretize with test dependencies for all packages.
If 'root' is chosen, concretize with test dependencies only for the root
spec(s). If nothing is chosen, don't add test dependencies for any packages."""
)
subparser.add_argument(
'-t', '--types', action='store_true', default=False,
help='show dependency types')
arguments.add_common_arguments(subparser, ['specs'])
|
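The --yaml/--json options in setup_parser() above rely on argparse's store_const action writing into a shared dest; this standalone sketch reproduces just that pattern without any Spack internals so the behaviour can be checked in isolation.

import argparse

parser = argparse.ArgumentParser(prog='spec-demo')
parser.add_argument('-y', '--yaml', action='store_const', dest='format',
                    default=None, const='yaml', help='print concrete spec as YAML')
parser.add_argument('-j', '--json', action='store_const', dest='format',
                    default=None, const='json', help='print concrete spec as JSON')

assert parser.parse_args([]).format is None
assert parser.parse_args(['--yaml']).format == 'yaml'
assert parser.parse_args(['-j']).format == 'json'

Because both flags share dest='format', passing both makes the later one win, which matches how the command later decides between YAML and JSON output.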
31,922 |
def ip_reputation_command(client: Client, args: Dict[str, Any], default_threshold: int) -> List[CommandResults]:
"""ip command: Returns IP reputation for a list of IPs
:type client: ``Client``
:param Client: HelloWorld client to use
:type args: ``Dict[str, Any]``
:param args:
all command arguments, usually passed from ``demisto.args()``.
``args['ip']`` is a list of IPs or a single IP
``args['threshold']`` threshold to determine whether an IP is malicious
:type default_threshold: ``int``
:param default_threshold:
default threshold to determine whether an IP is malicious
if threshold is not specified in the XSOAR arguments
:return:
A ``CommandResults`` object that is then passed to ``return_results``,
that contains IPs
:rtype: ``CommandResults``
"""
# INTEGRATION DEVELOPER TIP
# Reputation commands usually support multiple inputs (i.e. arrays), so
# they can be invoked once in XSOAR. In this case the API supports a single
# IP at a time, so we will cycle this for all the members of the array.
# We use argToList(), implemented in CommonServerPython.py to automatically
# return a list of a single element even if the provided input is a scalar.
ips = argToList(args.get('ip'))
if len(ips) == 0:
raise ValueError('IP(s) not specified')
# It's a good practice to document the threshold you use to determine
# if a score is malicious in your integration documentation.
# Thresholds should also be possible to override, as in this case,
# where threshold is an actual argument of the command.
threshold = int(args.get('threshold', default_threshold))
# Initialize an empty list of CommandResults to return
# each CommandResult will contain context standard for IP
command_results: List[CommandResults] = []
for ip in ips:
ip_data = client.get_ip_reputation(ip)
ip_data['ip'] = ip
# This creation of the relationships is just an example of creating relationships in reputation commands.
# We will create relationships between indicators only in case that the API returns information about
# the relationship between two indicators.
# See https://xsoar.pan.dev/docs/integrations/generic-commands-reputation#relationships
relationships_list = []
links = ip_data.get('network', {}).get('links', [])
for link in links:
relationships_list.append(EntityRelationship(
entity_a=ip,
entity_a_type=FeedIndicatorType.IP,
name='related-to',
entity_b=link,
entity_b_type=FeedIndicatorType.URL,
brand='HelloWorld'))
# HelloWorld score to XSOAR reputation mapping
# See: https://xsoar.pan.dev/docs/integrations/dbot
# We are using Common.DBotScore as macros to simplify
# the mapping.
score = 0
reputation = int(ip_data.get('score', 0))
if reputation == 0:
score = Common.DBotScore.NONE # unknown
elif reputation >= threshold:
score = Common.DBotScore.BAD # bad
elif reputation >= threshold / 2:
score = Common.DBotScore.SUSPICIOUS # suspicious
else:
score = Common.DBotScore.GOOD # good
# The context is bigger here than in other commands, as it consists of 3
# parts: the vendor-specific context (HelloWorld), the standard-context
# (IP) and the DBotScore.
# More information:
# https://xsoar.pan.dev/docs/integrations/context-and-outputs
# https://xsoar.pan.dev/docs/integrations/context-standards
# https://xsoar.pan.dev/docs/integrations/dbot
# Also check the HelloWorld Design Document
# Create the DBotScore structure first using the Common.DBotScore class.
dbot_score = Common.DBotScore(
indicator=ip,
indicator_type=DBotScoreType.IP,
integration_name='HelloWorld',
score=score,
malicious_description=f'Hello World returned reputation {reputation}'
)
# Create the IP Standard Context structure using Common.IP and add
# dbot_score to it.
ip_standard_context = Common.IP(
ip=ip,
asn=ip_data.get('asn'),
dbot_score=dbot_score,
relationships=relationships_list
)
# INTEGRATION DEVELOPER TIP
# In the integration specific Context output (HelloWorld.IP) in this
# example you want to provide a lot of information as it can be used
# programmatically from within Cortex XSOAR in playbooks and commands.
# On the other hand, this API is way too verbose, so we want to select
# only certain keys to be returned in order not to clog the context
# with useless information. What to actually return in the context and
# to define as a command output is subject to design considerations.
# INTEGRATION DEVELOPER TIP
# To generate the Context Outputs on the YML use ``demisto-sdk``'s
# ``json-to-outputs`` option.
# Define which fields we want to exclude from the context output as
# they are too verbose.
ip_context_excluded_fields = ['objects', 'nir']
ip_data = {k: ip_data[k] for k in ip_data if k not in ip_context_excluded_fields}
# In this case we want to use a custom markdown to specify the table title,
# but otherwise ``CommandResults()`` will call ``tableToMarkdown()``
# automatically
readable_output = tableToMarkdown('IP', ip_data)
# INTEGRATION DEVELOPER TIP
# The output key will be ``HelloWorld.IP``, using ``ip`` as the key field.
# ``indicator`` is used to provide the context standard (IP)
command_results.append(CommandResults(
readable_output=readable_output,
outputs_prefix='HelloWorld.IP',
outputs_key_field='ip',
outputs=ip_data,
indicator=ip_standard_context,
relationships=relationships_list
))
return command_results
|
def ip_reputation_command(client: Client, args: Dict[str, Any], default_threshold: int) -> List[CommandResults]:
"""ip command: Returns IP reputation for a list of IPs
:type client: ``Client``
:param Client: HelloWorld client to use
:type args: ``Dict[str, Any]``
:param args:
all command arguments, usually passed from ``demisto.args()``.
``args['ip']`` is a list of IPs or a single IP
``args['threshold']`` threshold to determine whether an IP is malicious
:type default_threshold: ``int``
:param default_threshold:
default threshold to determine whether an IP is malicious
if threshold is not specified in the XSOAR arguments
:return:
A ``CommandResults`` object that is then passed to ``return_results``,
that contains IPs
:rtype: ``CommandResults``
"""
# INTEGRATION DEVELOPER TIP
# Reputation commands usually support multiple inputs (i.e. arrays), so
# they can be invoked once in XSOAR. In this case the API supports a single
# IP at a time, so we will cycle this for all the members of the array.
# We use argToList(), implemented in CommonServerPython.py to automatically
# return a list of a single element even if the provided input is a scalar.
ips = argToList(args.get('ip'))
if len(ips) == 0:
raise ValueError('IP(s) not specified')
# It's a good practice to document the threshold you use to determine
# if a score is malicious in your integration documentation.
# Thresholds should also be possible to override, as in this case,
# where threshold is an actual argument of the command.
threshold = int(args.get('threshold', default_threshold))
# Initialize an empty list of CommandResults to return
# each CommandResult will contain context standard for IP
command_results: List[CommandResults] = []
for ip in ips:
ip_data = client.get_ip_reputation(ip)
ip_data['ip'] = ip
# This creation is an example of creating relationships in reputation commands.
# We will create relationships between indicators only in case that the API returns information about
# the relationship between two indicators.
# See https://xsoar.pan.dev/docs/integrations/generic-commands-reputation#relationships
relationships_list = []
links = ip_data.get('network', {}).get('links', [])
for link in links:
relationships_list.append(EntityRelationship(
entity_a=ip,
entity_a_type=FeedIndicatorType.IP,
name='related-to',
entity_b=link,
entity_b_type=FeedIndicatorType.URL,
brand='HelloWorld'))
# HelloWorld score to XSOAR reputation mapping
# See: https://xsoar.pan.dev/docs/integrations/dbot
# We are using Common.DBotScore as macros to simplify
# the mapping.
score = 0
reputation = int(ip_data.get('score', 0))
if reputation == 0:
score = Common.DBotScore.NONE # unknown
elif reputation >= threshold:
score = Common.DBotScore.BAD # bad
elif reputation >= threshold / 2:
score = Common.DBotScore.SUSPICIOUS # suspicious
else:
score = Common.DBotScore.GOOD # good
# The context is bigger here than in other commands, as it consists of 3
# parts: the vendor-specific context (HelloWorld), the standard-context
# (IP) and the DBotScore.
# More information:
# https://xsoar.pan.dev/docs/integrations/context-and-outputs
# https://xsoar.pan.dev/docs/integrations/context-standards
# https://xsoar.pan.dev/docs/integrations/dbot
# Also check the HelloWorld Design Document
# Create the DBotScore structure first using the Common.DBotScore class.
dbot_score = Common.DBotScore(
indicator=ip,
indicator_type=DBotScoreType.IP,
integration_name='HelloWorld',
score=score,
malicious_description=f'Hello World returned reputation {reputation}'
)
# Create the IP Standard Context structure using Common.IP and add
# dbot_score to it.
ip_standard_context = Common.IP(
ip=ip,
asn=ip_data.get('asn'),
dbot_score=dbot_score,
relationships=relationships_list
)
# INTEGRATION DEVELOPER TIP
# In the integration specific Context output (HelloWorld.IP) in this
# example you want to provide a lot of information as it can be used
# programmatically from within Cortex XSOAR in playbooks and commands.
# On the other hand, this API is way too verbose, so we want to select
# only certain keys to be returned in order not to clog the context
# with useless information. What to actually return in the context and
# to define as a command output is subject to design considerations.
# INTEGRATION DEVELOPER TIP
# To generate the Context Outputs on the YML use ``demisto-sdk``'s
# ``json-to-outputs`` option.
# Define which fields we want to exclude from the context output as
# they are too verbose.
ip_context_excluded_fields = ['objects', 'nir']
ip_data = {k: ip_data[k] for k in ip_data if k not in ip_context_excluded_fields}
# In this case we want to use a custom markdown to specify the table title,
# but otherwise ``CommandResults()`` will call ``tableToMarkdown()``
# automatically
readable_output = tableToMarkdown('IP', ip_data)
# INTEGRATION DEVELOPER TIP
# The output key will be ``HelloWorld.IP``, using ``ip`` as the key field.
# ``indicator`` is used to provide the context standard (IP)
command_results.append(CommandResults(
readable_output=readable_output,
outputs_prefix='HelloWorld.IP',
outputs_key_field='ip',
outputs=ip_data,
indicator=ip_standard_context,
relationships=relationships_list
))
return command_results
|
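The reputation-to-DBotScore mapping inside ip_reputation_command() above is self-contained enough to isolate. The hedged sketch below re-implements only that branch logic with plain integers standing in for the Common.DBotScore constants, so it runs without the XSOAR SDK; the numeric values and the example threshold are illustrative.

NONE, GOOD, SUSPICIOUS, BAD = 0, 1, 2, 3   # stand-ins for Common.DBotScore.*

def reputation_to_score(reputation: int, threshold: int) -> int:
    if reputation == 0:
        return NONE          # unknown
    if reputation >= threshold:
        return BAD           # malicious
    if reputation >= threshold / 2:
        return SUSPICIOUS
    return GOOD

assert reputation_to_score(0, 65) == NONE
assert reputation_to_score(80, 65) == BAD
assert reputation_to_score(40, 65) == SUSPICIOUS
assert reputation_to_score(10, 65) == GOOD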
22,729 |
def load_cert(cert_path):
"""Reads the certificate PEM file and returns a cryptography.x509 object
:param str cert_path: Path to the certificate
:rtype `cryptography.x509`:
:returns: x509 certificate object
"""
with open(cert_path, 'rb') as fh:
cert_pem = fh.read()
return x509.load_pem_x509_certificate(cert_pem, default_backend())
|
def load_cert(cert_path):
"""Reads the certificate PEM file and returns a cryptography.x509.Certificate object.
:param str cert_path: Path to the certificate
:rtype `cryptography.x509`:
:returns: x509 certificate object
"""
with open(cert_path, 'rb') as fh:
cert_pem = fh.read()
return x509.load_pem_x509_certificate(cert_pem, default_backend())
|
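A short hedged usage sketch for load_cert() above: the path is a placeholder, and the attributes accessed are standard cryptography.x509.Certificate properties (newer cryptography releases prefer not_valid_after_utc over not_valid_after).

cert = load_cert('/tmp/cert.pem')            # placeholder path to a PEM certificate
print(cert.subject.rfc4514_string())         # e.g. 'CN=example.org'
print(cert.not_valid_after)                  # expiry as a datetime
print(cert.serial_number)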
41,739 |
def objective(trial):
# Clear clutter from previous Keras session graphs.
clear_session()
X, y = load_wine(return_X_y=True)
X = standardize(X)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=TEST_SIZE, random_state=42)
model = create_model(X.shape[1], trial)
model.fit(X_train,
y_train,
shuffle=True,
batch_size=BATCHSIZE,
epochs=EPOCHS,
verbose=False)
score = model.evaluate(X_test, y_test, verbose=0)
with mlflow.start_run() as run:
mlflow.log_params(trial.params)
mlflow.log_metrics({'mean_squared_error': score})
return score
|
def objective(trial):
# Clear clutter from previous Keras session graphs.
clear_session()
X, y = load_wine(return_X_y=True)
X = standardize(X)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=TEST_SIZE, random_state=42)
model = create_model(X.shape[1], trial)
model.fit(X_train,
y_train,
shuffle=True,
batch_size=BATCHSIZE,
epochs=EPOCHS,
verbose=False)
score = model.evaluate(X_test, y_test, verbose=0)
with mlflow.start_run():
mlflow.log_params(trial.params)
mlflow.log_metrics({'mean_squared_error': score})
return score
|
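For completeness, here is a hedged sketch of how an objective like the one above is usually driven: a minimising Optuna study over a handful of trials. It assumes objective() and its helpers (create_model, standardize, the BATCHSIZE/EPOCHS/TEST_SIZE constants) are importable as defined in the record; the number of trials is an arbitrary choice.

import optuna

study = optuna.create_study(direction='minimize')   # objective() returns a loss-like score
study.optimize(objective, n_trials=20)
print('best value:', study.best_value)
print('best params:', study.best_trial.params)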
5,440 |
def _import_api():
"""
Download https://<url>/pve-docs/api-viewer/apidoc.js
Extract content of pveapi var (json formatted) if proxmox is version 6
Extract content of apiSchema var (json formatted) if proxmox is version 7
Load this json content into global variable "api"
"""
global api
full_url = "https://{}:{}/pve-docs/api-viewer/apidoc.js".format(url, port)
returned_data = requests.get(full_url, verify=verify_ssl)
# Filter below will check for a json variable named either pveapi or apiSchema
re_filter = re.compile(
"((?<=pveapi =)|(?<=apiSchema =))(.*)(?=^;)", re.DOTALL | re.MULTILINE
)
# Results for both possible matches are returned as a tuple instead of a single string
filter_results = re_filter.findall(returned_data.text)[0]
# We need to capture the single tuple item expected to have valid json
for result in filter_results:
if result != "":
api_json = result
api = salt.utils.json.loads(api_json)
|
def _import_api():
"""
Download https://<url>/pve-docs/api-viewer/apidoc.js
Extract content of pveapi var (json formatted) if proxmox is version 6
Extract content of apiSchema var (json formatted) if proxmox is version 7
Load this json content into global variable "api"
"""
global api
full_url = "https://{}:{}/pve-docs/api-viewer/apidoc.js".format(url, port)
returned_data = requests.get(full_url, verify=verify_ssl)
# Filter below will check for a json variable named either pveapi or apiSchema
re_filter = re.compile(
"((?<=pveapi =)|(?<=apiSchema =))(.*)(?=^;)", re.DOTALL | re.MULTILINE
)
# Results for both possible matches are returned as a tuple instead of a single string
filter_results = re_filter.findall(returned_data.text)[0]
# We need to capture the single tuple item expected to have valid json
for result in filter_results:
if result:
api_json = result
api = salt.utils.json.loads(api_json)
|
19,887 |
def macadam_limits(target_brightness, illuminant=()):
"""
    Wavelength reaches from 360 to 830 nm; within the program it is
    handled as 0 to 470. Beyond the references this program is very fast,
    because the possible optimums are not simply tested step by step but
    targeted more effectively by steps of powers of two. The wavelengths
    left and right of a rough optimum are fitted by a rule of proportion,
    so that the desired brightness will be reached exactly.
Parameters
----------
target_brightness : floating point
brightness has to be between 0 and 1
illuminant: object
        illuminant must be taken from colorimetry.MSDS_CMFS['XXX'].
        If there is no illuminant or it has the wrong form,
        the illuminant SDS_ILLUMINANTS['E']
        is chosen, which has no influence on the calculations
        because it is an equal-energy spectrum.
        If necessary, a third parameter for the
        colour-matching function could easily be implemented.
Returns
-------
    an array of CIE X, Y, Z triples for every single wavelength
    in 1 nm steps in the range from 360 to 830 nm
References
----------
- cite: Wyszecki, G., & Stiles, W. S. (2000).
In Color Science: Concepts and Methods,
Quantitative Data and Formulae (pp. 181–184). Wiley.
ISBN:978-0-471-39918-6
- cite: Francisco Martínez-Verdú, Esther Perales,
Elisabet Chorro, Dolores de Fez,
Valentín Viqueira, and Eduardo Gilabert, "Computation and
visualization of the MacAdam limits
for any lightness, hue angle, and light source," J.
Opt. Soc. Am. A 24, 1501-1515 (2007)
- cite: Kenichiro Masaoka. In OPTICS LETTERS, June 15, 2010
/ Vol. 35, No. 1 (pp. 2031 - 2033)
Example
--------
from matplotlib import pyplot as plt
import numpy as np
import math
fig = plt.figure(figsize=(7,7))
ax = fig.add_axes([0,0,1,1])
illuminant = colour.SDS_ILLUMINANTS['D65']
def plot_Narrowband_Spectra (Yxy_Narrowband_Spectra):
FirstColumn = 0
SecondColumn = 1
x = Yxy_Narrowband_Spectra[...,FirstColumn]
y = Yxy_Narrowband_Spectra[...,SecondColumn]
ax.plot(x,y,'orange',label='Spectrum Loci')
x = [Yxy_Narrowband_Spectra[-1][FirstColumn],
Yxy_Narrowband_Spectra[0][FirstColumn]]
y = [Yxy_Narrowband_Spectra[-1][SecondColumn],
Yxy_Narrowband_Spectra[0][SecondColumn]]
ax.plot(x,y,'purple',label='Purple Boundary')
return()
for n in range(1, 20):
Yxy_Narrowband_Spectra = colour.XYZ_to_xy(
colour.macadam_limits(n/20, illuminant) / 100)
plot_Narrowband_Spectra (Yxy_Narrowband_Spectra)
plt.show()
"""
target_bright = target_brightness
if target_bright > 1 or target_bright < 0:
        raise TypeError('brightness of function macadam_limits() '
                        'has to be between 0 and 1')
standard_cfms = MSDS_CMFS['CIE 1931 2 Degree Standard Observer']
X_cie31 = standard_cfms.values[..., 0]
Y_cie31 = standard_cfms.values[..., 1]
Z_cie31 = standard_cfms.values[..., 2]
try:
illuminant.interpolator
except AttributeError:
illuminant = SDS_ILLUMINANTS['E']
    # If there is no illuminant or it has the wrong form,
    # an illuminant with no influence is chosen.
    # If the illuminants do not match the format of the Standard Observer,
    # they have to be adapted.
illuminant.extrapolate(SpectralShape(360, 830))
illuminant.interpolate(SpectralShape(360, 830, 1))
# The cie31 cmfs are convolved with the given illuminant
X_illuminated = X_cie31 * illuminant.values
Y_illuminated = Y_cie31 * illuminant.values
Z_illuminated = Z_cie31 * illuminant.values
# Generate empty output-array
out_limits = np.zeros_like(standard_cfms.values)
    # This array has 471 entries for wavelengths from 360 nm to 830 nm
    opti_colour = np.zeros_like(Y_illuminated)
    # The array of optimal colours has the same dimensions as Y_illuminated
    # and all entries are initially set to zero
    middle_opti_colour = 235
    # This is a constant and must not be changed. At 595 nm (360 + 235),
    # in the middle of the center_opti_colour array.
    # Be aware that counting of array positions starts at zero.
    # The first optimum colour has its center initially at zero
maximum_brightness = np.sum(Y_illuminated)
# "integral" over Y_illuminated
def optimum_colour(width, center):
opti_colour = np.zeros(471)
        # creates an array of 471 zeros and ones which represents optimum colours
        # All values of the opti_colour array are initially set to zero
half_width = width
center_opti_colour = center
middle_opti_colour = 235
opti_colour[middle_opti_colour - half_width:middle_opti_colour +
half_width + 1] = 1
# we start the construction of the optimum color
# at the center of the opti_colour-array
opti_colour = np.roll(opti_colour,
center_opti_colour - middle_opti_colour)
        # the optimum colour is rolled to the right wavelength
return opti_colour
def bright_opti_colour(width, center, lightsource):
brightness = np.sum(
optimum_colour(width, center) * lightsource) / maximum_brightness
return brightness
step_size = np.array([64, 32, 16, 8, 4, 2, 1])
for whavelength in range(0, 471):
width = 127
for n in step_size:
brightness = bright_opti_colour(width, whavelength, Y_illuminated)
if brightness > target_bright or width > 234:
width -= n
else:
width += n
brightness = bright_opti_colour(width, whavelength, Y_illuminated)
if brightness < target_bright:
width += 1
brightness = bright_opti_colour(width, whavelength, Y_illuminated)
rough_optimum = optimum_colour(width, whavelength)
brightness = np.sum(rough_optimum * Y_illuminated) / maximum_brightness
        # in the following, both borders of the found rough_optimum
        # are reduced to get more exact results
        bright_difference = (brightness - target_bright) * maximum_brightness
        # discrimination for single-wavelength spectra
if width > 0:
opti_colour = np.zeros(471)
opti_colour[middle_opti_colour - width:middle_opti_colour + width +
1] = 1
            # instead of rolling opti_colour forward, the light is rolled backward
rolled_light = np.roll(Y_illuminated,
middle_opti_colour - whavelength)
opti_colour_light = opti_colour * rolled_light
left_opti = opti_colour_light[middle_opti_colour - width]
right_opti = opti_colour_light[middle_opti_colour + width]
interpolation = 1 - (bright_difference / (left_opti + right_opti))
opti_colour[middle_opti_colour - width] = interpolation
opti_colour[middle_opti_colour + width] = interpolation
            # opti_colour is rolled to the right position
final_optimum = np.roll(opti_colour,
whavelength - middle_opti_colour)
else:
final_optimum = rough_optimum / brightness * target_bright
out_X = np.sum(final_optimum * X_illuminated)
out_Y = target_bright * maximum_brightness
out_Z = np.sum(final_optimum * Z_illuminated)
triple = np.array([out_X, out_Y, out_Z])
out_limits[whavelength] = triple
return (out_limits)
|
def macadam_limits(target_brightness, illuminant=()):
"""
    Wavelength reaches from 360 to 830 nm; within the program it is
    handled as 0 to 470. Beyond the references this program is very fast,
    because the possible optimums are not simply tested step by step but
    targeted more effectively by steps of powers of two. The wavelengths
    left and right of a rough optimum are fitted by a rule of proportion,
    so that the desired brightness will be reached exactly.
Parameters
----------
target_brightness : floating point
brightness has to be between 0 and 1
illuminant: object
        illuminant must be taken from colorimetry.MSDS_CMFS['XXX'].
        If there is no illuminant or it has the wrong form,
        the illuminant SDS_ILLUMINANTS['E']
        is chosen, which has no influence on the calculations
        because it is an equal-energy spectrum.
        If necessary, a third parameter for the
        colour-matching function could easily be implemented.
Returns
-------
    an array of CIE X, Y, Z triples for every single wavelength
    in 1 nm steps in the range from 360 to 830 nm
References
----------
- cite: Wyszecki, G., & Stiles, W. S. (2000).
In Color Science: Concepts and Methods,
Quantitative Data and Formulae (pp. 181–184). Wiley.
ISBN:978-0-471-39918-6
- cite: Francisco Martínez-Verdú, Esther Perales,
Elisabet Chorro, Dolores de Fez,
Valentín Viqueira, and Eduardo Gilabert, "Computation and
visualization of the MacAdam limits
for any lightness, hue angle, and light source," J.
Opt. Soc. Am. A 24, 1501-1515 (2007)
- cite: Kenichiro Masaoka. In OPTICS LETTERS, June 15, 2010
/ Vol. 35, No. 1 (pp. 2031 - 2033)
Example
--------
from matplotlib import pyplot as plt
import numpy as np
import math
fig = plt.figure(figsize=(7,7))
ax = fig.add_axes([0,0,1,1])
illuminant = colour.SDS_ILLUMINANTS['D65']
def plot_Narrowband_Spectra (Yxy_Narrowband_Spectra):
FirstColumn = 0
SecondColumn = 1
x = Yxy_Narrowband_Spectra[...,FirstColumn]
y = Yxy_Narrowband_Spectra[...,SecondColumn]
ax.plot(x,y,'orange',label='Spectrum Loci')
x = [Yxy_Narrowband_Spectra[-1][FirstColumn],
Yxy_Narrowband_Spectra[0][FirstColumn]]
y = [Yxy_Narrowband_Spectra[-1][SecondColumn],
Yxy_Narrowband_Spectra[0][SecondColumn]]
ax.plot(x,y,'purple',label='Purple Boundary')
return()
for n in range(1, 20):
Yxy_Narrowband_Spectra = colour.XYZ_to_xy(
colour.macadam_limits(n/20, illuminant) / 100)
plot_Narrowband_Spectra (Yxy_Narrowband_Spectra)
plt.show()
"""
target_bright = target_brightness
if target_bright > 1 or target_bright < 0:
        raise TypeError('brightness of function macadam_limits() '
                        'has to be between 0 and 1')
standard_cfms = MSDS_CMFS['CIE 1931 2 Degree Standard Observer']
X_cie31 = standard_cfms.values[..., 0]
Y_cie31 = standard_cfms.values[..., 1]
Z_cie31 = standard_cfms.values[..., 2]
try:
illuminant.interpolator
except AttributeError:
illuminant = SDS_ILLUMINANTS['E']
    # If there is no illuminant or it has the wrong form,
    # an illuminant with no influence is chosen.
    # If the illuminants do not match the format of the Standard Observer,
    # they have to be adapted.
illuminant.extrapolate(SpectralShape(360, 830))
illuminant.interpolate(SpectralShape(360, 830, 1))
# The cie31 cmfs are convolved with the given illuminant
X_illuminated = X_cie31 * illuminant.values
Y_illuminated = Y_cie31 * illuminant.values
Z_illuminated = Z_cie31 * illuminant.values
# Generate empty output-array
out_limits = np.zeros_like(standard_cfms.values)
    # This array has 471 entries for wavelengths from 360 nm to 830 nm
    opti_colour = np.zeros_like(Y_illuminated)
    # The array of optimal colours has the same dimensions as Y_illuminated
    # and all entries are initially set to zero
    middle_opti_colour = 235
    # This is a constant and must not be changed. At 595 nm (360 + 235),
    # in the middle of the center_opti_colour array.
    # Be aware that counting of array positions starts at zero.
    # The first optimum colour has its center initially at zero
maximum_brightness = np.sum(Y_illuminated)
# "integral" over Y_illuminated
def optimum_colour(width, center):
opti_colour = np.zeros(471)
        # creates an array of 471 zeros and ones which represents optimum colours
        # All values of the opti_colour array are initially set to zero
half_width = width
center_opti_colour = center
middle_opti_colour = 235
opti_colour[middle_opti_colour - half_width:middle_opti_colour +
half_width + 1] = 1
# we start the construction of the optimum color
# at the center of the opti_colour-array
opti_colour = np.roll(opti_colour,
center_opti_colour - middle_opti_colour)
        # the optimum colour is rolled to the right wavelength
return opti_colour
def bright_opti_colour(width, center, lightsource):
brightness = np.sum(
optimum_colour(width, center) * lightsource) / maximum_brightness
return brightness
step_size = np.array([64, 32, 16, 8, 4, 2, 1])
for whavelength in range(0, 471):
width = 127
for n in step_size:
brightness = bright_opti_colour(width, whavelength, Y_illuminated)
if brightness > target_bright or width > 234:
width -= n
else:
width += n
brightness = bright_opti_colour(width, whavelength, Y_illuminated)
if brightness < target_bright:
width += 1
brightness = bright_opti_colour(width, whavelength, Y_illuminated)
rough_optimum = optimum_colour(width, whavelength)
brightness = np.sum(rough_optimum * Y_illuminated) / maximum_brightness
        # in the following, both borders of the found rough_optimum
        # are reduced to get more exact results
        bright_difference = (brightness - target_bright) * maximum_brightness
        # discrimination for single-wavelength spectra
if width > 0:
opti_colour = np.zeros(471)
opti_colour[middle_opti_colour - width:middle_opti_colour + width +
1] = 1
            # instead of rolling opti_colour forward, the light is rolled backward
rolled_light = np.roll(Y_illuminated,
middle_opti_colour - whavelength)
opti_colour_light = opti_colour * rolled_light
left_opti = opti_colour_light[middle_opti_colour - width]
right_opti = opti_colour_light[middle_opti_colour + width]
interpolation = 1 - (bright_difference / (left_opti + right_opti))
opti_colour[middle_opti_colour - width] = interpolation
opti_colour[middle_opti_colour + width] = interpolation
            # opti_colour is rolled to the right position
final_optimum = np.roll(opti_colour,
wavelength - middle_opti_colour)
else:
final_optimum = rough_optimum / brightness * target_bright
out_X = np.sum(final_optimum * X_illuminated)
out_Y = target_bright * maximum_brightness
out_Z = np.sum(final_optimum * Z_illuminated)
triple = np.array([out_X, out_Y, out_Z])
out_limits[whavelength] = triple
return (out_limits)
|
55,589 |
def get_plugin_manager():
"""
Return plugin manager instance
"""
# Set up plugin infrastructure
pm = pluggy.PluginManager('tljh')
pm.add_hookspecs(hooks)
pm.load_setuptools_entrypoints('tljh')
return pm
|
def get_plugin_manager():
"""
Return plugin manager instance
"""
# Set up plugin infrastructure
pm = pluggy.PluginManager('tljh')
pm.add_hookspecs(hooks)
pm.load_setuptools_entrypoints('tljh')
return pm
|
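As a rough illustration of what the returned manager can be used for, a sketch that sticks to generic pluggy calls and assumes no tljh-specific hook names:
pm = get_plugin_manager()
# List whatever plugins were discovered through the 'tljh' entry-point group.
for name, plugin in pm.list_name_plugin():
    print(name, plugin)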
5,060 |
def test_subfigure_spanning():
# test that subfigures get laid out properly...
fig = plt.figure(constrained_layout=True)
gs = fig.add_gridspec(3, 3)
sub_figs = []
sub_figs += [fig.add_subfigure(gs[0, 0])]
sub_figs += [fig.add_subfigure(gs[0:2, 1])]
sub_figs += [fig.add_subfigure(gs[2, 1:3])]
w = 640
h = 480
minp = sub_figs[0].bbox.min
exp = np.array([0., h*2/3])
np.testing.assert_allclose(minp, exp)
maxp = sub_figs[0].bbox.max
exp = np.array([w / 3, h])
np.testing.assert_allclose(maxp, exp)
minp = sub_figs[1].bbox.min
exp = np.array([w / 3, h * 1 / 3])
np.testing.assert_allclose(minp, exp)
maxp = sub_figs[1].bbox.max
exp = np.array([w * 2 / 3, h])
np.testing.assert_allclose(maxp, exp)
minp = sub_figs[2].bbox.min
exp = np.array([w / 3, 0])
np.testing.assert_allclose(minp, exp)
maxp = sub_figs[2].bbox.max
exp = np.array([w, h * 1 / 3])
np.testing.assert_allclose(maxp, exp)
|
def test_subfigure_spanning():
# test that subfigures get laid out properly...
fig = plt.figure(constrained_layout=True)
gs = fig.add_gridspec(3, 3)
sub_figs = []
sub_figs += [fig.add_subfigure(gs[0, 0])]
sub_figs += [fig.add_subfigure(gs[0:2, 1])]
sub_figs += [fig.add_subfigure(gs[2, 1:3])]
w = 640
h = 480
minp = sub_figs[0].bbox.min
exp = np.array([0., h*2/3])
np.testing.assert_allclose(minp, exp)
maxp = sub_figs[0].bbox.max
exp = np.array([w / 3, h])
np.testing.assert_allclose(maxp, exp)
minp = sub_figs[1].bbox.min
exp = np.array([w / 3, h * 1 / 3])
np.testing.assert_allclose(minp, exp)
maxp = sub_figs[1].bbox.max
exp = np.array([w * 2 / 3, h])
np.testing.assert_allclose(maxp, exp)
minp = sub_figs[2].bbox.min
exp = np.array([w / 3, 0])
np.testing.assert_allclose(minp, exp)
maxp = sub_figs[2].bbox.max
exp = np.array([w, h / 3])
np.testing.assert_allclose(maxp, exp)
|
22,997 |
def test_vertices_thick():
nlay, ncpl = 3, 5
vertices = [
[0, 0.0, 3.0],
[1, 1.0, 3.0],
[2, 2.0, 3.0],
[3, 0.0, 2.0],
[4, 1.0, 2.0],
[5, 2.0, 2.0],
[6, 0.0, 1.0],
[7, 1.0, 1.0],
[8, 2.0, 1.0],
[9, 0.0, 0.0],
[10, 1.0, 0.0],
]
iverts = [
[0, 0, 1, 4, 3],
[1, 1, 2, 5, 4],
[2, 3, 4, 7, 6],
[3, 4, 5, 8, 7],
[4, 6, 7, 10, 9],
[5, 0, 1, 4, 3],
[6, 1, 2, 5, 4],
[7, 3, 4, 7, 6],
[8, 4, 5, 8, 7],
[9, 6, 7, 10, 9],
[10, 0, 1, 4, 3],
[11, 1, 2, 5, 4],
[12, 3, 4, 7, 6],
[13, 4, 5, 8, 7],
[14, 6, 7, 10, 9],
]
top = np.ones((nlay, ncpl), dtype=float)
top[0, :] = 10.
top[1, :] = 5.
top[2, :] = 0.
botm = np.zeros((nlay, ncpl), dtype=float)
botm[0, :] = 5.
botm[1, :] = 0.
botm[2, :] = -5.
grid = VertexGrid(
nlay=nlay,
ncpl=ncpl,
vertices=vertices,
cell2d=iverts,
top=top,
botm=botm,
)
thick = grid.thick()
assert np.allclose(thick, 5.), "thicknesses != 5."
sat_thick = grid.thick(array=grid.botm + 10.)
assert np.allclose(sat_thick, thick), "saturated thicknesses != 5."
sat_thick = grid.thick(array=grid.botm + 5.)
assert np.allclose(sat_thick, thick), "saturated thicknesses != 5."
sat_thick = grid.thick(array=grid.botm + 2.5)
assert np.allclose(sat_thick, 2.5), "saturated thicknesses != 2.5"
sat_thick = grid.thick(array=grid.botm)
assert np.allclose(sat_thick, 0.), "saturated thicknesses != 0."
sat_thick = grid.thick(array=grid.botm-100.)
assert np.allclose(sat_thick, 0.), "saturated thicknesses != 0."
return
|
def test_vertices_thick():
nlay, ncpl = 3, 5
vertices = [
[0, 0.0, 3.0],
[1, 1.0, 3.0],
[2, 2.0, 3.0],
[3, 0.0, 2.0],
[4, 1.0, 2.0],
[5, 2.0, 2.0],
[6, 0.0, 1.0],
[7, 1.0, 1.0],
[8, 2.0, 1.0],
[9, 0.0, 0.0],
[10, 1.0, 0.0],
]
iverts = [
[0, 0, 1, 4, 3],
[1, 1, 2, 5, 4],
[2, 3, 4, 7, 6],
[3, 4, 5, 8, 7],
[4, 6, 7, 10, 9],
[5, 0, 1, 4, 3],
[6, 1, 2, 5, 4],
[7, 3, 4, 7, 6],
[8, 4, 5, 8, 7],
[9, 6, 7, 10, 9],
[10, 0, 1, 4, 3],
[11, 1, 2, 5, 4],
[12, 3, 4, 7, 6],
[13, 4, 5, 8, 7],
[14, 6, 7, 10, 9],
]
top = np.ones((ncpl, ), dtype=float) * 10
botm = np.zeros((nlay, ncpl), dtype=float)
botm[0, :] = 5.
botm[1, :] = 0.
botm[2, :] = -5.
grid = VertexGrid(
nlay=nlay,
ncpl=ncpl,
vertices=vertices,
cell2d=iverts,
top=top,
botm=botm,
)
thick = grid.thick()
assert np.allclose(thick, 5.), "thicknesses != 5."
sat_thick = grid.thick(array=grid.botm + 10.)
assert np.allclose(sat_thick, thick), "saturated thicknesses != 5."
sat_thick = grid.thick(array=grid.botm + 5.)
assert np.allclose(sat_thick, thick), "saturated thicknesses != 5."
sat_thick = grid.thick(array=grid.botm + 2.5)
assert np.allclose(sat_thick, 2.5), "saturated thicknesses != 2.5"
sat_thick = grid.thick(array=grid.botm)
assert np.allclose(sat_thick, 0.), "saturated thicknesses != 0."
sat_thick = grid.thick(array=grid.botm-100.)
assert np.allclose(sat_thick, 0.), "saturated thicknesses != 0."
return
|
30,459 |
def enrich_dict_using_asset_properties(asset, asset_dict, endpoint_dict, full_values):
if 'properties' in asset:
for prop in asset['properties']:
if prop['name'] in ASSET_PROPERTIES_NAMES_MAP:
asset_dict[ASSET_PROPERTIES_NAMES_MAP[prop['name']]] = {'Value': prop['value'],
'LastUser': prop['last_reported_by']}
elif prop['name'] in ASSET_PROPERTIES_ENDPOINT_NAMES_MAP:
endpoint_dict[ASSET_PROPERTIES_ENDPOINT_NAMES_MAP[prop['name']]] = prop['value']
elif full_values:
if prop['name'] in FULL_ASSET_PROPERTIES_NAMES_MAP:
asset_dict[FULL_ASSET_PROPERTIES_NAMES_MAP[prop['name']]] = {'Value': prop['value'],
'LastUser': prop['last_reported_by']}
return None
|
def enrich_dict_using_asset_properties(asset, asset_dict, endpoint_dict, full_values):
if 'properties' in asset:
for prop in asset.get('properties', []):
if prop['name'] in ASSET_PROPERTIES_NAMES_MAP:
asset_dict[ASSET_PROPERTIES_NAMES_MAP[prop['name']]] = {'Value': prop['value'],
'LastUser': prop['last_reported_by']}
elif prop['name'] in ASSET_PROPERTIES_ENDPOINT_NAMES_MAP:
endpoint_dict[ASSET_PROPERTIES_ENDPOINT_NAMES_MAP[prop['name']]] = prop['value']
elif full_values:
if prop['name'] in FULL_ASSET_PROPERTIES_NAMES_MAP:
asset_dict[FULL_ASSET_PROPERTIES_NAMES_MAP[prop['name']]] = {'Value': prop['value'],
'LastUser': prop['last_reported_by']}
return None
|
4,473 |
def _check_sphere(sphere, info=None, sphere_units='m'):
from ..defaults import HEAD_SIZE_DEFAULT
from ..bem import fit_sphere_to_headshape, ConductorModel, get_fitting_dig
if sphere is None:
sphere = HEAD_SIZE_DEFAULT
if info is not None:
# Decide if we have enough dig points to do the auto fit
try:
get_fitting_dig(info, 'extra', verbose='error')
except (RuntimeError, ValueError):
pass
else:
sphere = 'auto'
if isinstance(sphere, str):
if sphere not in ('auto', 'eeglab'):
raise ValueError(f'sphere, if str, must be "auto" or "eeglab", '
f'got {sphere}')
assert info is not None
if sphere == 'auto':
R, r0, _ = fit_sphere_to_headshape(info, verbose=False, units='m')
sphere = tuple(r0) + (R,)
sphere_units = 'm'
elif sphere == 'eeglab':
montage = info.get_montage()
horizon_ch_names = ['Oz', 'Fpz', 'T7', 'T8']
for ch_name in horizon_ch_names:
if ch_name not in montage.ch_names:
raise ValueError(
                        f'sphere="eeglab" requires digitization points of '
f'the following electrode locations in the data: '
f'{", ".join(horizon_ch_names)}, but could not find: '
f'{ch_name}')
# Extracted from Mikołaj Magnuski's example
ch_pos = montage.get_positions()['ch_pos']
pos = np.stack([ch_pos[ch_name] for ch_name in horizon_ch_names])
# now we calculate the radius from T7 and T8 x position
# (we could use Oz and Fpz y positions as well)
radius = np.abs(pos[[2, 3], 0]).mean()
# then we obtain the x, y, z sphere center this way:
# x: x position of the Oz channel (should be very close to 0)
# y: y position of the T8 channel (should be very close to 0 too)
# z: average z position of Oz, Fpz, T7 and T8 (their z position
            # should be the same, so we could also use just one of these
# channels), it should be positive and somewhere around `0.03`
# (3 cm)
x = pos[0, 0]
y = pos[-1, 1]
z = pos[:, -1].mean()
sphere=(x, y, z, radius)
sphere_units = 'm'
            del x, y, z, radius, montage, ch_pos
elif isinstance(sphere, ConductorModel):
if not sphere['is_sphere'] or len(sphere['layers']) == 0:
raise ValueError('sphere, if a ConductorModel, must be spherical '
'with multiple layers, not a BEM or single-layer '
'sphere (got %s)' % (sphere,))
sphere = tuple(sphere['r0']) + (sphere['layers'][0]['rad'],)
sphere_units = 'm'
sphere = np.array(sphere, dtype=float)
if sphere.shape == ():
sphere = np.concatenate([[0.] * 3, [sphere]])
if sphere.shape != (4,):
raise ValueError('sphere must be float or 1D array of shape (4,), got '
'array-like of shape %s' % (sphere.shape,))
_check_option('sphere_units', sphere_units, ('m', 'mm'))
if sphere_units == 'mm':
sphere /= 1000.
sphere = np.array(sphere, float)
return sphere
|
def _check_sphere(sphere, info=None, sphere_units='m'):
from ..defaults import HEAD_SIZE_DEFAULT
from ..bem import fit_sphere_to_headshape, ConductorModel, get_fitting_dig
if sphere is None:
sphere = HEAD_SIZE_DEFAULT
if info is not None:
# Decide if we have enough dig points to do the auto fit
try:
get_fitting_dig(info, 'extra', verbose='error')
except (RuntimeError, ValueError):
pass
else:
sphere = 'auto'
if isinstance(sphere, str):
if sphere not in ('auto', 'eeglab'):
raise ValueError(f'sphere, if str, must be "auto" or "eeglab", '
f'got {sphere}')
assert info is not None
if sphere == 'auto':
R, r0, _ = fit_sphere_to_headshape(info, verbose=False, units='m')
sphere = tuple(r0) + (R,)
sphere_units = 'm'
elif sphere == 'eeglab':
montage = info.get_montage()
horizon_ch_names = ['Oz', 'Fpz', 'T7', 'T8']
for ch_name in horizon_ch_names:
if ch_name not in montage.ch_names:
raise ValueError(
                        f'sphere="eeglab" requires digitization points of '
f'the following electrode locations in the data: '
f'{", ".join(horizon_ch_names)}, but could not find: '
f'{ch_name}')
# Extracted from Mikołaj Magnuski's example
ch_pos = montage.get_positions()['ch_pos']
pos = np.stack([ch_pos[ch_name] for ch_name in horizon_ch_names])
# now we calculate the radius from T7 and T8 x position
# (we could use Oz and Fpz y positions as well)
radius = np.abs(pos[[2, 3], 0]).mean()
# then we obtain the x, y, z sphere center this way:
# x: x position of the Oz channel (should be very close to 0)
# y: y position of the T8 channel (should be very close to 0 too)
# z: average z position of Oz, Fpz, T7 and T8 (their z position
# should be the the same, so we could also use just one of these
# channels), it should be positive and somewhere around `0.03`
# (3 cm)
x = pos[0, 0]
y = pos[-1, 1]
z = pos[:, -1].mean()
sphere = (x, y, z, radius)
sphere_units = 'm'
            del x, y, z, radius, montage, ch_pos
elif isinstance(sphere, ConductorModel):
if not sphere['is_sphere'] or len(sphere['layers']) == 0:
raise ValueError('sphere, if a ConductorModel, must be spherical '
'with multiple layers, not a BEM or single-layer '
'sphere (got %s)' % (sphere,))
sphere = tuple(sphere['r0']) + (sphere['layers'][0]['rad'],)
sphere_units = 'm'
sphere = np.array(sphere, dtype=float)
if sphere.shape == ():
sphere = np.concatenate([[0.] * 3, [sphere]])
if sphere.shape != (4,):
raise ValueError('sphere must be float or 1D array of shape (4,), got '
'array-like of shape %s' % (sphere.shape,))
_check_option('sphere_units', sphere_units, ('m', 'mm'))
if sphere_units == 'mm':
sphere /= 1000.
sphere = np.array(sphere, float)
return sphere
|
30,443 |
def main(circle_artifacts):
print('Starting to create content artifact...')
print('creating dir for bundles...')
for bundle_dir in [BUNDLE_POST, BUNDLE_TEST, PACKS_BUNDLE]:
os.mkdir(bundle_dir)
add_tools_to_bundle(BUNDLE_POST)
convert_incident_fields_to_array()
for package_dir in DIR_TO_PREFIX:
# handles nested package directories
create_unifieds_and_copy(package_dir)
for content_dir in CONTENT_DIRS:
print(f'Copying dir {content_dir} to bundles...')
copy_dir_files(content_dir, BUNDLE_POST)
copy_test_files(BUNDLE_TEST)
# handle copying packs content to content_new.zip and content_test.zip
packs = get_child_directories(PACKS_DIR)
for pack in packs:
        # each pack directory has its own content subdirs, 'Integrations', 'Scripts', 'TestPlaybooks', 'Layouts' etc.
sub_dirs_paths = get_child_directories(pack)
for sub_dir_path in sub_dirs_paths:
dir_name = os.path.basename(sub_dir_path)
if dir_name == 'TestPlaybooks':
copy_test_files(BUNDLE_TEST, sub_dir_path)
else:
# handle one-level deep content
copy_dir_files(sub_dir_path, BUNDLE_POST)
if dir_name in DIR_TO_PREFIX.keys():
# then it's a directory with nested packages that need to be handled
# handle nested packages
create_unifieds_and_copy(sub_dir_path)
# handle copying packs content to packs_bundle for zipping to `content_packs.zip`
for pack in packs:
pack_name = os.path.basename(pack)
pack_dst = os.path.join(PACKS_BUNDLE, pack_name)
os.mkdir(pack_dst)
pack_dirs = get_child_directories(pack)
pack_files = get_child_files(pack)
# copy first level pack files over
for file_path in pack_files:
shutil.copy(file_path, os.path.join(pack_dst, os.path.basename(file_path)))
# handle content directories in the pack
for content_dir in pack_dirs:
dir_name = os.path.basename(content_dir)
dest_dir = os.path.join(pack_dst, dir_name)
os.mkdir(dest_dir)
if dir_name in DIR_TO_PREFIX.keys():
packages_dirs = get_child_directories(content_dir)
for package_dir in packages_dirs:
package_dir_name = os.path.basename(package_dir)
dest_package_dir = os.path.join(dest_dir, package_dir_name)
os.mkdir(dest_package_dir)
package_dir_with_slash = package_dir + '/'
merge_script_package_to_yml(package_dir_with_slash, dir_name, dest_path=dest_package_dir)
# also copy CHANGELOG markdown files over
package_files = get_child_files(package_dir)
changelog_files = [
file_path
for file_path in package_files if ('CHANGELOG' in file_path and file_path.endswith('.md'))
]
for md_file_path in changelog_files:
shutil.copyfile(md_file_path, os.path.join(dest_package_dir, os.path.basename(md_file_path)))
else:
if dir_name == INCIDENT_FIELDS_DIR:
convert_incident_fields_to_array(content_dir)
copy_dir_files(content_dir, dest_dir)
print('Copying content descriptor to bundles')
for bundle_dir in [BUNDLE_POST, BUNDLE_TEST]:
shutil.copyfile('content-descriptor.json', os.path.join(bundle_dir, 'content-descriptor.json'))
print('copying common server doc to bundles')
shutil.copyfile('./Documentation/doc-CommonServer.json', os.path.join(BUNDLE_POST, 'doc-CommonServer.json'))
print('Compressing bundles...')
shutil.make_archive(ZIP_POST, 'zip', BUNDLE_POST)
shutil.make_archive(ZIP_TEST, 'zip', BUNDLE_TEST)
shutil.make_archive(ZIP_PACKS, 'zip', PACKS_BUNDLE)
shutil.copyfile(ZIP_POST + '.zip', os.path.join(circle_artifacts, ZIP_POST + '.zip'))
shutil.copyfile(ZIP_TEST + '.zip', os.path.join(circle_artifacts, ZIP_TEST + '.zip'))
shutil.copyfile(ZIP_PACKS + '.zip', os.path.join(circle_artifacts, ZIP_PACKS + '.zip'))
shutil.copyfile("./Tests/id_set.json", os.path.join(circle_artifacts, "id_set.json"))
shutil.copyfile('release-notes.md', os.path.join(circle_artifacts, 'release-notes.md'))
print(f'finished create content artifact at {circle_artifacts}')
|
def main(circle_artifacts):
print('Starting to create content artifact...')
print('creating dir for bundles...')
for bundle_dir in [BUNDLE_POST, BUNDLE_TEST, PACKS_BUNDLE]:
os.mkdir(bundle_dir)
add_tools_to_bundle(BUNDLE_POST)
convert_incident_fields_to_array()
for package_dir in DIR_TO_PREFIX:
# handles nested package directories
create_unifieds_and_copy(package_dir)
for content_dir in CONTENT_DIRS:
print(f'Copying dir {content_dir} to bundles...')
copy_dir_files(content_dir, BUNDLE_POST)
copy_test_files(BUNDLE_TEST)
# handle copying packs content to content_new.zip and content_test.zip
packs = get_child_directories(PACKS_DIR)
for pack in packs:
        # each pack directory has its own content subdirs, 'Integrations', 'Scripts', 'TestPlaybooks', 'Layouts' etc.
sub_dirs_paths = get_child_directories(pack)
for sub_dir_path in sub_dirs_paths:
dir_name = os.path.basename(sub_dir_path)
if dir_name == 'TestPlaybooks':
copy_test_files(BUNDLE_TEST, sub_dir_path)
else:
# handle one-level deep content
copy_dir_files(sub_dir_path, BUNDLE_POST)
if dir_name in DIR_TO_PREFIX.keys():
# then it's a directory with nested packages that need to be handled
# handle nested packages
create_unifieds_and_copy(sub_dir_path)
# handle copying packs content to packs_bundle for zipping to `content_packs.zip`
for pack in packs:
pack_name = os.path.basename(pack)
pack_dst = os.path.join(PACKS_BUNDLE, pack_name)
os.mkdir(pack_dst)
pack_dirs = get_child_directories(pack)
pack_files = get_child_files(pack)
# copy first level pack files over
for file_path in pack_files:
shutil.copy(file_path, os.path.join(pack_dst, os.path.basename(file_path)))
# handle content directories in the pack
for content_dir in pack_dirs:
dir_name = os.path.basename(content_dir)
dest_dir = os.path.join(pack_dst, dir_name)
os.mkdir(dest_dir)
if dir_name in DIR_TO_PREFIX.keys():
packages_dirs = get_child_directories(content_dir)
for package_dir in packages_dirs:
package_dir_name = os.path.basename(package_dir)
dest_package_dir = os.path.join(dest_dir, package_dir_name)
os.mkdir(dest_package_dir)
package_dir_with_slash = package_dir + '/'
merge_script_package_to_yml(package_dir_with_slash, dir_name, dest_path=dest_package_dir)
# also copy CHANGELOG markdown files over
package_files = get_child_files(package_dir)
changelog_files = [
file_path
for file_path in package_files if 'CHANGELOG.md' in file_path
]
for md_file_path in changelog_files:
shutil.copyfile(md_file_path, os.path.join(dest_package_dir, os.path.basename(md_file_path)))
else:
if dir_name == INCIDENT_FIELDS_DIR:
convert_incident_fields_to_array(content_dir)
copy_dir_files(content_dir, dest_dir)
print('Copying content descriptor to bundles')
for bundle_dir in [BUNDLE_POST, BUNDLE_TEST]:
shutil.copyfile('content-descriptor.json', os.path.join(bundle_dir, 'content-descriptor.json'))
print('copying common server doc to bundles')
shutil.copyfile('./Documentation/doc-CommonServer.json', os.path.join(BUNDLE_POST, 'doc-CommonServer.json'))
print('Compressing bundles...')
shutil.make_archive(ZIP_POST, 'zip', BUNDLE_POST)
shutil.make_archive(ZIP_TEST, 'zip', BUNDLE_TEST)
shutil.make_archive(ZIP_PACKS, 'zip', PACKS_BUNDLE)
shutil.copyfile(ZIP_POST + '.zip', os.path.join(circle_artifacts, ZIP_POST + '.zip'))
shutil.copyfile(ZIP_TEST + '.zip', os.path.join(circle_artifacts, ZIP_TEST + '.zip'))
shutil.copyfile(ZIP_PACKS + '.zip', os.path.join(circle_artifacts, ZIP_PACKS + '.zip'))
shutil.copyfile("./Tests/id_set.json", os.path.join(circle_artifacts, "id_set.json"))
shutil.copyfile('release-notes.md', os.path.join(circle_artifacts, 'release-notes.md'))
print(f'finished create content artifact at {circle_artifacts}')
|
33,459 |
def main():
parser = argparse.ArgumentParser(
"python -m can.logconvert",
description="Convert a log file from one format to another.",
)
parser.add_argument(
"-o",
"--output",
dest="output",
help="""Output filename, type dependent on suffix see can.LogReader.""",
default=None,
required=True,
)
parser.add_argument(
"-s",
"--file_size",
dest="file_size",
type=int,
help="""Maximum file size in bytes. Rotate log file when size threshold is reached.""",
default=None,
)
parser.add_argument(
"infile",
metavar="input-file",
type=str,
help="Log file to convert from. For supported types see can.LogReader.",
)
# print help message when no arguments were given
if len(sys.argv) < 2:
parser.print_help(sys.stderr)
raise SystemExit(errno.EINVAL)
results = parser.parse_args()
reader = LogReader(results.infile)
if results.file_size:
logger = SizedRotatingLogger(
base_filename=results.output, max_bytes=results.file_size
)
else:
logger = Logger(filename=results.output)
try:
for m in reader: # pylint: disable=not-an-iterable
logger(m)
except KeyboardInterrupt:
pass
finally:
logger.stop()
|
def main():
parser = argparse.ArgumentParser(
"python -m can.logconvert",
description="Convert a log file from one format to another.",
)
parser.add_argument(
"-o",
"--output",
dest="output",
help="""Output filename, type dependent on suffix see can.LogReader.""",
default=None,
required=True,
)
parser.add_argument(
"-s",
"--file_size",
dest="file_size",
type=int,
help="""Maximum file size in bytes. Rotate log file when size threshold is reached.""",
default=None,
)
parser.add_argument(
"infile",
metavar="input-file",
type=str,
help="Log file to convert from. For supported types see can.LogReader.",
)
# print help message when no arguments were given
if len(sys.argv) < 2:
parser.print_help(sys.stderr)
raise SystemExit(errno.EINVAL)
results = parser.parse_args()
reader = LogReader(results.infile)
if results.file_size:
logger = SizedRotatingLogger(
base_filename=results.output, max_bytes=results.file_size
)
else:
logger = Logger(filename=results.output)
try:
for m in reader: # pylint: disable=not-an-iterable
logger(m)
except KeyboardInterrupt:
print("Interrupted! Quitting.")
finally:
logger.stop()
|
42,675 |
def test_events_filter_params(rotkehlchen_api_server, ethereum_accounts):
"""Tests filtering by transaction's events' properties
Test cases:
- Filtering by asset
- Filtering by protocol (counterparty)
- Filtering by both asset and a protocol
- Transaction has multiple related events
- Transaction has no related events
- Multiple transactions are queried
"""
logging.getLogger('rotkehlchen.externalapis.etherscan').disabled = True
rotki = rotkehlchen_api_server.rest_api.rotkehlchen
db = rotki.data.db
tx1 = create_tx(tx_hash=b'1')
tx2 = create_tx(tx_hash=b'2')
tx3 = create_tx(tx_hash=b'3')
event1 = create_tx_event(tx_hash=b'1', index=1, asset=A_ETH)
event2 = create_tx_event(tx_hash=b'1', index=2, asset=A_ETH, counterparty='EXAMPLE_PROTOCOL')
event3 = create_tx_event(tx_hash=b'1', index=3, asset=A_BTC, counterparty='EXAMPLE_PROTOCOL')
event4 = create_tx_event(tx_hash=b'2', index=4, asset=A_BTC)
dbethtx = DBEthTx(db)
dbethtx.add_ethereum_transactions([tx1, tx2, tx3], relevant_address=ethereum_accounts[0])
dbevents = DBHistoryEvents(db)
dbevents.add_history_events([event1, event2, event3, event4])
response = requests.get(
api_url_for(
rotkehlchen_api_server,
'ethereumtransactionsresource',
),
json={
'asset': A_ETH.serialize(),
},
)
result = assert_proper_response_with_result(response)
expected = generate_tx_entries_response([(tx1, [event1, event2])])
assert result['entries'] == expected
response = requests.get(
api_url_for(
rotkehlchen_api_server,
'ethereumtransactionsresource',
),
json={
'asset': A_BTC.serialize(),
},
)
result = assert_proper_response_with_result(response)
expected = generate_tx_entries_response([(tx1, [event3]), (tx2, [event4])])
    # For some reason this data can be reversed,
    # and we avoid failing with the help of this ugly check.
    # Dicts are not hashable, so it's not possible to use a better and simpler way
assert result['entries'] == expected or result['entries'] == list(reversed(expected))
response = requests.get(
api_url_for(
rotkehlchen_api_server,
'ethereumtransactionsresource',
),
json={
'protocol': 'EXAMPLE_PROTOCOL',
},
)
result = assert_proper_response_with_result(response)
expected = generate_tx_entries_response([(tx1, [event2, event3])])
assert result['entries'] == expected
response = requests.get(
api_url_for(
rotkehlchen_api_server,
'ethereumtransactionsresource',
),
json={
'asset': A_BTC.serialize(),
'protocol': 'EXAMPLE_PROTOCOL',
},
)
result = assert_proper_response_with_result(response)
expected = generate_tx_entries_response([(tx1, [event3])])
assert result['entries'] == expected
|
def test_events_filter_params(rotkehlchen_api_server, ethereum_accounts):
"""Tests filtering by transaction's events' properties
Test cases:
- Filtering by asset
- Filtering by protocol (counterparty)
- Filtering by both asset and a protocol
- Transaction has multiple related events
- Transaction has no related events
- Multiple transactions are queried
"""
logging.getLogger('rotkehlchen.externalapis.etherscan').disabled = True
rotki = rotkehlchen_api_server.rest_api.rotkehlchen
db = rotki.data.db
tx1 = create_tx(tx_hash=b'1')
tx2 = create_tx(tx_hash=b'2')
tx3 = create_tx(tx_hash=b'3')
event1 = create_tx_event(tx_hash=b'1', index=1, asset=A_ETH)
event2 = create_tx_event(tx_hash=b'1', index=2, asset=A_ETH, counterparty='EXAMPLE_PROTOCOL')
event3 = create_tx_event(tx_hash=b'1', index=3, asset=A_BTC, counterparty='EXAMPLE_PROTOCOL')
event4 = create_tx_event(tx_hash=b'2', index=4, asset=A_BTC)
dbethtx = DBEthTx(db)
dbethtx.add_ethereum_transactions([tx1, tx2, tx3], relevant_address=ethereum_accounts[0])
dbevents = DBHistoryEvents(db)
dbevents.add_history_events([event1, event2, event3, event4])
response = requests.get(
api_url_for(
rotkehlchen_api_server,
'ethereumtransactionsresource',
),
json={
'asset': A_ETH.serialize(),
},
)
result = assert_proper_response_with_result(response)
expected = generate_tx_entries_response([(tx1, [event1, event2])])
assert result['entries'] == expected
response = requests.get(
api_url_for(
rotkehlchen_api_server,
'ethereumtransactionsresource',
),
json={'asset': A_BTC.serialize()},
)
result = assert_proper_response_with_result(response)
expected = generate_tx_entries_response([(tx1, [event3]), (tx2, [event4])])
    # For some reason this data can be reversed,
    # and we avoid failing with the help of this ugly check.
    # Dicts are not hashable, so it's not possible to use a better and simpler way
assert result['entries'] == expected or result['entries'] == list(reversed(expected))
response = requests.get(
api_url_for(
rotkehlchen_api_server,
'ethereumtransactionsresource',
),
json={
'protocol': 'EXAMPLE_PROTOCOL',
},
)
result = assert_proper_response_with_result(response)
expected = generate_tx_entries_response([(tx1, [event2, event3])])
assert result['entries'] == expected
response = requests.get(
api_url_for(
rotkehlchen_api_server,
'ethereumtransactionsresource',
),
json={
'asset': A_BTC.serialize(),
'protocol': 'EXAMPLE_PROTOCOL',
},
)
result = assert_proper_response_with_result(response)
expected = generate_tx_entries_response([(tx1, [event3])])
assert result['entries'] == expected
|
13,982 |
def cross_correlation_histogram(
binned_st1, binned_st2, window='full', border_correction=False,
binary=False, kernel=None, method='speed', cross_corr_coef=False):
"""
Computes the cross-correlation histogram (CCH) between two binned spike
trains `binned_st1` and `binned_st2`.
Parameters
----------
binned_st1, binned_st2 : elephant.conversion.BinnedSpikeTrain
Binned spike trains to cross-correlate. The input spike trains can have
any `t_start` and `t_stop`.
window : {'valid', 'full'} or list of int, optional
‘full’: This returns the cross-correlation at each point of overlap,
with an output shape of (N+M-1,). At the end-points of the
cross-correlogram, the signals do not overlap completely, and
boundary effects may be seen.
‘valid’: Mode valid returns output of length max(M, N) - min(M, N) + 1.
The cross-correlation product is only given for points where
the signals overlap completely.
Values outside the signal boundary have no effect.
List of integers (min_lag, max_lag):
The entries of window are two integers representing the left and
right extremes (expressed as number of bins) where the
cross-correlation is computed.
Default: 'full'
border_correction : bool, optional
whether to correct for the border effect. If True, the value of the
CCH at bin b (for b=-H,-H+1, ...,H, where H is the CCH half-length)
is multiplied by the correction factor:
(H+1)/(H+1-|b|),
which linearly corrects for loss of bins at the edges.
Default: False
binary : bool, optional
whether to binarize spikes from the same spike train falling in the
same bin. If True, such spikes are considered as a single spike;
otherwise they are considered as different spikes.
Default: False.
kernel : array or None, optional
A one dimensional array containing an optional smoothing kernel applied
to the resulting CCH. The length N of the kernel indicates the
smoothing window. The smoothing window cannot be larger than the
maximum lag of the CCH. The kernel is normalized to unit area before
being applied to the resulting CCH. Popular choices for the kernel are
* normalized boxcar kernel: numpy.ones(N)
* hamming: numpy.hamming(N)
* hanning: numpy.hanning(N)
* bartlett: numpy.bartlett(N)
If None is specified, the CCH is not smoothed.
Default: None
method : string, optional
Defines the algorithm to use. "speed" uses numpy.correlate to calculate
the correlation between two binned spike trains using a non-sparse data
representation. Due to various optimizations, it is the fastest
        realization. In contrast, the option "memory" uses its own
implementation to calculate the correlation based on sparse matrices,
which is more memory efficient but slower than the "speed" option.
Default: "speed"
cross_corr_coef : bool, optional
Normalizes the CCH to obtain the cross-correlation coefficient
function ranging from -1 to 1 according to Equation (5.10) in [1]_.
See Notes.
Returns
-------
cch_result : neo.AnalogSignal
Containing the cross-correlation histogram between `binned_st1` and
`binned_st2`.
Offset bins correspond to correlations at a delay equivalent
to the difference between the spike times of `binned_st1`
("reference neuron") and those of `binned_st2` ("target neruon"): an
entry at positive lags corresponds to a spike in `binned_st2` following
a spike in `binned_st1` bins to the right, and an entry at negative
lags corresponds to a spike in `binned_st1` following a spike in
`binned_st2`.
To illustrate this definition, consider two spike trains with the same
`t_start` and `t_stop`:
`binned_st1` ('reference neuron') : 0 0 0 0 1 0 0 0 0 0 0
`binned_st2` ('target neuron') : 0 0 0 0 0 0 0 1 0 0 0
Here, the CCH will have an entry of `1` at `lag=+3`.
Consistent with the definition of AnalogSignals, the time axis
represents the left bin borders of each histogram bin. For example,
the time axis might be:
`np.array([-2.5 -1.5 -0.5 0.5 1.5]) * ms`
lags : np.ndarray
Contains the IDs of the individual histogram bins, where the central
        bin has ID 0, bins to the left have negative IDs and bins to the right
have positive IDs, e.g.,:
`np.array([-3, -2, -1, 0, 1, 2, 3])`
Notes
-----
The Eq. (5.10) in [1]_ is valid for binned spike trains with at most one
spike per bin. For a general case, refer to the implementation of
`_covariance_sparse()`.
References
----------
.. [1] "Analysis of parallel spike trains", 2010, Gruen & Rotter, Vol 7.
Example
-------
Plot the cross-correlation histogram between two Poisson spike trains
>>> import elephant
>>> import matplotlib.pyplot as plt
>>> import quantities as pq
>>> binned_st1 = elephant.conversion.BinnedSpikeTrain(
... elephant.spike_train_generation.homogeneous_poisson_process(
... 10. * pq.Hz, t_start=0 * pq.ms, t_stop=5000 * pq.ms),
... binsize=5. * pq.ms)
>>> binned_st2 = elephant.conversion.BinnedSpikeTrain(
... elephant.spike_train_generation.homogeneous_poisson_process(
... 10. * pq.Hz, t_start=0 * pq.ms, t_stop=5000 * pq.ms),
... binsize=5. * pq.ms)
>>> cc_hist = \
... elephant.spike_train_correlation.cross_correlation_histogram(
... binned_st1, binned_st2, window=[-30,30],
... border_correction=False,
... binary=False, kernel=None, method='memory')
>>> plt.bar(left=cc_hist[0].times.magnitude,
... height=cc_hist[0][:, 0].magnitude,
... width=cc_hist[0].sampling_period.magnitude)
>>> plt.xlabel('time (' + str(cc_hist[0].times.units) + ')')
>>> plt.ylabel('cross-correlation histogram')
>>> plt.axis('tight')
>>> plt.show()
Alias
-----
`cch`
"""
# Check that the spike trains are binned with the same temporal
# resolution
if binned_st1.matrix_rows != 1 or binned_st2.matrix_rows != 1:
raise ValueError("Spike trains must be one dimensional")
if not np.isclose(binned_st1.binsize.simplified.magnitude,
binned_st2.binsize.simplified.magnitude):
raise ValueError("Bin sizes must be equal")
binsize = binned_st1.binsize
window_min = -binned_st1.num_bins + 1
window_max = binned_st2.num_bins - 1
t_start_shift = (binned_st2.t_start - binned_st1.t_start) / binsize
t_start_shift = t_start_shift.simplified.magnitude
# In the examples below we fix st2 and "move" st1.
# Zero-lag is equal to `max(st1.t_start, st2.t_start)`.
# Binned spiketrains (t_start and t_stop) with binsize=1ms:
# 1) st1=[3, 8] ms, st2=[1, 13] ms
# t_start_shift = -2 ms
# zero-lag is at 3 ms
# 2) st1=[1, 7] ms, st2=[2, 9] ms
# t_start_shift = 1 ms
# zero-lag is at 2 ms
# 3) st1=[1, 7] ms, st2=[4, 6] ms
# t_start_shift = 3 ms
# zero-lag is at 4 ms
    # Set the time window in which the cch is computed
if isinstance(window[0], int) and isinstance(window[1], int):
# ex. 1) lags range: [w[0] - 2, w[1] - 2] ms
# ex. 2) lags range: [w[0] + 1, w[1] + 1] ms
# ex. 3) lags range: [w[0] + 3, w[0] + 3] ms
if window[0] >= window[1] or window[0] <= window_min \
or window[1] >= window_max:
raise ValueError(
"The window exceeds the length of the spike trains")
left_edge, right_edge = window[0], window[1]
lags = np.arange(left_edge + t_start_shift,
right_edge + 1 + t_start_shift, dtype=np.int32)
cch_mode = 'pad'
elif window == 'full':
# cch computed for all the possible entries
# ex. 1) lags range: [-6, 9] ms
# ex. 2) lags range: [-4, 7] ms
# ex. 3) lags range: [-2, 4] ms
left_edge = window_min
right_edge = window_max
lags = np.arange(left_edge + t_start_shift,
right_edge + 1 + t_start_shift, dtype=np.int32)
cch_mode = window
elif window == 'valid':
if binned_st1.num_bins < binned_st2.num_bins:
# ex. 1) lags range: [-2, 5] ms
# ex. 2) lags range: [1, 2] ms
left_edge = (binned_st2.t_start -
binned_st1.t_start) / binsize
right_edge = (binned_st2.t_stop -
binned_st1.t_stop) / binsize
else:
# ex. 3) lags range: [-1, 3] ms
left_edge = (binned_st2.t_stop -
binned_st1.t_stop) / binsize
right_edge = (binned_st2.t_start -
binned_st1.t_start) / binsize
right_edge = int(right_edge.simplified.magnitude)
left_edge = int(left_edge.simplified.magnitude)
lags = np.arange(left_edge, right_edge + 1, dtype=np.int32)
cch_mode = window
else:
raise ValueError("Invalid window parameter")
if binary:
binned_st1 = binned_st1.binarize(copy=True)
binned_st2 = binned_st2.binarize(copy=True)
cch_builder = _CrossCorrHist(binned_st1, binned_st2,
window=(left_edge, right_edge))
if method == 'memory':
cross_corr = cch_builder.correlate_memory(cch_mode=cch_mode)
else:
cross_corr = cch_builder.correlate_speed(cch_mode=cch_mode)
if border_correction:
cross_corr = cch_builder.border_correction(cross_corr)
if kernel is not None:
cross_corr = cch_builder.kernel_smoothing(cross_corr, kernel=kernel)
if cross_corr_coef:
cross_corr = cch_builder.cross_corr_coef(cross_corr)
# Transform the array count into an AnalogSignal
cch_result = neo.AnalogSignal(
signal=cross_corr.reshape(cross_corr.size, 1),
units=pq.dimensionless,
t_start=(lags[0] - 0.5) * binned_st1.binsize,
sampling_period=binned_st1.binsize)
# Return only the hist_bins bins and counts before and after the
# central one
return cch_result, lags
|
def cross_correlation_histogram(
binned_st1, binned_st2, window='full', border_correction=False,
binary=False, kernel=None, method='speed', cross_corr_coef=False):
"""
Computes the cross-correlation histogram (CCH) between two binned spike
trains `binned_st1` and `binned_st2`.
Parameters
----------
binned_st1, binned_st2 : elephant.conversion.BinnedSpikeTrain
Binned spike trains to cross-correlate. The input spike trains can have
any `t_start` and `t_stop`.
window : {'valid', 'full'} or list of int, optional
‘full’: This returns the cross-correlation at each point of overlap,
with an output shape of (N+M-1,). At the end-points of the
cross-correlogram, the signals do not overlap completely, and
boundary effects may be seen.
‘valid’: Mode valid returns output of length max(M, N) - min(M, N) + 1.
The cross-correlation product is only given for points where
the signals overlap completely.
Values outside the signal boundary have no effect.
List of integers (min_lag, max_lag):
The entries of window are two integers representing the left and
right extremes (expressed as number of bins) where the
cross-correlation is computed.
Default: 'full'
border_correction : bool, optional
whether to correct for the border effect. If True, the value of the
CCH at bin b (for b=-H,-H+1, ...,H, where H is the CCH half-length)
is multiplied by the correction factor:
(H+1)/(H+1-|b|),
which linearly corrects for loss of bins at the edges.
Default: False
binary : bool, optional
whether to binarize spikes from the same spike train falling in the
same bin. If True, such spikes are considered as a single spike;
otherwise they are considered as different spikes.
Default: False.
kernel : array or None, optional
A one dimensional array containing an optional smoothing kernel applied
to the resulting CCH. The length N of the kernel indicates the
smoothing window. The smoothing window cannot be larger than the
maximum lag of the CCH. The kernel is normalized to unit area before
being applied to the resulting CCH. Popular choices for the kernel are
* normalized boxcar kernel: numpy.ones(N)
* hamming: numpy.hamming(N)
* hanning: numpy.hanning(N)
* bartlett: numpy.bartlett(N)
If None is specified, the CCH is not smoothed.
Default: None
method : string, optional
Defines the algorithm to use. "speed" uses numpy.correlate to calculate
the correlation between two binned spike trains using a non-sparse data
representation. Due to various optimizations, it is the fastest
        realization. In contrast, the option "memory" uses its own
implementation to calculate the correlation based on sparse matrices,
which is more memory efficient but slower than the "speed" option.
Default: "speed"
cross_corr_coef : bool, optional
Normalizes the CCH to obtain the cross-correlation coefficient
function ranging from -1 to 1 according to Equation (5.10) in [1]_.
See Notes.
Returns
-------
cch_result : neo.AnalogSignal
Containing the cross-correlation histogram between `binned_st1` and
`binned_st2`.
Offset bins correspond to correlations at a delay equivalent
to the difference between the spike times of `binned_st1`
("reference neuron") and those of `binned_st2` ("target neuron"): an
entry at positive lags corresponds to a spike in `binned_st2` following
a spike in `binned_st1` bins to the right, and an entry at negative
lags corresponds to a spike in `binned_st1` following a spike in
`binned_st2`.
To illustrate this definition, consider two spike trains with the same
`t_start` and `t_stop`:
`binned_st1` ('reference neuron') : 0 0 0 0 1 0 0 0 0 0 0
`binned_st2` ('target neuron') : 0 0 0 0 0 0 0 1 0 0 0
Here, the CCH will have an entry of `1` at `lag=+3`.
Consistent with the definition of AnalogSignals, the time axis
represents the left bin borders of each histogram bin. For example,
the time axis might be:
`np.array([-2.5 -1.5 -0.5 0.5 1.5]) * ms`
lags : np.ndarray
Contains the IDs of the individual histogram bins, where the central
        bin has ID 0, bins to the left have negative IDs and bins to the right
have positive IDs, e.g.,:
`np.array([-3, -2, -1, 0, 1, 2, 3])`
Notes
-----
The Eq. (5.10) in [1]_ is valid for binned spike trains with at most one
spike per bin. For a general case, refer to the implementation of
`_covariance_sparse()`.
References
----------
.. [1] "Analysis of parallel spike trains", 2010, Gruen & Rotter, Vol 7.
Example
-------
Plot the cross-correlation histogram between two Poisson spike trains
>>> import elephant
>>> import matplotlib.pyplot as plt
>>> import quantities as pq
>>> binned_st1 = elephant.conversion.BinnedSpikeTrain(
... elephant.spike_train_generation.homogeneous_poisson_process(
... 10. * pq.Hz, t_start=0 * pq.ms, t_stop=5000 * pq.ms),
... binsize=5. * pq.ms)
>>> binned_st2 = elephant.conversion.BinnedSpikeTrain(
... elephant.spike_train_generation.homogeneous_poisson_process(
... 10. * pq.Hz, t_start=0 * pq.ms, t_stop=5000 * pq.ms),
... binsize=5. * pq.ms)
>>> cc_hist = \
... elephant.spike_train_correlation.cross_correlation_histogram(
... binned_st1, binned_st2, window=[-30,30],
... border_correction=False,
... binary=False, kernel=None, method='memory')
>>> plt.bar(left=cc_hist[0].times.magnitude,
... height=cc_hist[0][:, 0].magnitude,
... width=cc_hist[0].sampling_period.magnitude)
>>> plt.xlabel('time (' + str(cc_hist[0].times.units) + ')')
>>> plt.ylabel('cross-correlation histogram')
>>> plt.axis('tight')
>>> plt.show()
Alias
-----
`cch`
"""
# Check that the spike trains are binned with the same temporal
# resolution
if binned_st1.matrix_rows != 1 or binned_st2.matrix_rows != 1:
raise ValueError("Spike trains must be one dimensional")
if not np.isclose(binned_st1.binsize.simplified.magnitude,
binned_st2.binsize.simplified.magnitude):
raise ValueError("Bin sizes must be equal")
binsize = binned_st1.binsize
window_min = -binned_st1.num_bins + 1
window_max = binned_st2.num_bins - 1
t_start_shift = (binned_st2.t_start - binned_st1.t_start) / binsize
t_start_shift = t_start_shift.simplified.magnitude
# In the examples below we fix st2 and "move" st1.
# Zero-lag is equal to `max(st1.t_start, st2.t_start)`.
# Binned spiketrains (t_start and t_stop) with binsize=1ms:
# 1) st1=[3, 8] ms, st2=[1, 13] ms
# t_start_shift = -2 ms
# zero-lag is at 3 ms
# 2) st1=[1, 7] ms, st2=[2, 9] ms
# t_start_shift = 1 ms
# zero-lag is at 2 ms
# 3) st1=[1, 7] ms, st2=[4, 6] ms
# t_start_shift = 3 ms
# zero-lag is at 4 ms
    # Set the time window in which the cch is computed
if isinstance(window[0], int) and isinstance(window[1], int):
# ex. 1) lags range: [w[0] - 2, w[1] - 2] ms
# ex. 2) lags range: [w[0] + 1, w[1] + 1] ms
# ex. 3) lags range: [w[0] + 3, w[0] + 3] ms
if window[0] >= window[1] or window[0] <= window_min \
or window[1] >= window_max:
raise ValueError(
"The window exceeds the length of the spike trains")
left_edge, right_edge = window[0], window[1]
lags = np.arange(left_edge + t_start_shift,
right_edge + 1 + t_start_shift, dtype=np.int32)
cch_mode = 'pad'
elif window == 'full':
# cch computed for all the possible entries
# ex. 1) lags range: [-6, 9] ms
# ex. 2) lags range: [-4, 7] ms
# ex. 3) lags range: [-2, 4] ms
left_edge = window_min
right_edge = window_max
lags = np.arange(left_edge + t_start_shift,
right_edge + 1 + t_start_shift, dtype=np.int32)
cch_mode = window
elif window == 'valid':
if binned_st1.num_bins < binned_st2.num_bins:
# ex. 1) lags range: [-2, 5] ms
# ex. 2) lags range: [1, 2] ms
left_edge = (binned_st2.t_start -
binned_st1.t_start) / binsize
right_edge = (binned_st2.t_stop -
binned_st1.t_stop) / binsize
else:
# ex. 3) lags range: [-1, 3] ms
left_edge = (binned_st2.t_stop -
binned_st1.t_stop) / binsize
right_edge = (binned_st2.t_start -
binned_st1.t_start) / binsize
right_edge = int(right_edge.simplified.magnitude)
left_edge = int(left_edge.simplified.magnitude)
lags = np.arange(left_edge, right_edge + 1, dtype=np.int32)
cch_mode = window
else:
raise ValueError("Invalid window parameter")
if binary:
binned_st1 = binned_st1.binarize(copy=True)
binned_st2 = binned_st2.binarize(copy=True)
cch_builder = _CrossCorrHist(binned_st1, binned_st2,
window=(left_edge, right_edge))
if method == 'memory':
cross_corr = cch_builder.correlate_memory(cch_mode=cch_mode)
else:
cross_corr = cch_builder.correlate_speed(cch_mode=cch_mode)
if border_correction:
cross_corr = cch_builder.border_correction(cross_corr)
if kernel is not None:
cross_corr = cch_builder.kernel_smoothing(cross_corr, kernel=kernel)
if cross_corr_coef:
cross_corr = cch_builder.cross_corr_coef(cross_corr)
# Transform the array count into an AnalogSignal
cch_result = neo.AnalogSignal(
signal=cross_corr.reshape(cross_corr.size, 1),
units=pq.dimensionless,
t_start=(lags[0] - 0.5) * binned_st1.binsize,
sampling_period=binned_st1.binsize)
# Return only the hist_bins bins and counts before and after the
# central one
return cch_result, lags
|
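A quick check of the lag bookkeeping in the cross_correlation_histogram comments above (examples 1-3): this is an illustrative sketch in plain Python, not part of the dataset row, and it mirrors the comment's variable names rather than any elephant API.

# Reproduce the t_start_shift arithmetic from the examples in the comments,
# assuming a 1 ms bin size and t_start values given in ms.
def t_start_shift_bins(st1_t_start, st2_t_start, binsize=1.0):
    # shift (in bins) applied to the lag axis; the zero-lag bin sits at
    # max(st1.t_start, st2.t_start)
    return (st2_t_start - st1_t_start) / binsize

for st1_start, st2_start in [(3, 1), (1, 2), (1, 4)]:
    shift = t_start_shift_bins(st1_start, st2_start)
    print(f"t_start_shift = {shift:+.0f} ms, zero-lag at {max(st1_start, st2_start)} ms")
# -> -2/+1/+3 ms and zero-lag at 3/2/4 ms, matching examples 1), 2) and 3)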
22,037 |
def open(path, convert=False, shuffle=False, fs_options={}, fs=None, *args, **kwargs):
"""Open a DataFrame from file given by path.
Example:
>>> df = vaex.open('sometable.hdf5')
>>> df = vaex.open('somedata*.csv', convert='bigdata.hdf5')
:param str or list path: local or absolute path to file, or glob string, or list of paths
:param convert: Uses `dataframe.export` when convert is a path. If True, ``convert=path+'.hdf5'``
The conversion is skipped if the input file or conversion argument did not change.
:param bool shuffle: shuffle converted DataFrame or not
:param dict fs_options: Extra arguments passed to an optional file system if needed:
* Amazon AWS S3
* `anonymous` - access file without authentication (public files)
* `access_key` - AWS access key, if not provided will use the standard env vars, or the `~/.aws/credentials` file
* `secret_key` - AWS secret key, similar to `access_key`
* `profile` - If multiple profiles are present in `~/.aws/credentials`, pick this one instead of 'default', see https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-files.html
            * `region` - AWS Region, e.g. 'us-east-1', will be determined automatically if not provided.
* `endpoint_override` - URL/ip to connect to, instead of AWS, e.g. 'localhost:9000' for minio
* Google Cloud Storage
* :py:class:`gcsfs.core.GCSFileSystem`
In addition you can pass the boolean "cache" option.
:param group: (optional) Specify the group to be read from and HDF5 file. By default this is set to "/table".
:param fs: Apache Arrow FileSystem object, or FSSpec FileSystem object, if specified, fs_options should be empty.
:param args: extra arguments for file readers that need it
:param kwargs: extra keyword arguments
:return: return a DataFrame on success, otherwise None
:rtype: DataFrame
Cloud storage support:
Vaex supports streaming of HDF5 files from Amazon AWS S3 and Google Cloud Storage.
Files are by default cached in $HOME/.vaex/file-cache/(s3|gs) such that successive access
is as fast as native disk access.
The following common fs_options are used for S3 access:
* anon: Use anonymous access or not (false by default). (Allowed values are: true,True,1,false,False,0)
* cache: Use the disk cache or not, only set to false if the data should be accessed once. (Allowed values are: true,True,1,false,False,0)
All fs_options can also be encoded in the file path as a query string.
Examples:
>>> df = vaex.open('s3://vaex/taxi/yellow_taxi_2015_f32s.hdf5', fs_options={'anonymous': True})
>>> df = vaex.open('s3://vaex/taxi/yellow_taxi_2015_f32s.hdf5?anon=true')
>>> df = vaex.open('s3://mybucket/path/to/file.hdf5', fs_options={'access_key': my_key, 'secret_key': my_secret_key})
>>> df = vaex.open(f's3://mybucket/path/to/file.hdf5?access_key={{my_key}}&secret_key={{my_secret_key}}')
>>> df = vaex.open('s3://mybucket/path/to/file.hdf5?profile=myproject')
Google Cloud Storage support:
The following fs_options are used for GCP access:
    * token: Authentication method for GCP. Use 'anon' for anonymous access. See https://gcsfs.readthedocs.io/en/latest/index.html#credentials for more details.
* cache: Use the disk cache or not, only set to false if the data should be accessed once. (Allowed values are: true,True,1,false,False,0).
* project and other arguments are passed to :py:class:`gcsfs.core.GCSFileSystem`
Examples:
>>> df = vaex.open('gs://vaex-data/airlines/us_airline_data_1988_2019.hdf5', fs_options={'token': None})
>>> df = vaex.open('gs://vaex-data/airlines/us_airline_data_1988_2019.hdf5?token=anon')
>>> df = vaex.open('gs://vaex-data/testing/xys.hdf5?token=anon&cache=False')
"""
import vaex
import vaex.convert
try:
if not isinstance(path, (list, tuple)):
# remote and clusters only support single path, not a list
path = vaex.file.stringyfy(path)
if path in aliases:
path = aliases[path]
path = vaex.file.stringyfy(path)
if path.startswith("http://") or path.startswith("ws://") or \
path.startswith("vaex+wss://") or path.startswith("wss://") or \
path.startswith("vaex+http://") or path.startswith("vaex+ws://"):
server, name = path.rsplit("/", 1)
url = urlparse(path)
if '?' in name:
name = name[:name.index('?')]
extra_args = {key: values[0] for key, values in parse_qs(url.query).items()}
if 'token' in extra_args:
kwargs['token'] = extra_args['token']
if 'token_trusted' in extra_args:
kwargs['token_trusted'] = extra_args['token_trusted']
client = vaex.connect(server, **kwargs)
return client[name]
if path.startswith("cluster"):
import vaex.enterprise.distributed
return vaex.enterprise.distributed.open(path, *args, **kwargs)
import vaex.file
import glob
if isinstance(path, str):
paths = [path]
else:
paths = path
filenames = []
for path in paths:
path = vaex.file.stringyfy(path)
if path in aliases:
path = aliases[path]
path = vaex.file.stringyfy(path)
naked_path, options = vaex.file.split_options(path)
if glob.has_magic(naked_path):
filenames.extend(list(sorted(vaex.file.glob(path, fs_options=fs_options, fs=fs))))
else:
filenames.append(path)
df = None
if len(filenames) == 0:
raise IOError(f'File pattern did not match anything {path}')
filename_hdf5 = vaex.convert._convert_name(filenames, shuffle=shuffle)
filename_hdf5_noshuffle = vaex.convert._convert_name(filenames, shuffle=False)
if len(filenames) == 1:
path = filenames[0]
# # naked_path, _ = vaex.file.split_options(path, fs_options)
_, ext, _ = vaex.file.split_ext(path)
if ext == '.csv': # special case for csv
return vaex.from_csv(path, fs_options=fs_options, fs=fs, convert=convert, **kwargs)
if convert:
path_output = convert if isinstance(convert, str) else filename_hdf5
vaex.convert.convert(
path_input=path, fs_options_input=fs_options, fs_input=fs,
path_output=path_output, fs_options_output=fs_options, fs_output=fs,
*args, **kwargs
)
ds = vaex.dataset.open(path_output, fs_options=fs_options, fs=fs, **kwargs)
else:
ds = vaex.dataset.open(path, fs_options=fs_options, fs=fs, **kwargs)
df = vaex.from_dataset(ds)
if df is None:
if os.path.exists(path):
raise IOError('Could not open file: {}, did you install vaex-hdf5? Is the format supported?'.format(path))
elif len(filenames) > 1:
if convert not in [True, False]:
filename_hdf5 = convert
else:
filename_hdf5 = vaex.convert._convert_name(filenames, shuffle=shuffle)
if os.path.exists(filename_hdf5) and convert: # also check mtime
df = vaex.open(filename_hdf5)
else:
dfs = []
for filename in filenames:
dfs.append(vaex.open(filename, convert=bool(convert), shuffle=shuffle, **kwargs))
df = vaex.concat(dfs)
if convert:
if shuffle:
df = df.shuffle()
df.export_hdf5(filename_hdf5)
df = vaex.open(filename_hdf5)
if df is None:
raise IOError('Unknown error opening: {}'.format(path))
return df
except:
logging.getLogger("vaex").error("error opening %r" % path)
raise
|
def open(path, convert=False, shuffle=False, fs_options={}, fs=None, *args, **kwargs):
"""Open a DataFrame from file given by path.
Example:
>>> df = vaex.open('sometable.hdf5')
>>> df = vaex.open('somedata*.csv', convert='bigdata.hdf5')
:param str or list path: local or absolute path to file, or glob string, or list of paths
:param convert: Uses `dataframe.export` when convert is a path. If True, ``convert=path+'.hdf5'``
The conversion is skipped if the input file or conversion argument did not change.
:param bool shuffle: shuffle converted DataFrame or not
:param dict fs_options: Extra arguments passed to an optional file system if needed:
* Amazon AWS S3
* `anonymous` - access file without authentication (public files)
* `access_key` - AWS access key, if not provided will use the standard env vars, or the `~/.aws/credentials` file
* `secret_key` - AWS secret key, similar to `access_key`
* `profile` - If multiple profiles are present in `~/.aws/credentials`, pick this one instead of 'default', see https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-files.html
            * `region` - AWS Region, e.g. 'us-east-1', will be determined automatically if not provided.
* `endpoint_override` - URL/ip to connect to, instead of AWS, e.g. 'localhost:9000' for minio
* Google Cloud Storage
* :py:class:`gcsfs.core.GCSFileSystem`
In addition you can pass the boolean "cache" option.
:param group: (optional) Specify the group to be read from a HDF5 file. By default this is set to "/table".
:param fs: Apache Arrow FileSystem object, or FSSpec FileSystem object, if specified, fs_options should be empty.
:param args: extra arguments for file readers that need it
:param kwargs: extra keyword arguments
:return: return a DataFrame on success, otherwise None
:rtype: DataFrame
Cloud storage support:
Vaex supports streaming of HDF5 files from Amazon AWS S3 and Google Cloud Storage.
Files are by default cached in $HOME/.vaex/file-cache/(s3|gs) such that successive access
is as fast as native disk access.
The following common fs_options are used for S3 access:
* anon: Use anonymous access or not (false by default). (Allowed values are: true,True,1,false,False,0)
* cache: Use the disk cache or not, only set to false if the data should be accessed once. (Allowed values are: true,True,1,false,False,0)
All fs_options can also be encoded in the file path as a query string.
Examples:
>>> df = vaex.open('s3://vaex/taxi/yellow_taxi_2015_f32s.hdf5', fs_options={'anonymous': True})
>>> df = vaex.open('s3://vaex/taxi/yellow_taxi_2015_f32s.hdf5?anon=true')
>>> df = vaex.open('s3://mybucket/path/to/file.hdf5', fs_options={'access_key': my_key, 'secret_key': my_secret_key})
>>> df = vaex.open(f's3://mybucket/path/to/file.hdf5?access_key={{my_key}}&secret_key={{my_secret_key}}')
>>> df = vaex.open('s3://mybucket/path/to/file.hdf5?profile=myproject')
Google Cloud Storage support:
The following fs_options are used for GCP access:
    * token: Authentication method for GCP. Use 'anon' for anonymous access. See https://gcsfs.readthedocs.io/en/latest/index.html#credentials for more details.
* cache: Use the disk cache or not, only set to false if the data should be accessed once. (Allowed values are: true,True,1,false,False,0).
* project and other arguments are passed to :py:class:`gcsfs.core.GCSFileSystem`
Examples:
>>> df = vaex.open('gs://vaex-data/airlines/us_airline_data_1988_2019.hdf5', fs_options={'token': None})
>>> df = vaex.open('gs://vaex-data/airlines/us_airline_data_1988_2019.hdf5?token=anon')
>>> df = vaex.open('gs://vaex-data/testing/xys.hdf5?token=anon&cache=False')
"""
import vaex
import vaex.convert
try:
if not isinstance(path, (list, tuple)):
# remote and clusters only support single path, not a list
path = vaex.file.stringyfy(path)
if path in aliases:
path = aliases[path]
path = vaex.file.stringyfy(path)
if path.startswith("http://") or path.startswith("ws://") or \
path.startswith("vaex+wss://") or path.startswith("wss://") or \
path.startswith("vaex+http://") or path.startswith("vaex+ws://"):
server, name = path.rsplit("/", 1)
url = urlparse(path)
if '?' in name:
name = name[:name.index('?')]
extra_args = {key: values[0] for key, values in parse_qs(url.query).items()}
if 'token' in extra_args:
kwargs['token'] = extra_args['token']
if 'token_trusted' in extra_args:
kwargs['token_trusted'] = extra_args['token_trusted']
client = vaex.connect(server, **kwargs)
return client[name]
if path.startswith("cluster"):
import vaex.enterprise.distributed
return vaex.enterprise.distributed.open(path, *args, **kwargs)
import vaex.file
import glob
if isinstance(path, str):
paths = [path]
else:
paths = path
filenames = []
for path in paths:
path = vaex.file.stringyfy(path)
if path in aliases:
path = aliases[path]
path = vaex.file.stringyfy(path)
naked_path, options = vaex.file.split_options(path)
if glob.has_magic(naked_path):
filenames.extend(list(sorted(vaex.file.glob(path, fs_options=fs_options, fs=fs))))
else:
filenames.append(path)
df = None
if len(filenames) == 0:
raise IOError(f'File pattern did not match anything {path}')
filename_hdf5 = vaex.convert._convert_name(filenames, shuffle=shuffle)
filename_hdf5_noshuffle = vaex.convert._convert_name(filenames, shuffle=False)
if len(filenames) == 1:
path = filenames[0]
# # naked_path, _ = vaex.file.split_options(path, fs_options)
_, ext, _ = vaex.file.split_ext(path)
if ext == '.csv': # special case for csv
return vaex.from_csv(path, fs_options=fs_options, fs=fs, convert=convert, **kwargs)
if convert:
path_output = convert if isinstance(convert, str) else filename_hdf5
vaex.convert.convert(
path_input=path, fs_options_input=fs_options, fs_input=fs,
path_output=path_output, fs_options_output=fs_options, fs_output=fs,
*args, **kwargs
)
ds = vaex.dataset.open(path_output, fs_options=fs_options, fs=fs, **kwargs)
else:
ds = vaex.dataset.open(path, fs_options=fs_options, fs=fs, **kwargs)
df = vaex.from_dataset(ds)
if df is None:
if os.path.exists(path):
raise IOError('Could not open file: {}, did you install vaex-hdf5? Is the format supported?'.format(path))
elif len(filenames) > 1:
if convert not in [True, False]:
filename_hdf5 = convert
else:
filename_hdf5 = vaex.convert._convert_name(filenames, shuffle=shuffle)
if os.path.exists(filename_hdf5) and convert: # also check mtime
df = vaex.open(filename_hdf5)
else:
dfs = []
for filename in filenames:
dfs.append(vaex.open(filename, convert=bool(convert), shuffle=shuffle, **kwargs))
df = vaex.concat(dfs)
if convert:
if shuffle:
df = df.shuffle()
df.export_hdf5(filename_hdf5)
df = vaex.open(filename_hdf5)
if df is None:
raise IOError('Unknown error opening: {}'.format(path))
return df
except:
logging.getLogger("vaex").error("error opening %r" % path)
raise
|
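The docstring above notes that all fs_options can also be encoded in the file path as a query string. The following standalone sketch shows that equivalence with only the standard library; it is an illustration of the idea, not a reimplementation of vaex.file.split_options.

from urllib.parse import parse_qs, urlsplit

def split_fs_options(path):
    # split "s3://bucket/key?opt=val" into a bare path and an options dict
    parts = urlsplit(path)
    fs_options = {key: values[0] for key, values in parse_qs(parts.query).items()}
    return parts._replace(query="").geturl(), fs_options

print(split_fs_options("s3://vaex/taxi/yellow_taxi_2015_f32s.hdf5?anon=true&cache=false"))
# ('s3://vaex/taxi/yellow_taxi_2015_f32s.hdf5', {'anon': 'true', 'cache': 'false'})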
52,912 |
def get_reducers_type(ctx: FunctionContext) -> Type:
"""
Determine a more specific model type for functions that combine models.
This function operates on function *calls*. It analyzes each function call
by looking at the function definition and the arguments passed as part of
the function call, then determines a more specific return type for the
function call.
This method accepts a `FunctionContext` as part of the Mypy plugin
interface. This function context provides easy access to:
* `args`: List of "actual arguments" filling each "formal argument" of the
called function. "Actual arguments" are those passed to the function
as part of the function call. "Formal arguments" are the parameters
defined by the function definition. The same actual argument may serve
to fill multiple formal arguments. In some cases the relationship may
even be ambiguous. For example, calling `range(*args)`, the actual
argument `*args` may fill the `start`, `stop` or `step` formal
arguments, depending on the length of the list.
The `args` list is of length `num_formals`, with each element
corresponding to a formal argument. Each value in the `args` list is a
list of actual arguments which may fill the formal argument. For
example, in the function call `range(*args, num)`, `num` may fill the
`start`, `end` or `step` formal arguments depending on the length of
`args`, so type-checking needs to consider all of these possibilities.
* `arg_types`: Type annotation (or inferred type) of each argument. Like
`args`, this value is a list of lists with an outer list entry for each
formal argument and an inner list entry for each possible actual
argument for the formal argument.
* `arg_kinds`: "Kind" of argument passed to the function call. Argument
kinds include positional, star (`*args`), named (`x=y`) and star2
(`**kwargs`) arguments (among others). Like `args`, this value is a list
of lists.
* `context`: AST node representing the function call with all available
type information. Notable attributes include:
* `args` and `arg_kinds`: Simple list of actual arguments, not mapped to
formal arguments.
* `callee`: AST node representing the function being called. Typically
this is a `NameExpr`. To resolve this node to the function definition
it references, accessing `callee.node` will usually return either a
`FuncDef` or `Decorator` node.
* etc.
This function infers a more specific type for model-combining functions by
making certain assumptions about how the function operates based on the
order of its formal arguments and its return type.
If the return type is `Model[InT, XY_YZ_OutT]`, the output of each
argument is expected to be used as the input to the next argument. It's
therefore necessary to check that the output type of each model is
compatible with the input type of the following model. The combined model
has the type `Model[InT, OutT]`, where `InT` is the input type of the
first model and `OutT` is the output type of the last model.
If the return type is `Model[InT, XY_XY_OutT]`, all model arguments
receive input of the same type and are expected to produce output of the
same type. It's therefore necessary to check that all models have the same
input types and the same output types. The combined model has the type
`Model[InT, OutT]`, where `InT` is the input type of all model arguments
and `OutT` is the output type of all model arguments.
Raises:
AssertionError: Raised if a more specific model type couldn't be
determined, indicating that the default general return type should
be used.
"""
# Verify that we have a type-checking API and a default return type (presumably a
# `thinc.model.Model` instance)
assert isinstance(ctx.api, TypeChecker)
assert isinstance(ctx.default_return_type, Instance)
# Verify that we're inspecting a function call to a callable defined or decorated function
assert isinstance(ctx.context, CallExpr)
callee = ctx.context.callee
assert isinstance(callee, NameExpr)
callee_node = callee.node
assert isinstance(callee_node, (FuncDef, Decorator))
callee_node_type = callee_node.type
assert isinstance(callee_node_type, CallableType)
# Verify that the callable returns a `thinc.model.Model`
# TODO: Use `map_instance_to_supertype` to map subtypes to `Model` instances.
# I haven't implemented this myself because I wasn't able to figure out how to look up the
# `TypeInfo` for a class outside of the module being type-checked
callee_return_type = callee_node_type.ret_type
assert isinstance(callee_return_type, Instance)
assert callee_return_type.type.fullname == thinc_model_fullname
assert callee_return_type.args
assert len(callee_return_type.args) == 2
# Obtain the output type parameter of the `thinc.model.Model` return type
# of the called API function
out_type = callee_return_type.args[1]
# Check if the `Model`'s output type parameter is one of the "special
# type variables" defined to represent model composition (chaining) and
    # homogeneous reduction
assert isinstance(out_type, TypeVarType)
assert out_type.fullname
if out_type.fullname not in {intoin_outtoout_out_fullname, chained_out_fullname}:
return ctx.default_return_type
# Extract type of each argument used to call the API function, making sure that they are also
# `thinc.model.Model` instances
args = list(itertools.chain(*ctx.args))
arg_types = []
for arg_type in itertools.chain(*ctx.arg_types):
# TODO: Use `map_instance_to_supertype` to map subtypes to `Model` instances.
assert isinstance(arg_type, Instance)
assert arg_type.type.fullname == thinc_model_fullname
assert len(arg_type.args) == 2
arg_types.append(arg_type)
# Collect neighboring pairs of arguments and their types
arg_pairs = list(zip(args[:-1], args[1:]))
arg_types_pairs = list(zip(arg_types[:-1], arg_types[1:]))
# Determine if passed models will be chained or if they all need to have
# the same input and output type
if out_type.fullname == chained_out_fullname:
# Models will be chained, meaning that the output of each model will
# be passed as the input to the next model
# Verify that model inputs and outputs are compatible
for (arg1, arg2), (type1, type2) in zip(arg_pairs, arg_types_pairs):
assert isinstance(type1, Instance)
assert isinstance(type2, Instance)
assert type1.type.fullname == thinc_model_fullname
assert type2.type.fullname == thinc_model_fullname
check_chained(
l1_arg=arg1, l1_type=type1, l2_arg=arg2, l2_type=type2, api=ctx.api
)
# Generated model takes the first model's input and returns the last model's output
return Instance(
ctx.default_return_type.type, [arg_types[0].args[0], arg_types[-1].args[1]]
)
elif out_type.fullname == intoin_outtoout_out_fullname:
# Models must have the same input and output types
# Verify that model inputs and outputs are compatible
for (arg1, arg2), (type1, type2) in zip(arg_pairs, arg_types_pairs):
assert isinstance(type1, Instance)
assert isinstance(type2, Instance)
assert type1.type.fullname == thinc_model_fullname
assert type2.type.fullname == thinc_model_fullname
check_intoin_outtoout(
l1_arg=arg1, l1_type=type1, l2_arg=arg2, l2_type=type2, api=ctx.api
)
# Generated model accepts and returns the same types as all passed models
return Instance(
ctx.default_return_type.type, [arg_types[0].args[0], arg_types[0].args[1]]
)
# Make sure the default return type is returned if no branch was selected
assert False, "Thinc mypy plugin error: it should return before this point"
|
def get_reducers_type(ctx: FunctionContext) -> Type:
"""
Determine a more specific model type for functions that combine models.
This function operates on function *calls*. It analyzes each function call
by looking at the function definition and the arguments passed as part of
the function call, then determines a more specific return type for the
function call.
This method accepts a `FunctionContext` as part of the Mypy plugin
interface. This function context provides easy access to:
* `args`: List of "actual arguments" filling each "formal argument" of the
called function. "Actual arguments" are those passed to the function
as part of the function call. "Formal arguments" are the parameters
defined by the function definition. The same actual argument may serve
to fill multiple formal arguments. In some cases the relationship may
even be ambiguous. For example, calling `range(*args)`, the actual
argument `*args` may fill the `start`, `stop` or `step` formal
arguments, depending on the length of the list.
The `args` list is of length `num_formals`, with each element
corresponding to a formal argument. Each value in the `args` list is a
list of actual arguments which may fill the formal argument. For
example, in the function call `range(*args, num)`, `num` may fill the
`start`, `end` or `step` formal arguments depending on the length of
`args`, so type-checking needs to consider all of these possibilities.
* `arg_types`: Type annotation (or inferred type) of each argument. Like
`args`, this value is a list of lists with an outer list entry for each
formal argument and an inner list entry for each possible actual
argument for the formal argument.
* `arg_kinds`: "Kind" of argument passed to the function call. Argument
kinds include positional, star (`*args`), named (`x=y`) and star2
(`**kwargs`) arguments (among others). Like `args`, this value is a list
of lists.
* `context`: AST node representing the function call with all available
type information. Notable attributes include:
* `args` and `arg_kinds`: Simple list of actual arguments, not mapped to
formal arguments.
* `callee`: AST node representing the function being called. Typically
this is a `NameExpr`. To resolve this node to the function definition
it references, accessing `callee.node` will usually return either a
`FuncDef` or `Decorator` node.
* etc.
This function infers a more specific type for model-combining functions by
making certain assumptions about how the function operates based on the
order of its formal arguments and its return type.
If the return type is `Model[InT, XY_YZ_OutT]`, the output of each
argument is expected to be used as the input to the next argument. It's
therefore necessary to check that the output type of each model is
compatible with the input type of the following model. The combined model
has the type `Model[InT, OutT]`, where `InT` is the input type of the
first model and `OutT` is the output type of the last model.
If the return type is `Model[InT, XY_XY_OutT]`, all model arguments
receive input of the same type and are expected to produce output of the
same type. It's therefore necessary to check that all models have the same
input types and the same output types. The combined model has the type
`Model[InT, OutT]`, where `InT` is the input type of all model arguments
and `OutT` is the output type of all model arguments.
Raises:
AssertionError: Raised if a more specific model type couldn't be
determined, indicating that the default general return type should
be used.
"""
# Verify that we have a type-checking API and a default return type (presumably a
# `thinc.model.Model` instance)
assert isinstance(ctx.api, TypeChecker)
assert isinstance(ctx.default_return_type, Instance)
# Verify that we're inspecting a function call to a callable defined or decorated function
assert isinstance(ctx.context, CallExpr)
callee = ctx.context.callee
assert isinstance(callee, NameExpr)
callee_node = callee.node
assert isinstance(callee_node, (FuncDef, Decorator))
callee_node_type = callee_node.type
assert isinstance(callee_node_type, CallableType)
# Verify that the callable returns a `thinc.model.Model`
# TODO: Use `map_instance_to_supertype` to map subtypes to `Model` instances.
# (figure out how to look up the `TypeInfo` for a class outside of the module being type-checked)
callee_return_type = callee_node_type.ret_type
assert isinstance(callee_return_type, Instance)
assert callee_return_type.type.fullname == thinc_model_fullname
assert callee_return_type.args
assert len(callee_return_type.args) == 2
# Obtain the output type parameter of the `thinc.model.Model` return type
# of the called API function
out_type = callee_return_type.args[1]
# Check if the `Model`'s output type parameter is one of the "special
# type variables" defined to represent model composition (chaining) and
    # homogeneous reduction
assert isinstance(out_type, TypeVarType)
assert out_type.fullname
if out_type.fullname not in {intoin_outtoout_out_fullname, chained_out_fullname}:
return ctx.default_return_type
# Extract type of each argument used to call the API function, making sure that they are also
# `thinc.model.Model` instances
args = list(itertools.chain(*ctx.args))
arg_types = []
for arg_type in itertools.chain(*ctx.arg_types):
# TODO: Use `map_instance_to_supertype` to map subtypes to `Model` instances.
assert isinstance(arg_type, Instance)
assert arg_type.type.fullname == thinc_model_fullname
assert len(arg_type.args) == 2
arg_types.append(arg_type)
# Collect neighboring pairs of arguments and their types
arg_pairs = list(zip(args[:-1], args[1:]))
arg_types_pairs = list(zip(arg_types[:-1], arg_types[1:]))
# Determine if passed models will be chained or if they all need to have
# the same input and output type
if out_type.fullname == chained_out_fullname:
# Models will be chained, meaning that the output of each model will
# be passed as the input to the next model
# Verify that model inputs and outputs are compatible
for (arg1, arg2), (type1, type2) in zip(arg_pairs, arg_types_pairs):
assert isinstance(type1, Instance)
assert isinstance(type2, Instance)
assert type1.type.fullname == thinc_model_fullname
assert type2.type.fullname == thinc_model_fullname
check_chained(
l1_arg=arg1, l1_type=type1, l2_arg=arg2, l2_type=type2, api=ctx.api
)
# Generated model takes the first model's input and returns the last model's output
return Instance(
ctx.default_return_type.type, [arg_types[0].args[0], arg_types[-1].args[1]]
)
elif out_type.fullname == intoin_outtoout_out_fullname:
# Models must have the same input and output types
# Verify that model inputs and outputs are compatible
for (arg1, arg2), (type1, type2) in zip(arg_pairs, arg_types_pairs):
assert isinstance(type1, Instance)
assert isinstance(type2, Instance)
assert type1.type.fullname == thinc_model_fullname
assert type2.type.fullname == thinc_model_fullname
check_intoin_outtoout(
l1_arg=arg1, l1_type=type1, l2_arg=arg2, l2_type=type2, api=ctx.api
)
# Generated model accepts and returns the same types as all passed models
return Instance(
ctx.default_return_type.type, [arg_types[0].args[0], arg_types[0].args[1]]
)
# Make sure the default return type is returned if no branch was selected
assert False, "Thinc mypy plugin error: it should return before this point"
|
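The chaining rule described in the docstring above (each model's output type must match the next model's input type, and the combined model is Model[InT, OutT] built from the first input and last output) can be sketched with ordinary typing generics. Model and chain2 here are minimal stand-ins for illustration, not thinc's actual classes.

from typing import Generic, TypeVar

InT = TypeVar("InT")
MidT = TypeVar("MidT")
OutT = TypeVar("OutT")

class Model(Generic[InT, OutT]):
    ...

def chain2(m1: Model[InT, MidT], m2: Model[MidT, OutT]) -> Model[InT, OutT]:
    # a type checker unifies MidT, so m1's output type must match m2's input type
    return Model()

m_a: Model[str, int] = Model()
m_b: Model[int, float] = Model()
combined = chain2(m_a, m_b)  # inferred as Model[str, float]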
22,730 |
def load_cert(cert_path):
"""Reads the certificate PEM file and returns a cryptography.x509 object
:param str cert_path: Path to the certificate
:rtype `cryptography.x509`:
:returns: x509 certificate object
"""
with open(cert_path, 'rb') as fh:
cert_pem = fh.read()
return x509.load_pem_x509_certificate(cert_pem, default_backend())
|
def load_cert(cert_path):
"""Reads the certificate PEM file and returns a cryptography.x509 object
:param str cert_path: Path to the certificate
:rtype: `cryptography.x509.Certificate`
:returns: x509 certificate object
"""
with open(cert_path, 'rb') as fh:
cert_pem = fh.read()
return x509.load_pem_x509_certificate(cert_pem, default_backend())
|
26,927 |
def _strip_unsafe_kubernetes_special_chars(string: str) -> str:
"""
Kubernetes only supports lowercase alphanumeric characters, "-" and "." in
the pod name.
    However, there are special rules about how "-" and "." can be used, so let's
    only keep alphanumeric chars; see here for details:
    https://kubernetes.io/docs/concepts/overview/working-with-objects/names/
:param string: The requested Pod name
:return: Pod name stripped of any unsafe characters
"""
return ''.join(ch.lower() for ch in list(string) if ch.isalnum()).encode('ascii', 'ignore').decode()
|
def _strip_unsafe_kubernetes_special_chars(string: str) -> str:
"""
Kubernetes only supports lowercase alphanumeric characters, "-" and "." in
the pod name.
    However, there are special rules about how "-" and "." can be used, so let's
    only keep alphanumeric chars; see here for details:
    https://kubernetes.io/docs/concepts/overview/working-with-objects/names/
:param string: The requested Pod name
:return: Pod name stripped of any unsafe characters
"""
return string.encode('ascii', 'ignore').decode().lower()
|
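For reference, the two variants above behave differently on names containing '-', '.' or '_'; the sample pod name below is an assumption used only to show the difference.

name = "My-Pod_01.test"
only_alnum = ''.join(ch.lower() for ch in name if ch.isalnum())   # first variant: keep only alphanumerics
lowercased = name.encode('ascii', 'ignore').decode().lower()      # second variant: lowercase ASCII only
print(only_alnum)   # mypod01test
print(lowercased)   # my-pod_01.test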
30,291 |
def sort_by_severity(ioc):
"""
    Extracts the severity value from the indicator and converts it to an integer.
    The integer is taken from the SEVERITY_SCORE dictionary with possible values: 0, 1, 2, 3.
In case the indicator has no severity value, the indicator severity score is set to 0 (low).
"""
try:
severity_value = ioc['meta']['severity']
return SEVERITY_SCORE[severity_value]
except KeyError:
return 0
|
def sort_by_severity(ioc):
"""
    Extracts the severity value from the indicator and converts it to an integer.
    The integer is taken from the SEVERITY_SCORE dictionary with possible values: 0, 1, 2, 3.
In case the indicator has no severity value, the indicator severity score is set to 0 (low).
"""
try:
severity_value = ioc['meta']['severity']
return SEVERITY_SCORE.get(severity_value, 0)
except KeyError:
return 0
|
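A hedged usage sketch of the key function above: SEVERITY_SCORE is assumed here to be a simple mapping onto the 0-3 range the docstring mentions, and the sample indicators are made up for illustration.

SEVERITY_SCORE = {'low': 0, 'medium': 1, 'high': 2, 'critical': 3}  # assumed mapping

def sort_by_severity(ioc):
    try:
        return SEVERITY_SCORE.get(ioc['meta']['severity'], 0)
    except KeyError:
        return 0

iocs = [{'meta': {'severity': 'high'}}, {'meta': {}}, {'meta': {'severity': 'critical'}}]
for ioc in sorted(iocs, key=sort_by_severity, reverse=True):
    print(ioc)  # critical first, then high, then the indicator without a severity value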
23,141 |
def store(
sources: Array | Collection[Array],
targets: Array | Collection[Array],
lock: bool | Lock = True,
regions: tuple[slice, ...] | Collection[tuple[slice, ...]] | None = None,
compute: bool = True,
return_stored: bool = False,
**kwargs,
):
"""Store dask arrays in array-like objects, overwrite data in target
    This stores dask arrays into objects that support numpy-style setitem
indexing. It stores values chunk by chunk so that it does not have to
fill up memory. For best performance you can align the block size of
the storage target with the block size of your array.
If your data fits in memory then you may prefer calling
``np.array(myarray)`` instead.
Parameters
----------
sources: Array or collection of Arrays
targets: array-like or Delayed or collection of array-likes and/or Delayeds
These should support setitem syntax ``target[10:20] = ...``
lock: boolean or threading.Lock, optional
Whether or not to lock the data stores while storing.
Pass True (lock each file individually), False (don't lock) or a
particular :class:`threading.Lock` object to be shared among all writes.
regions: tuple of slices or collection of tuples of slices
Each ``region`` tuple in ``regions`` should be such that
``target[region].shape = source.shape``
for the corresponding source and target in sources and targets,
respectively. If this is a tuple, the contents will be assumed to be
slices, so do not provide a tuple of tuples.
compute: boolean, optional
If true compute immediately; return :class:`dask.delayed.Delayed` otherwise.
return_stored: boolean, optional
Optionally return the stored result (default False).
kwargs:
Parameters passed to compute/persist (only used if compute=True)
Returns
-------
If return_stored=True
tuple of Arrays
If return_stored=False and compute=True
None
If return_stored=False and compute=False
Delayed
Examples
--------
>>> import h5py # doctest: +SKIP
>>> f = h5py.File('myfile.hdf5', mode='a') # doctest: +SKIP
>>> dset = f.create_dataset('/data', shape=x.shape,
... chunks=x.chunks,
... dtype='f8') # doctest: +SKIP
>>> store(x, dset) # doctest: +SKIP
Alternatively store many arrays at the same time
>>> store([x, y, z], [dset1, dset2, dset3]) # doctest: +SKIP
"""
if isinstance(sources, Array):
sources = [sources]
targets = [targets] # type: ignore
if any(not isinstance(s, Array) for s in sources):
raise ValueError("All sources must be dask array objects")
if len(sources) != len(targets):
raise ValueError(
"Different number of sources [%d] and targets [%d]"
% (len(sources), len(targets))
)
if isinstance(regions, tuple) or regions is None:
regions = [regions] # type: ignore
if len(sources) > 1 and len(regions) == 1:
regions *= len(sources) # type: ignore
if len(sources) != len(regions):
raise ValueError(
"Different number of sources [%d] and targets [%d] than regions [%d]"
% (len(sources), len(targets), len(regions))
)
# Optimize all sources together
sources_hlg = HighLevelGraph.merge(*[e.__dask_graph__() for e in sources])
sources_layer = Array.__dask_optimize__(
sources_hlg, list(core.flatten([e.__dask_keys__() for e in sources]))
)
sources_name = "store-sources-" + tokenize(sources)
layers = {sources_name: sources_layer}
dependencies: dict[str, set] = {sources_name: set()}
# Optimize all targets together
targets_keys = []
targets_dsks = []
for t in targets:
if isinstance(t, Delayed):
targets_keys.append(t.key)
targets_dsks.append(t.__dask_graph__())
elif is_dask_collection(t):
raise TypeError("Targets must be either Delayed objects or array-likes")
if targets_dsks:
targets_hlg = HighLevelGraph.merge(*targets_dsks)
targets_layer = Delayed.__dask_optimize__(targets_hlg, targets_keys)
targets_name = "store-targets-" + tokenize(targets_keys)
layers[targets_name] = targets_layer
dependencies[targets_name] = set()
load_stored = return_stored and not compute
map_names = [
"store-map-" + tokenize(s, t if isinstance(t, Delayed) else id(t), r)
for s, t, r in zip(sources, targets, regions)
]
map_keys: list = []
for s, t, n, r in zip(sources, targets, map_names, regions):
map_layer = insert_to_ooc(
keys=s.__dask_keys__(),
chunks=s.chunks,
out=t.key if isinstance(t, Delayed) else t,
name=n,
lock=lock,
region=r,
return_stored=return_stored,
load_stored=load_stored,
)
layers[n] = map_layer
if isinstance(t, Delayed):
dependencies[n] = {sources_name, targets_name}
else:
dependencies[n] = {sources_name}
map_keys += map_layer.keys()
if return_stored:
store_dsk = HighLevelGraph(layers, dependencies)
load_store_dsk: HighLevelGraph | Mapping = store_dsk
if compute:
store_dlyds = [Delayed(k, store_dsk, layer=k[0]) for k in map_keys]
store_dlyds = persist(*store_dlyds, **kwargs)
store_dsk_2 = HighLevelGraph.merge(*[e.dask for e in store_dlyds])
load_store_dsk = retrieve_from_ooc(map_keys, store_dsk, store_dsk_2)
map_names = ["load-" + n for n in map_names]
return tuple(
Array(load_store_dsk, n, s.chunks, meta=s)
for s, n in zip(sources, map_names)
)
elif compute:
store_dsk = HighLevelGraph(layers, dependencies)
compute_as_if_collection(Array, store_dsk, map_keys, **kwargs)
return None
else:
key = "store-" + tokenize(map_names)
layers[key] = {key: map_keys}
dependencies[key] = set(map_names)
store_dsk = HighLevelGraph(layers, dependencies)
return Delayed(key, store_dsk)
|
def store(
sources: Array | Collection[Array],
targets: Array | Collection[Array],
lock: bool | Lock = True,
regions: tuple[slice, ...] | Collection[tuple[slice, ...]] | None = None,
compute: bool = True,
return_stored: bool = False,
**kwargs,
):
"""Store dask arrays in array-like objects, overwrite data in target
    This stores dask arrays into objects that support numpy-style setitem
indexing. It stores values chunk by chunk so that it does not have to
fill up memory. For best performance you can align the block size of
the storage target with the block size of your array.
If your data fits in memory then you may prefer calling
``np.array(myarray)`` instead.
Parameters
----------
sources: Array or collection of Arrays
targets: array-like or Delayed or collection of array-likes and/or Delayeds
These should support setitem syntax ``target[10:20] = ...``
lock: boolean or threading.Lock, optional
Whether or not to lock the data stores while storing.
Pass True (lock each file individually), False (don't lock) or a
particular :class:`threading.Lock` object to be shared among all writes.
regions: tuple of slices or collection of tuples of slices
Each ``region`` tuple in ``regions`` should be such that
``target[region].shape = source.shape``
for the corresponding source and target in sources and targets,
respectively. If this is a tuple, the contents will be assumed to be
slices, so do not provide a tuple of tuples.
compute: boolean, optional
If true compute immediately; return :class:`dask.delayed.Delayed` otherwise.
return_stored: boolean, optional
Optionally return the stored result (default False).
kwargs:
Parameters passed to compute/persist (only used if compute=True)
Returns
-------
If return_stored=True
tuple of Arrays
If return_stored=False and compute=True
None
If return_stored=False and compute=False
Delayed
Examples
--------
>>> import h5py # doctest: +SKIP
>>> f = h5py.File('myfile.hdf5', mode='a') # doctest: +SKIP
>>> dset = f.create_dataset('/data', shape=x.shape,
... chunks=x.chunks,
... dtype='f8') # doctest: +SKIP
>>> store(x, dset) # doctest: +SKIP
Alternatively store many arrays at the same time
>>> store([x, y, z], [dset1, dset2, dset3]) # doctest: +SKIP
"""
if isinstance(sources, Array):
sources = [sources]
targets = [targets] # type: ignore
if any(not isinstance(s, Array) for s in sources):
raise ValueError("All sources must be dask array objects")
if len(sources) != len(targets):
raise ValueError(
"Different number of sources [%d] and targets [%d]"
% (len(sources), len(targets))
)
if isinstance(regions, tuple) or regions is None:
regions = [regions] # type: ignore
if len(sources) > 1 and len(regions) == 1:
regions = list(regions) * len(sources)
if len(sources) != len(regions):
raise ValueError(
"Different number of sources [%d] and targets [%d] than regions [%d]"
% (len(sources), len(targets), len(regions))
)
# Optimize all sources together
sources_hlg = HighLevelGraph.merge(*[e.__dask_graph__() for e in sources])
sources_layer = Array.__dask_optimize__(
sources_hlg, list(core.flatten([e.__dask_keys__() for e in sources]))
)
sources_name = "store-sources-" + tokenize(sources)
layers = {sources_name: sources_layer}
dependencies: dict[str, set] = {sources_name: set()}
# Optimize all targets together
targets_keys = []
targets_dsks = []
for t in targets:
if isinstance(t, Delayed):
targets_keys.append(t.key)
targets_dsks.append(t.__dask_graph__())
elif is_dask_collection(t):
raise TypeError("Targets must be either Delayed objects or array-likes")
if targets_dsks:
targets_hlg = HighLevelGraph.merge(*targets_dsks)
targets_layer = Delayed.__dask_optimize__(targets_hlg, targets_keys)
targets_name = "store-targets-" + tokenize(targets_keys)
layers[targets_name] = targets_layer
dependencies[targets_name] = set()
load_stored = return_stored and not compute
map_names = [
"store-map-" + tokenize(s, t if isinstance(t, Delayed) else id(t), r)
for s, t, r in zip(sources, targets, regions)
]
map_keys: list = []
for s, t, n, r in zip(sources, targets, map_names, regions):
map_layer = insert_to_ooc(
keys=s.__dask_keys__(),
chunks=s.chunks,
out=t.key if isinstance(t, Delayed) else t,
name=n,
lock=lock,
region=r,
return_stored=return_stored,
load_stored=load_stored,
)
layers[n] = map_layer
if isinstance(t, Delayed):
dependencies[n] = {sources_name, targets_name}
else:
dependencies[n] = {sources_name}
map_keys += map_layer.keys()
if return_stored:
store_dsk = HighLevelGraph(layers, dependencies)
load_store_dsk: HighLevelGraph | Mapping = store_dsk
if compute:
store_dlyds = [Delayed(k, store_dsk, layer=k[0]) for k in map_keys]
store_dlyds = persist(*store_dlyds, **kwargs)
store_dsk_2 = HighLevelGraph.merge(*[e.dask for e in store_dlyds])
load_store_dsk = retrieve_from_ooc(map_keys, store_dsk, store_dsk_2)
map_names = ["load-" + n for n in map_names]
return tuple(
Array(load_store_dsk, n, s.chunks, meta=s)
for s, n in zip(sources, map_names)
)
elif compute:
store_dsk = HighLevelGraph(layers, dependencies)
compute_as_if_collection(Array, store_dsk, map_keys, **kwargs)
return None
else:
key = "store-" + tokenize(map_names)
layers[key] = {key: map_keys}
dependencies[key] = set(map_names)
store_dsk = HighLevelGraph(layers, dependencies)
return Delayed(key, store_dsk)
|
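Beyond the h5py example in the docstring, any object supporting numpy-style setitem works as a target; a minimal runnable sketch with a plain numpy array:

import numpy as np
import dask.array as da

x = da.arange(16, chunks=4)        # four 4-element chunks
target = np.zeros(16)              # supports target[slice] = values
da.store(x, target)                # writes chunk by chunk, returns None
assert (target == np.arange(16)).all()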
46,004 |
def integral(input: torch.Tensor) -> torch.Tensor:
r"""Calculates integral of the input tensor.
Args:
        image: the input tensor with shape :math:`(B,C,H,W)`.
Returns:
Integral tensor for the input tensor
Examples:
>>> input = torch.randn(2,2,5,5)
>>> output = integral(input)
>>> output.shape
torch.Size([2, 2, 5, 5])
"""
if not isinstance(input, torch.Tensor):
raise TypeError(f"Input type is not a torch.Tensor. Got {type(input)}")
S = torch.cumsum(input, dim=-1)
S = torch.cumsum(S, dim=-2)
return S
|
def integral(input: torch.Tensor) -> torch.Tensor:
r"""Calculates integral of the input tensor.
Args:
        image: the input tensor with shape :math:`(B,C,H,W)`.
Returns:
Integral tensor for the input tensor with shape :math:`(B,C,H,W)`.
Examples:
>>> input = torch.randn(2,2,5,5)
>>> output = integral(input)
>>> output.shape
torch.Size([2, 2, 5, 5])
"""
if not isinstance(input, torch.Tensor):
raise TypeError(f"Input type is not a torch.Tensor. Got {type(input)}")
S = torch.cumsum(input, dim=-1)
S = torch.cumsum(S, dim=-2)
return S
|
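The double cumulative sum above is a summed-area table, so the sum over any rectangular window can be read back from four corner values. A small sketch follows; the inclusive, 0-based corner convention is an assumption of this example, not part of the function's API.

import torch

def box_sum(S, y0, x0, y1, x1):
    # sum of the original tensor over rows y0..y1 and columns x0..x1 (inclusive)
    total = S[..., y1, x1].clone()
    if y0 > 0:
        total -= S[..., y0 - 1, x1]
    if x0 > 0:
        total -= S[..., y1, x0 - 1]
    if y0 > 0 and x0 > 0:
        total += S[..., y0 - 1, x0 - 1]
    return total

img = torch.ones(1, 1, 5, 5)
S = torch.cumsum(torch.cumsum(img, dim=-1), dim=-2)  # same computation as integral(img)
print(box_sum(S, 1, 1, 3, 3))  # tensor([[9.]]) -> a 3x3 window of ones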
53,505 |
def test_fall_back_on_base_config() -> None:
"""Test that we correctly fall back on the base config."""
# A file under the current dir should fall back to the highest level
# For pylint this is ./pylintrc
runner = Run([__name__], exit=False)
assert id(runner.linter.config) == id(runner.linter._base_config)
# When the file is a directory that does not have any of its parents in
# linter._directory_namespaces it should default to the base config
with tempfile.TemporaryDirectory() as tmpdir:
with open(Path(tmpdir) / "test.py", "w", encoding="utf-8") as f:
f.write("1")
Run([str(Path(tmpdir) / "test.py")], exit=False)
assert id(runner.linter.config) == id(runner.linter._base_config)
|
def test_fall_back_on_base_config() -> None:
"""Test that we correctly fall back on the base config."""
# A file under the current dir should fall back to the highest level
# For pylint this is ./pylintrc
runner = Run([__name__], exit=False)
assert id(runner.linter.config) == id(runner.linter._base_config)
# When the file is a directory that does not have any of its parents in
# linter._directory_namespaces it should default to the base config
with open(test_file, "w", encoding="utf-8") as f:
f.write("1")
Run([str(test_file)], exit=False)
assert id(runner.linter.config) == id(runner.linter._base_config)
|
46,959 |
def to_py_obj(obj):
"""
Convert a TensorFlow tensor, PyTorch tensor, Numpy array or python list
in a python list.
"""
if isinstance(obj, (list, tuple)):
return [to_py_obj(o) for o in obj]
elif is_tf_available() and isinstance(obj, tf.Tensor):
return obj.numpy().tolist()
elif is_torch_available() and isinstance(obj, torch.Tensor):
return obj.detach().cpu().tolist()
elif isinstance(obj, np.ndarray):
return obj.tolist()
else:
return obj
|
def to_py_obj(obj):
"""
Convert a TensorFlow tensor, PyTorch tensor, Numpy array or python list
to a python list.
"""
if isinstance(obj, (list, tuple)):
return [to_py_obj(o) for o in obj]
elif is_tf_available() and isinstance(obj, tf.Tensor):
return obj.numpy().tolist()
elif is_torch_available() and isinstance(obj, torch.Tensor):
return obj.detach().cpu().tolist()
elif isinstance(obj, np.ndarray):
return obj.tolist()
else:
return obj
|
28,499 |
def test_cli_args(container: TrackedContainer, http_client: requests.Session) -> None:
"""Container should respect notebook server command line args
(e.g., disabling token security)"""
running_container = container.run_detached(
command=["start-notebook.sh", "--NotebookApp.token=''"],
ports={"8888/tcp": None},
)
host_port = container.get_host_port("8888/tcp")
resp = http_client.get("http://localhost:" + host_port)
resp.raise_for_status()
logs = running_container.logs().decode("utf-8")
LOGGER.debug(logs)
assert "ERROR" not in logs
warnings = TrackedContainer.get_warnings(logs)
assert not warnings
assert "login_submit" not in resp.text
|
def test_cli_args(container: TrackedContainer, http_client: requests.Session) -> None:
"""Container should respect notebook server command line args
(e.g., disabling token security)"""
running_container = container.run_detached(
command=["start-notebook.sh", "--NotebookApp.token=''"],
ports={"8888/tcp": None},
)
host_port = container.get_host_port("8888/tcp")
resp = http_client.get(f"http://localhost:{host_port}")
resp.raise_for_status()
logs = running_container.logs().decode("utf-8")
LOGGER.debug(logs)
assert "ERROR" not in logs
warnings = TrackedContainer.get_warnings(logs)
assert not warnings
assert "login_submit" not in resp.text
|
24,800 |
def report_by_type_stats(sect, stats, old_stats):
"""make a report of
* percentage of different types documented
* percentage of different types with a bad name
"""
# percentage of different types documented and/or with a bad name
nice_stats = {}
for node_type in ("module", "class", "method", "function"):
try:
total = stats[node_type]
except KeyError as e:
raise exceptions.EmptyReportError() from e
nice_stats[node_type] = {}
if total != 0:
try:
documented = total - stats["undocumented_" + node_type]
percent = (documented * 100.0) / total
nice_stats[node_type]["percent_documented"] = f"{percent:.2f}"
except KeyError:
nice_stats[node_type]["percent_documented"] = "NC"
try:
percent = (stats["badname_" + node_type] * 100.0) / total
nice_stats[node_type]["percent_badname"] = f"{percent:.2f}"
except KeyError:
nice_stats[node_type]["percent_badname"] = "NC"
lines: list[str] = [
"type",
"number",
"old number",
"difference",
"%documented",
"%badname",
]
for node_type in ("module", "class", "method", "function"):
new = stats[node_type]
old = old_stats.get(node_type, None)
if old is not None:
diff_str = lint_utils.diff_string(old, new)
else:
old, diff_str = "NC", "NC"
lines += [
node_type,
str(new),
str(old),
diff_str,
nice_stats[node_type].get("percent_documented", "0"),
nice_stats[node_type].get("percent_badname", "0"),
]
sect.append(reporter_nodes.Table(children=lines, cols=6, rheaders=1))
|
def report_by_type_stats(sect, stats, old_stats):
"""make a report of
* percentage of different types documented
* percentage of different types with a bad name
"""
# percentage of different types documented and/or with a bad name
nice_stats = {}
for node_type in ("module", "class", "method", "function"):
try:
total = stats[node_type]
except KeyError as e:
raise exceptions.EmptyReportError() from e
nice_stats[node_type] = {}
if total != 0:
try:
documented = total - stats["undocumented_" + node_type]
percent = (documented * 100.0) / total
nice_stats[node_type]["percent_documented"] = f"{percent:.2f}"
except KeyError:
nice_stats[node_type]["percent_documented"] = "NC"
try:
percent = (stats["badname_" + node_type] * 100.0) / total
nice_stats[node_type]["percent_badname"] = f"{percent:.2f}"
except KeyError:
nice_stats[node_type]["percent_badname"] = "NC"
lines = [
"type",
"number",
"old number",
"difference",
"%documented",
"%badname",
]
for node_type in ("module", "class", "method", "function"):
new = stats[node_type]
old = old_stats.get(node_type, None)
if old is not None:
diff_str = lint_utils.diff_string(old, new)
else:
old, diff_str = "NC", "NC"
lines += [
node_type,
str(new),
str(old),
diff_str,
nice_stats[node_type].get("percent_documented", "0"),
nice_stats[node_type].get("percent_badname", "0"),
]
sect.append(reporter_nodes.Table(children=lines, cols=6, rheaders=1))
|
57,863 |
def main():
args = demisto.args()
demisto.info('hi1')
query = args.get('query')
size = int(args.get('size'))
demisto.info('hi')
raw_result = demisto.executeCommand("SearchIncidentsV2", {"query": query,
"size": size})
incidents_len = len(raw_result[0].get("Contents", [""])[0].get("Contents", {}).get("data"))
outputs = {
'Query': query,
'Size': incidents_len,
'ConditionMet': incidents_len >= size
}
return_results(CommandResults(outputs=outputs, outputs_key_field='Query', outputs_prefix='IncidentsCheck'))
|
def main():
args = demisto.args()
query = args.get('query')
size = int(args.get('size'))
raw_result = demisto.executeCommand("SearchIncidentsV2", {"query": query,
"size": size})
incidents_len = len(raw_result[0].get("Contents", [""])[0].get("Contents", {}).get("data"))
outputs = {
'Query': query,
'Size': incidents_len,
'ConditionMet': incidents_len >= size
}
return_results(CommandResults(outputs=outputs, outputs_key_field='Query', outputs_prefix='IncidentsCheck'))
|
29,813 |
def paasta_validate_soa_configs(service, service_path, verbose=False):
"""Analyze the service in service_path to determine if the conf files are valid
:param service_path: Path to directory containing soa conf yaml files for service
"""
if not check_service_path(service_path):
return False
if not validate_service_name(service):
return False
returncode = True
if not validate_all_schemas(service_path):
returncode = False
if not validate_tron(service_path, verbose):
returncode = False
if not validate_paasta_objects(service_path):
returncode = False
if not validate_unique_instance_names(service_path):
returncode = False
if not validate_autoscaling_configs(service_path):
returncode = False
if not validate_secrets(service_path):
returncode = False
if not validate_min_max_instances(service_path):
returncode = False
return returncode
|
def paasta_validate_soa_configs(service: str, service_path: str, verbose: bool = False) -> bool:
"""Analyze the service in service_path to determine if the conf files are valid
:param service_path: Path to directory containing soa conf yaml files for service
"""
if not check_service_path(service_path):
return False
if not validate_service_name(service):
return False
returncode = True
if not validate_all_schemas(service_path):
returncode = False
if not validate_tron(service_path, verbose):
returncode = False
if not validate_paasta_objects(service_path):
returncode = False
if not validate_unique_instance_names(service_path):
returncode = False
if not validate_autoscaling_configs(service_path):
returncode = False
if not validate_secrets(service_path):
returncode = False
if not validate_min_max_instances(service_path):
returncode = False
return returncode
|
47,098 |
def infer_framework_from_model(model, model_classes: Optional[Dict[str, type]] = None, revision: Optional[str] = None):
"""
Select framework (TensorFlow or PyTorch) to use from the :obj:`model` passed. Returns a tuple (framework, model).
If :obj:`model` is instantiated, this function will just infer the framework from the mode class. Otherwise
:obj:`model` is actually a checkpoint name and this method will try to instantiate it using :obj:`model_classes`.
Since we don't want to instantiate the model twice, this model is returned for use by the pipeline.
If both frameworks are installed and available for :obj:`model`, PyTorch is selected.
Args:
model (:obj:`str`, :class:`~transformers.PreTrainedModel` or :class:`~transformers.TFPreTrainedModel`):
            The model to infer the framework from. If :obj:`str`, a checkpoint name.
model_classes (dictionary :obj:`str` to :obj:`type`, `optional`):
A mapping framework to class.
revision (:obj:`str`, `optional`):
The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a
git-based system for storing models and other artifacts on huggingface.co, so ``revision`` can be any
identifier allowed by git.
Returns:
:obj:`Tuple`: A tuple framework, model.
"""
if not is_tf_available() and not is_torch_available():
raise RuntimeError(
"At least one of TensorFlow 2.0 or PyTorch should be installed. "
"To install TensorFlow 2.0, read the instructions at https://www.tensorflow.org/install/ "
"To install PyTorch, read the instructions at https://pytorch.org/."
)
if isinstance(model, str):
if is_torch_available() and not is_tf_available():
model_class = model_classes.get("pt", AutoModel)
model = model_class.from_pretrained(model, revision=revision)
elif is_tf_available() and not is_torch_available():
model_class = model_classes.get("tf", TFAutoModel)
model = model_class.from_pretrained(model, revision=revision)
else:
try:
model_class = model_classes.get("pt", AutoModel)
model = model_class.from_pretrained(model, revision=revision)
except OSError:
model_class = model_classes.get("tf", TFAutoModel)
model = model_class.from_pretrained(model, revision=revision)
framework = "tf" if model.__class__.__name__.startswith("TF") else "pt"
return framework, model
|
def infer_framework_from_model(model, model_classes: Optional[Dict[str, type]] = None, revision: Optional[str] = None):
"""
Select framework (TensorFlow or PyTorch) to use from the :obj:`model` passed. Returns a tuple (framework, model).
If :obj:`model` is instantiated, this function will just infer the framework from the model class. Otherwise
:obj:`model` is actually a checkpoint name and this method will try to instantiate it using :obj:`model_classes`.
Since we don't want to instantiate the model twice, this model is returned for use by the pipeline.
If both frameworks are installed and available for :obj:`model`, PyTorch is selected.
Args:
model (:obj:`str`, :class:`~transformers.PreTrainedModel` or :class:`~transformers.TFPreTrainedModel`):
            The model to infer the framework from. If :obj:`str`, a checkpoint name.
model_classes (dictionary :obj:`str` to :obj:`type`, `optional`):
A mapping framework to class.
revision (:obj:`str`, `optional`):
The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a
git-based system for storing models and other artifacts on huggingface.co, so ``revision`` can be any
identifier allowed by git.
Returns:
:obj:`Tuple`: A tuple framework, model.
"""
if not is_tf_available() and not is_torch_available():
raise RuntimeError(
"At least one of TensorFlow 2.0 or PyTorch should be installed. "
"To install TensorFlow 2.0, read the instructions at https://www.tensorflow.org/install/ "
"To install PyTorch, read the instructions at https://pytorch.org/."
)
if isinstance(model, str):
if is_torch_available() and not is_tf_available():
model_class = model_classes.get("pt", AutoModel)
model = model_class.from_pretrained(model, revision=revision)
elif is_tf_available() and not is_torch_available():
model_class = model_classes.get("tf", TFAutoModel)
model = model_class.from_pretrained(model, revision=revision)
else:
try:
model_class = model_classes.get("pt", AutoModel)
model = model_class.from_pretrained(model, revision=revision)
except OSError:
model_class = model_classes.get("tf", TFAutoModel)
model = model_class.from_pretrained(model, revision=revision)
framework = "tf" if model.__class__.__name__.startswith("TF") else "pt"
return framework, model
|
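A minimal usage sketch of the framework-dispatch helper above. It is hedged: it assumes at least one of PyTorch or TensorFlow 2.0 is installed (so the availability check passes), that the module's transformers imports are in place, and that infer_framework_from_model as defined above is in scope. The dummy class is purely illustrative and exercises only the already-instantiated branch, so no checkpoint is downloaded.
class DummyModel:
    """Stand-in for an instantiated (non-TF) PreTrainedModel; illustrative only."""
dummy = DummyModel()
framework, returned = infer_framework_from_model(dummy)
assert returned is dummy          # the instance is passed through unchanged
assert framework == "pt"          # the class name does not start with "TF"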
58,104 |
def handle_prevalence_command(client: Client, command: str, args: dict):
key_names_in_response = {
'ip': 'ip_address',
'domain': 'domain_name',
'process': 'process_name',
'cmd': 'process_command_line',
'hash': 'sha256',
'registry': 'key_name'
}
args.pop('integration_context_brand', None)
args.pop('integration_name', None)
if command == 'core-get-registry-analytics-prevalence':
        # arg list should be in the following structure:
# args: [
# {"key_name": "some_key1", "value_name": "some_value1"},
# {"key_name": "some_key2", "value_name": "some_value2"}
# ]
args_list = []
keys = argToList(args.get('key_name'))
values = argToList(args.get('value_name'))
if len(keys) != len(values):
raise DemistoException('Number of elements in key_name argument should be equal to the number '
'of elements in value_name argument.')
for i in range(len(keys)):
args_list.append({'key_name': keys[i], 'value_name': values[i]})
else:
args_list = []
for key, value in args.items():
values = argToList(value)
for val in values:
args_list.append({key: val})
request_body = {
'api_id': command,
'args': args_list
}
res = client.get_prevalence(request_body).get('results', [])
command_type = PREVALENCE_COMMANDS[command]
return CommandResults(
readable_output=tableToMarkdown(string_to_table_header(f'{command_type} Prevalence'),
[{
key_names_in_response[command_type]: item.get('args', {}).get(
key_names_in_response[command_type]),
'Prevalence': item.get('value')
} for item in res],
headerTransform=string_to_table_header),
outputs_prefix=f'{INTEGRATION_CONTEXT_BRAND}.AnalyticsPrevalence.{command_type.title()}',
outputs=res,
raw_response=res,
)
|
def handle_prevalence_command(client: Client, command: str, args: dict):
key_names_in_response = {
'ip': 'ip_address',
'domain': 'domain_name',
'process': 'process_name',
'cmd': 'process_command_line',
'hash': 'sha256',
'registry': 'key_name'
}
args.pop('integration_context_brand', None)
args.pop('integration_name', None)
if command == 'core-get-registry-analytics-prevalence':
        # arg list should be in the following structure:
# args: [
# {"key_name": "some_key1", "value_name": "some_value1"},
# {"key_name": "some_key2", "value_name": "some_value2"}
# ]
args_list = []
keys = argToList(args.get('key_name'))
values = argToList(args.get('value_name'))
if len(keys) != len(values):
raise DemistoException('Number of elements in key_name argument should be equal to the number '
'of elements in value_name argument.')
for key, value in zip(keys, values):
args_list.append({'key_name': key, 'value_name': value})
else:
args_list = []
for key, value in args.items():
values = argToList(value)
for val in values:
args_list.append({key: val})
request_body = {
'api_id': command,
'args': args_list
}
res = client.get_prevalence(request_body).get('results', [])
command_type = PREVALENCE_COMMANDS[command]
return CommandResults(
readable_output=tableToMarkdown(string_to_table_header(f'{command_type} Prevalence'),
[{
key_names_in_response[command_type]: item.get('args', {}).get(
key_names_in_response[command_type]),
'Prevalence': item.get('value')
} for item in res],
headerTransform=string_to_table_header),
outputs_prefix=f'{INTEGRATION_CONTEXT_BRAND}.AnalyticsPrevalence.{command_type.title()}',
outputs=res,
raw_response=res,
)
|
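The comment in the registry branch above describes the expected args shape; the following standalone sketch reproduces that reshaping with invented values (argToList and the prevalence client are out of scope here).
keys = ["some_key1", "some_key2"]        # what argToList(args.get('key_name')) would yield
values = ["some_value1", "some_value2"]  # what argToList(args.get('value_name')) would yield
if len(keys) != len(values):
    raise ValueError("key_name and value_name must contain the same number of elements")
args_list = [{"key_name": k, "value_name": v} for k, v in zip(keys, values)]
request_body = {"api_id": "core-get-registry-analytics-prevalence", "args": args_list}
print(request_body["args"])
# [{'key_name': 'some_key1', 'value_name': 'some_value1'}, {'key_name': 'some_key2', 'value_name': 'some_value2'}]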
57,729 |
def get_duo_admin_by_name(name):
duo_administrators = admin_api.get_admins()
duo_administrator = next((admin for admin in duo_administrators if admin['name'] == name), None)
entry = get_entry_for_object(
'Information about admin ' + name, duo_administrator, duo_administrator,
{
'DuoAdmin.AdminDetail(val.name && val.name==obj.name)':
{'details': duo_administrator}
}
)
demisto.results(entry)
|
def get_duo_admin_by_name(name):
admin_id = get_user_id(name)
duo_administrator = admin_api.get_admin(admin_id)
entry = get_entry_for_object(
'Information about admin ' + name, duo_administrator, duo_administrator,
{
'DuoAdmin.AdminDetail(val.name && val.name==obj.name)':
{'details': duo_administrator}
}
)
demisto.results(entry)
|
31,652 |
def create_incident_custom_id(incident):
incident_raw_data = json.loads(incident["rawJSON"])
fields_to_add = ['_cd', 'index', '_time', '_indextime', '_raw']
fields_supplied_by_user = demisto.params().get('unique_id_fields', '')
fields_supplied_by_user = '' if not fields_supplied_by_user else fields_supplied_by_user
fields_supplied_by_user = fields_supplied_by_user.split(',')
fields_to_add.extend(fields_supplied_by_user)
incident_custom_id = '___'
for field_name in fields_to_add:
if field_name in incident_raw_data:
incident_custom_id += '{}___{}'.format(field_name, incident_raw_data[field_name])
elif field_name in incident:
incident_custom_id += '{}___{}'.format(field_name, incident[field_name])
extensive_log('ID after all fields were added: {}'.format(incident_custom_id))
unique_id = hashlib.md5(incident_custom_id).hexdigest()
extensive_log('Found incident ID is: {}'.format(unique_id))
return unique_id
|
def create_incident_custom_id(incident):
incident_raw_data = json.loads(incident["rawJSON"])
fields_to_add = ['_cd', 'index', '_time', '_indextime', '_raw']
fields_supplied_by_user = demisto.params().get('unique_id_fields', '')
fields_supplied_by_user = '' if not fields_supplied_by_user else fields_supplied_by_user
fields_to_add.extend(fields_supplied_by_user.split(','))
incident_custom_id = '___'
for field_name in fields_to_add:
if field_name in incident_raw_data:
incident_custom_id += '{}___{}'.format(field_name, incident_raw_data[field_name])
elif field_name in incident:
incident_custom_id += '{}___{}'.format(field_name, incident[field_name])
extensive_log('ID after all fields were added: {}'.format(incident_custom_id))
unique_id = hashlib.md5(incident_custom_id).hexdigest()
extensive_log('Found incident ID is: {}'.format(unique_id))
return unique_id
|
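A self-contained walk-through of the custom-ID scheme above, using only the standard library; the incident payload and field values are invented. Note that hashlib.md5 on Python 3 needs bytes, so this sketch encodes the string first, whereas the integration code above hashes a str in Python 2 style.
import hashlib
import json
incident = {"rawJSON": json.dumps({"index": "main", "_time": "1600000000.0"})}
incident_raw_data = json.loads(incident["rawJSON"])
incident_custom_id = "___"
for field_name in ["_cd", "index", "_time", "_indextime", "_raw"]:
    if field_name in incident_raw_data:
        incident_custom_id += "{}___{}".format(field_name, incident_raw_data[field_name])
unique_id = hashlib.md5(incident_custom_id.encode("utf-8")).hexdigest()
print(unique_id)  # a stable 32-character hex digest usable for deduplication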
23,755 |
def compress_files(files, symlinks, name, dest_dir, output=None):
t1 = time.time()
# FIXME, better write to disk sequentially and not keep tgz contents in memory
tgz_path = os.path.join(dest_dir, name)
set_dirty(tgz_path)
with open(tgz_path, "wb") as tgz_handle:
# tgz_contents = BytesIO()
tgz = gzopen_without_timestamps(name, mode="w", fileobj=tgz_handle)
for filename, dest in sorted(symlinks.items()):
info = tarfile.TarInfo(name=filename)
info.type = tarfile.SYMTYPE
info.linkname = dest
info.size = 0 # A symlink shouldn't have size
tgz.addfile(tarinfo=info)
mask = ~(stat.S_IWOTH | stat.S_IWGRP)
i_file = 0
n_files = len(files)
last_progress = None
if output and n_files > 1 and not output.is_terminal:
output.write("[")
elif output and n_files > 1 and output.is_terminal:
progress_bar = tqdm(total=len(files), desc="Compressing package...",
unit="files", leave=True, dynamic_ncols=False,
ascii=True)
for filename, abs_path in sorted(files.items()):
info = tarfile.TarInfo(name=filename)
info.size = os.stat(abs_path).st_size
info.mode = os.stat(abs_path).st_mode & mask
if os.path.islink(abs_path):
info.type = tarfile.SYMTYPE
info.size = 0 # A symlink shouldn't have size
info.linkname = os.readlink(abs_path) # @UndefinedVariable
tgz.addfile(tarinfo=info)
else:
with open(abs_path, 'rb') as file_handler:
tgz.addfile(tarinfo=info, fileobj=file_handler)
if output and n_files > 1:
i_file = i_file + 1
units = min(50, int(50 * i_file / n_files))
if last_progress != units: # Avoid screen refresh if nothing has change
if not output.is_terminal:
output.write('=' * (units - (last_progress or 0)))
last_progress = units
if output.is_terminal:
progress_bar.set_description("Compressing package: %s/%s files" % (i_file, n_files))
progress_bar.update()
if output and n_files > 1:
if output.is_terminal:
progress_bar.close()
output.rewrite_line("{} [done]".format(progress_bar.desc))
else:
output.writeln("]")
tgz.close()
clean_dirty(tgz_path)
duration = time.time() - t1
log_compressed_files(files, duration, tgz_path)
return tgz_path
|
def compress_files(files, symlinks, name, dest_dir, output=None):
t1 = time.time()
# FIXME, better write to disk sequentially and not keep tgz contents in memory
tgz_path = os.path.join(dest_dir, name)
set_dirty(tgz_path)
with open(tgz_path, "wb") as tgz_handle:
# tgz_contents = BytesIO()
tgz = gzopen_without_timestamps(name, mode="w", fileobj=tgz_handle)
for filename, dest in sorted(symlinks.items()):
info = tarfile.TarInfo(name=filename)
info.type = tarfile.SYMTYPE
info.linkname = dest
info.size = 0 # A symlink shouldn't have size
tgz.addfile(tarinfo=info)
mask = ~(stat.S_IWOTH | stat.S_IWGRP)
i_file = 0
n_files = len(files)
last_progress = None
if output and n_files > 1 and not output.is_terminal:
output.write("[")
elif output and n_files > 1 and output.is_terminal:
progress_bar = tqdm(total=len(files), desc="Compressing package...",
unit="files", leave=True, dynamic_ncols=False,
ascii=True, file=output)
for filename, abs_path in sorted(files.items()):
info = tarfile.TarInfo(name=filename)
info.size = os.stat(abs_path).st_size
info.mode = os.stat(abs_path).st_mode & mask
if os.path.islink(abs_path):
info.type = tarfile.SYMTYPE
info.size = 0 # A symlink shouldn't have size
info.linkname = os.readlink(abs_path) # @UndefinedVariable
tgz.addfile(tarinfo=info)
else:
with open(abs_path, 'rb') as file_handler:
tgz.addfile(tarinfo=info, fileobj=file_handler)
if output and n_files > 1:
i_file = i_file + 1
units = min(50, int(50 * i_file / n_files))
if last_progress != units: # Avoid screen refresh if nothing has change
if not output.is_terminal:
output.write('=' * (units - (last_progress or 0)))
last_progress = units
if output.is_terminal:
progress_bar.set_description("Compressing package: %s/%s files" % (i_file, n_files))
progress_bar.update()
if output and n_files > 1:
if output.is_terminal:
progress_bar.close()
output.rewrite_line("{} [done]".format(progress_bar.desc))
else:
output.writeln("]")
tgz.close()
clean_dirty(tgz_path)
duration = time.time() - t1
log_compressed_files(files, duration, tgz_path)
return tgz_path
|
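The symlink entries above are plain tarfile bookkeeping; this minimal, standalone sketch reproduces that part in isolation (paths are invented, and gzopen_without_timestamps, set_dirty and the Conan output object are not involved).
import io
import tarfile
buf = io.BytesIO()
with tarfile.open(fileobj=buf, mode="w:gz") as tgz:
    info = tarfile.TarInfo(name="lib/libfoo.so")
    info.type = tarfile.SYMTYPE
    info.linkname = "libfoo.so.1"
    info.size = 0  # a symlink entry carries no payload
    tgz.addfile(tarinfo=info)
print(len(buf.getvalue()), "bytes written")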
46,749 |
def latent_distribution_test(
A1,
A2,
test="dcorr",
metric="euclidean",
n_components=None,
n_bootstraps=500,
random_state=None,
workers=1,
size_correction=True,
pooled=False,
align_type="sign_flips",
align_kws={},
input_graph=True,
):
"""Two-sample hypothesis test for the problem of determining whether two random
dot product graphs have the same distributions of latent positions.
This test can operate on two graphs where there is no known matching
between the vertices of the two graphs, or even when the number of vertices
is different. Currently, testing is only supported for undirected graphs.
Read more in the `Latent Distribution Two-Graph Testing Tutorial
<https://microsoft.github.io/graspologic/tutorials/inference/latent_distribution_test.html>`_
Parameters
----------
A1, A2 : variable (see description of 'input_graph')
The two graphs, or their embeddings to run a hypothesis test on.
Expected variable type and shape depends on input_graph attribute
    test : str (default="dcorr")
Backend hypothesis test to use, one of ["cca", "dcorr", "hhg", "rv", "hsic", "mgc"].
These tests are typically used for independence testing, but here they
are used for a two-sample hypothesis test on the latent positions of
two graphs. See :class:`hyppo.ksample.KSample` for more information.
    metric : str or function (default="euclidean")
Distance or a kernel metric to use, either a callable or a valid string.
If a callable, then it should behave similarly to either
:func:`sklearn.metrics.pairwise_distances` or to
:func:`sklearn.metrics.pairwise.pairwise_kernels`.
If a string, then it should be either one of the keys in
:py:attr:`sklearn.metrics.pairwise.PAIRED_DISTANCES` one of the keys in
:py:attr:`sklearn.metrics.pairwise.PAIRWISE_KERNEL_FUNCTIONS`, or "gaussian",
which will use a gaussian kernel with an adaptively selected bandwidth.
It is recommended to use kernels (e.g. "gaussian") with kernel-based
hsic test and distances (e.g. "euclidean") with all other tests.
n_components : int or None (default=None)
Number of embedding dimensions. If None, the optimal embedding
        dimensions are found by the Zhu and Ghodsi algorithm.
See :func:`~graspologic.embed.selectSVD` for more information.
This argument is ignored if ``input_graph`` is False.
    n_bootstraps : int (default=500)
Number of bootstrap iterations for the backend hypothesis test.
See :class:`hyppo.ksample.KSample` for more information.
random_state : {None, int, `~np.random.RandomState`, `~np.random.Generator`}
This parameter defines the object to use for drawing random
variates.
If `random_state` is ``None`` the `~np.random.RandomState` singleton is
used.
If `random_state` is an int, a new ``RandomState`` instance is used,
seeded with `random_state`.
If `random_state` is already a ``RandomState`` or ``Generator``
instance, then that object is used.
Default is None.
workers : int (default=1)
Number of workers to use. If more than 1, parallelizes the code.
Supply -1 to use all cores available to the Process.
size_correction : bool (default=True)
Ignored when the two graphs have the same number of vertices. The test
degrades in validity as the number of vertices of the two graphs
diverge from each other, unless a correction is performed.
- True
Whenever the two graphs have different numbers of vertices,
estimates the plug-in estimator for the variance and uses it to
correct the embedding of the larger graph.
- False
Does not perform any modifications (not recommended).
pooled : bool (default=False)
Ignored whenever the two graphs have the same number of vertices or
``size_correction`` is set to False. In order to correct the adjacency
spectral embedding used in the test, it is needed to estimate the
variance for each of the latent position estimates in the larger graph,
which requires to compute different sample moments. These moments can
be computed either over the larger graph (False), or over both graphs
(True). Setting it to True should not affect the behavior of the test
under the null hypothesis, but it is not clear whether it has more
        or less power under particular alternatives. Generally not recommended,
as it is untested and included for experimental purposes.
align_type : str, {'sign_flips' (default), 'seedless_procrustes'} or None
Random dot product graphs have an inherent non-identifiability,
associated with their latent positions. Thus, two embeddings of
different graphs may not be orthogonally aligned. Without this accounted
for, two embeddings of different graphs may appear different, even
if the distributions of the true latent positions are the same.
        There are several options in terms of how this can be addressed:
- 'sign_flips'
A simple heuristic that flips the signs of one of the embeddings,
if the medians of the two embeddings in that dimension differ from
each other. See :class:`graspologic.align.SignFlips` for more
information on this procedure. In the limit, this is guaranteed to
lead to a valid test, as long as matrix :math:`X^T X`, where
:math:`X` is the latent positions does not have repeated non-zero
eigenvalues. This may, however, result in an invalid test in the
            finite sample case if some eigenvalues are the same or close.
- 'seedless_procrustes'
An algorithm that learns an orthogonal alignment matrix. This
procedure is slower than sign flips, but is guaranteed to yield a
valid test in the limit, and also makes the test more valid in some
finite sample cases, in which the eigenvalues are very close to
            each other. See :class:`graspologic.align.SeedlessProcrustes` for more
            information on the procedure.
- None
Do not use any alignment technique. This is strongly not
recommended, as it may often result in a test that is not valid.
align_kws : dict
Keyword arguments for the aligner of choice, either
:class:`graspologic.align.SignFlips` or
:class:`graspologic.align.SeedlessProcrustes`, depending on the ``align_type``.
See respective classes for more information.
input_graph : bool (default=True)
Flag whether to expect two full graphs, or the embeddings.
- True
This function expects graphs, either as NetworkX graph objects
or as adjacency matrices, provided as ndarrays of size (n, n) and
(m, m). They will be embedded using adjacency spectral embeddings.
- False
This function expects adjacency spectral embeddings of the graphs,
they must be ndarrays of size (n, d) and (m, d), where
d must be same. n_components attribute is ignored in this case.
Returns
----------
p_value : float
The overall p value from the test.
sample_T_statistic : float
The observed difference between the embedded latent positions of the
two input graphs.
misc_stats : dictionary
A collection of other statistics obtained from the latent position test
- null_distribution : ndarray, shape (n_bootstraps,)
The distribution of T statistics generated under the null.
- n_components : int
Number of embedding dimensions.
- Q : array, size (d, d)
Final orthogonal matrix, used to modify ``X``.
References
----------
.. [1] Tang, M., Athreya, A., Sussman, D. L., Lyzinski, V., & Priebe, C. E. (2017).
"A nonparametric two-sample hypothesis testing problem for random graphs."
Bernoulli, 23(3), 1599-1630.
.. [2] Panda, S., Palaniappan, S., Xiong, J., Bridgeford, E., Mehta, R., Shen, C., & Vogelstein, J. (2019).
"hyppo: A Comprehensive Multivariate Hypothesis Testing Python Package."
arXiv:1907.02088.
.. [3] Alyakin, A. A., Agterberg, J., Helm, H. S., Priebe, C. E. (2020).
"Correcting a Nonparametric Two-sample Graph Hypothesis Test for Graphs with Different Numbers of Vertices"
arXiv:2008.09434
"""
# check test argument
if not isinstance(test, str):
msg = "test must be a str, not {}".format(type(test))
raise TypeError(msg)
elif test not in _VALID_TESTS:
msg = "Unknown test {}. Valid tests are {}".format(test, _VALID_TESTS)
raise ValueError(msg)
# metric argument is checked when metric_func_ is instantiated
# check n_components argument
if n_components is not None:
if not isinstance(n_components, int):
msg = "n_components must be an int, not {}.".format(type(n_components))
raise TypeError(msg)
# check n_bootstraps argument
if not isinstance(n_bootstraps, int):
msg = "n_bootstraps must be an int, not {}".format(type(n_bootstraps))
raise TypeError(msg)
elif n_bootstraps < 0:
msg = "{} is invalid number of bootstraps, must be non-negative"
raise ValueError(msg.format(n_bootstraps))
# check workers argument
if not isinstance(workers, int):
msg = "workers must be an int, not {}".format(type(workers))
raise TypeError(msg)
# check size_correction argument
if not isinstance(size_correction, bool):
msg = "size_correction must be a bool, not {}".format(type(size_correction))
raise TypeError(msg)
# check pooled argument
if not isinstance(pooled, bool):
msg = "pooled must be a bool, not {}".format(type(pooled))
raise TypeError(msg)
# check align_type argument
if (not isinstance(align_type, str)) and (align_type is not None):
msg = "align_type must be a string or None, not {}".format(type(align_type))
raise TypeError(msg)
align_types_supported = ["sign_flips", "seedless_procrustes", None]
if align_type not in align_types_supported:
msg = "supported align types are {}".format(align_types_supported)
raise ValueError(msg)
# check align_kws argument
if not isinstance(align_kws, dict):
msg = "align_kws must be a dictionary of keyword arguments, not {}".format(
type(align_kws)
)
raise TypeError(msg)
# check input_graph argument
if not isinstance(input_graph, bool):
msg = "input_graph must be a bool, not {}".format(type(input_graph))
raise TypeError(msg)
if input_graph:
A1 = import_graph(A1)
A2 = import_graph(A2)
X1_hat, X2_hat = _embed(A1, A2, n_components)
else:
# check for nx objects, since they are castable to arrays,
# but we don't want that
if not isinstance(A1, np.ndarray):
msg = (
f"Embedding of the first graph is of type {type(A1)}, not "
"np.ndarray. If input_graph is False, the inputs need to be "
"adjacency spectral embeddings, with shapes (n, d) and "
"(m, d), passed as np.ndarrays."
)
raise TypeError(msg)
if not isinstance(A2, np.ndarray):
msg = (
f"Embedding of the second graph is of type {type(A2)}, not an "
"array. If input_graph is False, the inputs need to be "
"adjacency spectral embeddings, with shapes (n, d) and "
"(m, d), passed as np.ndarrays."
)
raise TypeError(msg)
if A1.ndim != 2:
msg = (
"Embedding array of the first graph does not have two dimensions. "
"If input_graph is False, the inputs need to be adjacency "
"spectral embeddings, with shapes (n, d) and (m, d)"
)
raise ValueError(msg)
if A2.ndim != 2:
msg = (
"Embedding array of the second graph does not have two dimensions. "
"If input_graph is False, the inputs need to be adjacency "
"spectral embeddings, with shapes (n, d) and (m, d)"
)
raise ValueError(msg)
if A1.shape[1] != A2.shape[1]:
msg = (
"Two input embeddings have different number of components. "
"If input_graph is False, the inputs need to be adjacency "
"spectral embeddings, with shapes (n, d) and (m, d)"
)
raise ValueError(msg)
# checking for inf values
X1_hat = check_array(A1)
X2_hat = check_array(A2)
if align_type == "sign_flips":
aligner = SignFlips(**align_kws)
X1_hat = aligner.fit_transform(X1_hat, X2_hat)
Q = aligner.Q_
elif align_type == "seedless_procrustes":
aligner = SeedlessProcrustes(**align_kws)
X1_hat = aligner.fit_transform(X1_hat, X2_hat)
Q = aligner.Q_
else:
Q = np.identity(X1_hat.shape[0])
if size_correction:
X1_hat, X2_hat = _sample_modified_ase(
X1_hat, X2_hat, workers=workers, random_state=random_state, pooled=pooled
)
metric_func_ = _instantiate_metric_func(metric=metric, test=test)
test_obj = KSample(test, compute_distance=metric_func_)
data = test_obj.test(X1_hat, X2_hat, reps=n_bootstraps, workers=workers, auto=False)
null_distribution = test_obj.indep_test.null_dist
misc_stats = {
"null_distribution": null_distribution,
"n_components": n_components,
"Q": Q,
}
sample_T_statistic = data[0]
p_value = data[1]
return ldt_result(p_value, sample_T_statistic, misc_stats)
|
def latent_distribution_test(
A1,
A2,
test="dcorr",
metric="euclidean",
n_components=None,
n_bootstraps=500,
random_state=None,
workers=1,
size_correction=True,
pooled=False,
align_type="sign_flips",
align_kws={},
input_graph=True,
):
"""Two-sample hypothesis test for the problem of determining whether two random
dot product graphs have the same distributions of latent positions.
This test can operate on two graphs where there is no known matching
between the vertices of the two graphs, or even when the number of vertices
is different. Currently, testing is only supported for undirected graphs.
Read more in the `Latent Distribution Two-Graph Testing Tutorial
<https://microsoft.github.io/graspologic/tutorials/inference/latent_distribution_test.html>`_
Parameters
----------
A1, A2 : variable (see description of 'input_graph')
The two graphs, or their embeddings to run a hypothesis test on.
Expected variable type and shape depends on input_graph attribute
    test : str (default="dcorr")
Backend hypothesis test to use, one of ["cca", "dcorr", "hhg", "rv", "hsic", "mgc"].
These tests are typically used for independence testing, but here they
are used for a two-sample hypothesis test on the latent positions of
two graphs. See :class:`hyppo.ksample.KSample` for more information.
    metric : str or function (default="euclidean")
Distance or a kernel metric to use, either a callable or a valid string.
If a callable, then it should behave similarly to either
:func:`sklearn.metrics.pairwise_distances` or to
:func:`sklearn.metrics.pairwise.pairwise_kernels`.
If a string, then it should be either one of the keys in
:py:attr:`sklearn.metrics.pairwise.PAIRED_DISTANCES` one of the keys in
:py:attr:`sklearn.metrics.pairwise.PAIRWISE_KERNEL_FUNCTIONS`, or "gaussian",
which will use a gaussian kernel with an adaptively selected bandwidth.
It is recommended to use kernels (e.g. "gaussian") with kernel-based
hsic test and distances (e.g. "euclidean") with all other tests.
n_components : int or None (default=None)
Number of embedding dimensions. If None, the optimal embedding
        dimensions are found by the Zhu and Ghodsi algorithm.
See :func:`~graspologic.embed.selectSVD` for more information.
This argument is ignored if ``input_graph`` is False.
    n_bootstraps : int (default=500)
Number of bootstrap iterations for the backend hypothesis test.
See :class:`hyppo.ksample.KSample` for more information.
random_state : {None, int, `~np.random.RandomState`, `~np.random.Generator`}
This parameter defines the object to use for drawing random
variates.
If `random_state` is ``None`` the `~np.random.RandomState` singleton is
used.
If `random_state` is an int, a new ``RandomState`` instance is used,
seeded with `random_state`.
If `random_state` is already a ``RandomState`` or ``Generator``
instance, then that object is used.
Default is None.
n_jobs : int (default=1)
Number of workers to use. If more than 1, parallelizes the code.
Supply -1 to use all cores available to the Process.
size_correction : bool (default=True)
Ignored when the two graphs have the same number of vertices. The test
degrades in validity as the number of vertices of the two graphs
diverge from each other, unless a correction is performed.
- True
Whenever the two graphs have different numbers of vertices,
estimates the plug-in estimator for the variance and uses it to
correct the embedding of the larger graph.
- False
Does not perform any modifications (not recommended).
pooled : bool (default=False)
Ignored whenever the two graphs have the same number of vertices or
``size_correction`` is set to False. In order to correct the adjacency
spectral embedding used in the test, it is needed to estimate the
variance for each of the latent position estimates in the larger graph,
which requires to compute different sample moments. These moments can
be computed either over the larger graph (False), or over both graphs
(True). Setting it to True should not affect the behavior of the test
under the null hypothesis, but it is not clear whether it has more
        or less power under particular alternatives. Generally not recommended,
as it is untested and included for experimental purposes.
align_type : str, {'sign_flips' (default), 'seedless_procrustes'} or None
Random dot product graphs have an inherent non-identifiability,
associated with their latent positions. Thus, two embeddings of
different graphs may not be orthogonally aligned. Without this accounted
for, two embeddings of different graphs may appear different, even
if the distributions of the true latent positions are the same.
        There are several options in terms of how this can be addressed:
- 'sign_flips'
A simple heuristic that flips the signs of one of the embeddings,
if the medians of the two embeddings in that dimension differ from
each other. See :class:`graspologic.align.SignFlips` for more
information on this procedure. In the limit, this is guaranteed to
lead to a valid test, as long as matrix :math:`X^T X`, where
:math:`X` is the latent positions does not have repeated non-zero
eigenvalues. This may, however, result in an invalid test in the
            finite sample case if some eigenvalues are the same or close.
- 'seedless_procrustes'
An algorithm that learns an orthogonal alignment matrix. This
procedure is slower than sign flips, but is guaranteed to yield a
valid test in the limit, and also makes the test more valid in some
finite sample cases, in which the eigenvalues are very close to
            each other. See :class:`graspologic.align.SeedlessProcrustes` for more
            information on the procedure.
- None
Do not use any alignment technique. This is strongly not
recommended, as it may often result in a test that is not valid.
align_kws : dict
Keyword arguments for the aligner of choice, either
:class:`graspologic.align.SignFlips` or
:class:`graspologic.align.SeedlessProcrustes`, depending on the ``align_type``.
See respective classes for more information.
input_graph : bool (default=True)
Flag whether to expect two full graphs, or the embeddings.
- True
This function expects graphs, either as NetworkX graph objects
or as adjacency matrices, provided as ndarrays of size (n, n) and
(m, m). They will be embedded using adjacency spectral embeddings.
- False
This function expects adjacency spectral embeddings of the graphs,
they must be ndarrays of size (n, d) and (m, d), where
d must be same. n_components attribute is ignored in this case.
Returns
----------
p_value : float
The overall p value from the test.
sample_T_statistic : float
The observed difference between the embedded latent positions of the
two input graphs.
misc_stats : dictionary
A collection of other statistics obtained from the latent position test
- null_distribution : ndarray, shape (n_bootstraps,)
The distribution of T statistics generated under the null.
- n_components : int
Number of embedding dimensions.
- Q : array, size (d, d)
Final orthogonal matrix, used to modify ``X``.
References
----------
.. [1] Tang, M., Athreya, A., Sussman, D. L., Lyzinski, V., & Priebe, C. E. (2017).
"A nonparametric two-sample hypothesis testing problem for random graphs."
Bernoulli, 23(3), 1599-1630.
.. [2] Panda, S., Palaniappan, S., Xiong, J., Bridgeford, E., Mehta, R., Shen, C., & Vogelstein, J. (2019).
"hyppo: A Comprehensive Multivariate Hypothesis Testing Python Package."
arXiv:1907.02088.
.. [3] Alyakin, A. A., Agterberg, J., Helm, H. S., Priebe, C. E. (2020).
"Correcting a Nonparametric Two-sample Graph Hypothesis Test for Graphs with Different Numbers of Vertices"
arXiv:2008.09434
"""
# check test argument
if not isinstance(test, str):
msg = "test must be a str, not {}".format(type(test))
raise TypeError(msg)
elif test not in _VALID_TESTS:
msg = "Unknown test {}. Valid tests are {}".format(test, _VALID_TESTS)
raise ValueError(msg)
# metric argument is checked when metric_func_ is instantiated
# check n_components argument
if n_components is not None:
if not isinstance(n_components, int):
msg = "n_components must be an int, not {}.".format(type(n_components))
raise TypeError(msg)
# check n_bootstraps argument
if not isinstance(n_bootstraps, int):
msg = "n_bootstraps must be an int, not {}".format(type(n_bootstraps))
raise TypeError(msg)
elif n_bootstraps < 0:
msg = "{} is invalid number of bootstraps, must be non-negative"
raise ValueError(msg.format(n_bootstraps))
# check workers argument
if not isinstance(workers, int):
msg = "workers must be an int, not {}".format(type(workers))
raise TypeError(msg)
# check size_correction argument
if not isinstance(size_correction, bool):
msg = "size_correction must be a bool, not {}".format(type(size_correction))
raise TypeError(msg)
# check pooled argument
if not isinstance(pooled, bool):
msg = "pooled must be a bool, not {}".format(type(pooled))
raise TypeError(msg)
# check align_type argument
if (not isinstance(align_type, str)) and (align_type is not None):
msg = "align_type must be a string or None, not {}".format(type(align_type))
raise TypeError(msg)
align_types_supported = ["sign_flips", "seedless_procrustes", None]
if align_type not in align_types_supported:
msg = "supported align types are {}".format(align_types_supported)
raise ValueError(msg)
# check align_kws argument
if not isinstance(align_kws, dict):
msg = "align_kws must be a dictionary of keyword arguments, not {}".format(
type(align_kws)
)
raise TypeError(msg)
# check input_graph argument
if not isinstance(input_graph, bool):
msg = "input_graph must be a bool, not {}".format(type(input_graph))
raise TypeError(msg)
if input_graph:
A1 = import_graph(A1)
A2 = import_graph(A2)
X1_hat, X2_hat = _embed(A1, A2, n_components)
else:
# check for nx objects, since they are castable to arrays,
# but we don't want that
if not isinstance(A1, np.ndarray):
msg = (
f"Embedding of the first graph is of type {type(A1)}, not "
"np.ndarray. If input_graph is False, the inputs need to be "
"adjacency spectral embeddings, with shapes (n, d) and "
"(m, d), passed as np.ndarrays."
)
raise TypeError(msg)
if not isinstance(A2, np.ndarray):
msg = (
f"Embedding of the second graph is of type {type(A2)}, not an "
"array. If input_graph is False, the inputs need to be "
"adjacency spectral embeddings, with shapes (n, d) and "
"(m, d), passed as np.ndarrays."
)
raise TypeError(msg)
if A1.ndim != 2:
msg = (
"Embedding array of the first graph does not have two dimensions. "
"If input_graph is False, the inputs need to be adjacency "
"spectral embeddings, with shapes (n, d) and (m, d)"
)
raise ValueError(msg)
if A2.ndim != 2:
msg = (
"Embedding array of the second graph does not have two dimensions. "
"If input_graph is False, the inputs need to be adjacency "
"spectral embeddings, with shapes (n, d) and (m, d)"
)
raise ValueError(msg)
if A1.shape[1] != A2.shape[1]:
msg = (
"Two input embeddings have different number of components. "
"If input_graph is False, the inputs need to be adjacency "
"spectral embeddings, with shapes (n, d) and (m, d)"
)
raise ValueError(msg)
# checking for inf values
X1_hat = check_array(A1)
X2_hat = check_array(A2)
if align_type == "sign_flips":
aligner = SignFlips(**align_kws)
X1_hat = aligner.fit_transform(X1_hat, X2_hat)
Q = aligner.Q_
elif align_type == "seedless_procrustes":
aligner = SeedlessProcrustes(**align_kws)
X1_hat = aligner.fit_transform(X1_hat, X2_hat)
Q = aligner.Q_
else:
Q = np.identity(X1_hat.shape[0])
if size_correction:
X1_hat, X2_hat = _sample_modified_ase(
X1_hat, X2_hat, workers=workers, random_state=random_state, pooled=pooled
)
metric_func_ = _instantiate_metric_func(metric=metric, test=test)
test_obj = KSample(test, compute_distance=metric_func_)
data = test_obj.test(X1_hat, X2_hat, reps=n_bootstraps, workers=workers, auto=False)
null_distribution = test_obj.indep_test.null_dist
misc_stats = {
"null_distribution": null_distribution,
"n_components": n_components,
"Q": Q,
}
sample_T_statistic = data[0]
p_value = data[1]
return ldt_result(p_value, sample_T_statistic, misc_stats)
|
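A hedged end-to-end sketch of the test on two simulated graphs. It assumes graspologic is installed, that er_np and latent_distribution_test are importable as shown, and that the return value unpacks into the three items listed in the Returns section above; the graph sizes and parameters are arbitrary.
from graspologic.simulations import er_np
from graspologic.inference import latent_distribution_test
A1 = er_np(n=50, p=0.3)
A2 = er_np(n=60, p=0.3)  # a different number of vertices is allowed
p_value, t_stat, misc = latent_distribution_test(
    A1, A2, test="dcorr", metric="euclidean", n_components=2, n_bootstraps=200
)
print(p_value, t_stat, misc["n_components"])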
8,986 |
def find_lazy(*loaders):
"""Decorate a callable as a find rule with lazy loading.
:param loaders: one or more functions to generate a list of **compiled**
regexes to match patterns in a line.
:type loaders: :term:`function`
Each ``loader`` function must accept a ``settings`` parameter and return a
list (or tuple) of **compiled** regular expressions::
import re
def loader(settings):
return [re.compile(r'<your_rule_pattern>')]
It will be called by Sopel when the bot parses the plugin to register the
find rules to get its regexes. The ``settings`` argument will be the bot's
:class:`sopel.config.Config` object.
If any of the ``loader`` functions raises a
:exc:`~sopel.plugins.exceptions.PluginError` exception, the find rule will
be ignored; it will not fail the plugin's loading.
The decorated function will behave like any other :func:`callable`::
from sopel import plugin
@plugin.find_lazy(loader)
def my_find_rule_handler(bot, trigger):
bot.say('Rule triggered by: %s' % trigger.group(0))
.. versionadded:: 7.1
.. seealso::
When more than one loader is provided, they will be chained together
with the :func:`sopel.tools.chain_loaders` function.
"""
def decorator(function):
function._sopel_callable = True
if not hasattr(function, 'find_rules_lazy_loaders'):
function.find_rules_lazy_loaders = []
function.find_rules_lazy_loaders.extend(loaders)
return function
return decorator
|
def find_lazy(*loaders):
"""Decorate a callable as a find rule with lazy loading.
:param loaders: one or more functions to generate a list of **compiled**
regexes to match patterns in a line
:type loaders: :term:`function`
Each ``loader`` function must accept a ``settings`` parameter and return a
list (or tuple) of **compiled** regular expressions::
import re
def loader(settings):
return [re.compile(r'<your_rule_pattern>')]
It will be called by Sopel when the bot parses the plugin to register the
find rules to get its regexes. The ``settings`` argument will be the bot's
:class:`sopel.config.Config` object.
If any of the ``loader`` functions raises a
:exc:`~sopel.plugins.exceptions.PluginError` exception, the find rule will
be ignored; it will not fail the plugin's loading.
The decorated function will behave like any other :func:`callable`::
from sopel import plugin
@plugin.find_lazy(loader)
def my_find_rule_handler(bot, trigger):
bot.say('Rule triggered by: %s' % trigger.group(0))
.. versionadded:: 7.1
.. seealso::
When more than one loader is provided, they will be chained together
with the :func:`sopel.tools.chain_loaders` function.
"""
def decorator(function):
function._sopel_callable = True
if not hasattr(function, 'find_rules_lazy_loaders'):
function.find_rules_lazy_loaders = []
function.find_rules_lazy_loaders.extend(loaders)
return function
return decorator
|
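A hedged plugin sketch with two loaders chained by the decorator above; it assumes a Sopel 7.1+ environment in which sopel.plugin.find_lazy behaves as documented, and the regular expressions are illustrative.
import re
from sopel import plugin
def url_loader(settings):
    # `settings` is the bot's sopel.config.Config; unused in this toy loader
    return [re.compile(r"https?://\S+")]
def name_loader(settings):
    return [re.compile(r"\bsopel\b", re.IGNORECASE)]
@plugin.find_lazy(url_loader, name_loader)
def my_find_rule_handler(bot, trigger):
    bot.say("Rule triggered by: %s" % trigger.group(0))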
39,560 |
def parse_advisory(response) -> Iterable[VendorData]:
if "vulns" in response:
for vuln in response["vulns"]:
aliases = []
affected_versions = []
fixed = []
if "aliases" in vuln:
aliases.extend(vuln["aliases"])
if "id" in vuln:
aliases.append(vuln["id"])
if "affected" in vuln:
if "versions" in vuln["affected"][0]:
affected_versions.extend(vuln["affected"][0]["versions"])
if vuln["affected"] and "ranges" in vuln["affected"][0]:
if "events" in vuln["affected"][0]["ranges"][0]:
events = vuln["affected"][0]["ranges"][0]["events"]
if events:
for event in events:
if "introduced" in event:
affected_versions.append(event["introduced"])
if "fixed" in event:
fixed.append(event["fixed"])
yield VendorData(
aliases=sorted(list(set(aliases))),
affected_versions=sorted(list(set(affected_versions))),
fixed_versions=sorted(list(set(fixed))),
)
|
def parse_advisory(response) -> Iterable[VendorData]:
if "vulns" in response:
for vuln in response.get("vulns") or []:
aliases = []
affected_versions = []
fixed = []
if "aliases" in vuln:
aliases.extend(vuln["aliases"])
if "id" in vuln:
aliases.append(vuln["id"])
if "affected" in vuln:
if "versions" in vuln["affected"][0]:
affected_versions.extend(vuln["affected"][0]["versions"])
if vuln["affected"] and "ranges" in vuln["affected"][0]:
if "events" in vuln["affected"][0]["ranges"][0]:
events = vuln["affected"][0]["ranges"][0]["events"]
if events:
for event in events:
if "introduced" in event:
affected_versions.append(event["introduced"])
if "fixed" in event:
fixed.append(event["fixed"])
yield VendorData(
aliases=sorted(list(set(aliases))),
affected_versions=sorted(list(set(affected_versions))),
fixed_versions=sorted(list(set(fixed))),
)
|
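An invented, OSV-shaped payload that exercises every branch the parser above reads (aliases, explicit versions, and introduced/fixed range events). Only the key names come from the function; the identifiers and versions are made up, and VendorData is assumed to expose the three fields used below.
response = {
    "vulns": [
        {
            "id": "GHSA-xxxx-xxxx-xxxx",
            "aliases": ["CVE-2021-00000"],
            "affected": [
                {
                    "versions": ["1.0.0", "1.1.0"],
                    "ranges": [{"events": [{"introduced": "0"}, {"fixed": "1.2.0"}]}],
                }
            ],
        }
    ]
}
for record in parse_advisory(response):
    print(record.aliases, record.affected_versions, record.fixed_versions)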
28,089 |
def journal_entry(cmdr, is_beta, system, station, entry, state):
# Always update, even if we're not the *current* system or station provider.
this.system_address = entry.get('SystemAddress') or this.system_address
this.system = entry.get('StarSystem') or this.system
# We need pop == 0 to set the value so as to clear 'x' in systems with
# no stations.
pop = entry.get('Population')
if pop is not None:
this.system_population = pop
this.station = entry.get('StationName') or this.station
this.station_marketid = entry.get('MarketID') or this.station_marketid
# We might pick up StationName in DockingRequested, make sure we clear it if leaving
if entry['event'] in ('Undocked', 'FSDJump', 'SupercruiseEntry'):
this.station = None
this.station_marketid = None
# Only actually change URLs if we are current provider.
if config.get('system_provider') == 'eddb':
this.system_link['text'] = this.system
this.system_link['url'] = system_url(this.system) # Override standard URL function
this.system_link.update_idletasks()
# But only actually change the URL if we are current station provider.
if config.get('station_provider') == 'eddb':
this.station_link['text'] = this.station or (this.system_population and this.system_population > 0 and STATION_UNDOCKED or '')
this.station_link['url'] = station_url(this.system, this.station) # Override standard URL function
this.station_link.update_idletasks()
|
def journal_entry(cmdr, is_beta, system, station, entry, state):
# Always update, even if we're not the *current* system or station provider.
this.system_address = entry.get('SystemAddress') or this.system_address
this.system = entry.get('StarSystem') or this.system
# We need pop == 0 to set the value so as to clear 'x' in systems with
# no stations.
pop = entry.get('Population')
if pop is not None:
this.system_population = pop
this.station = entry.get('StationName') or this.station
this.station = entry.get('StationName', this.station)
# We might pick up StationName in DockingRequested, make sure we clear it if leaving
if entry['event'] in ('Undocked', 'FSDJump', 'SupercruiseEntry'):
this.station = None
this.station_marketid = None
# Only actually change URLs if we are current provider.
if config.get('system_provider') == 'eddb':
this.system_link['text'] = this.system
this.system_link['url'] = system_url(this.system) # Override standard URL function
this.system_link.update_idletasks()
# But only actually change the URL if we are current station provider.
if config.get('station_provider') == 'eddb':
this.station_link['text'] = this.station or (this.system_population and this.system_population > 0 and STATION_UNDOCKED or '')
this.station_link['url'] = station_url(this.system, this.station) # Override standard URL function
this.station_link.update_idletasks()
|
7,572 |
def tie_inputs(modelinstance, values):
r"""Tie inputs on a model (singular or compound).
Parameters
----------
modelinstance : `~astropy.modeling.Model`
The model instance, singular or compound.
values : dict[str, str or tuple[str, number] or callable]
A dictionary where the key identifies the parameter which is tied to the
parameter identified in the value. The value can also be:
- a custom callable : (modelinstance) -> parameter_value.
- a tuple of the parameter name and a multiplicative factor
Example
-------
``{'p_1':'p_2'}`` maps `p_1` to be equal to `p_2`.
`p_2` is the "real" parameter which p_1 shadows.
"""
# Future Note: could use AST to enable any expression on the prameters
for tie, toinfo in values.items():
# get the tied parameter (this will shadow the `to` parameter)
param = getattr(modelinstance, tie)
# get the `to` parameter (str or function) and the tying factor
to, mul = toinfo if isinstance(toinfo, tuple) else (toinfo, 1)
# create tie, detecting if callable or multiplicative factor
to = to if callable(to) else _TieParameterTo(to, mul)
setattr(param, "tied", to)
# evaluate tie on the model
setattr(modelinstance, tie, param.tied(modelinstance))
|
def tie_inputs(modelinstance, values):
r"""Tie inputs on a model (singular or compound).
Parameters
----------
modelinstance : `~astropy.modeling.Model`
The model instance, singular or compound.
values : dict[str, str or tuple[str, number] or callable]
A dictionary where the key identifies the parameter which is tied to the
parameter identified in the value. The value can also be:
- a custom callable : (modelinstance) -> parameter_value.
- a tuple of the parameter name and a multiplicative factor
Example
-------
``{'p_1':'p_2'}`` maps `p_1` to be equal to `p_2`.
`p_2` is the "real" parameter which p_1 shadows.
"""
# Future Note: could use AST to enable any expression on the parameters
for tie, toinfo in values.items():
# get the tied parameter (this will shadow the `to` parameter)
param = getattr(modelinstance, tie)
# get the `to` parameter (str or function) and the tying factor
to, mul = toinfo if isinstance(toinfo, tuple) else (toinfo, 1)
# create tie, detecting if callable or multiplicative factor
to = to if callable(to) else _TieParameterTo(to, mul)
setattr(param, "tied", to)
# evaluate tie on the model
setattr(modelinstance, tie, param.tied(modelinstance))
|
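A hedged sketch of the callable form of a tie, which sidesteps the _TieParameterTo helper entirely. It assumes astropy is installed and that tie_inputs, as defined above, is in scope; the compound Gaussian model and the factor of 2 are arbitrary.
from astropy.modeling import models
m = (models.Gaussian1D(amplitude=3.0, mean=0.0, stddev=1.0)
     + models.Gaussian1D(amplitude=1.0, mean=2.0, stddev=1.0))
# Tie amplitude_1 to twice amplitude_0 via a custom callable.
tie_inputs(m, {"amplitude_1": lambda model: 2 * model.amplitude_0.value})
print(m.amplitude_1.value)  # 6.0, evaluated once when the tie is applied
print(m.amplitude_1.tied)   # the callable stored on the parameter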
37,897 |
def launch_external_viewer(fname):
"""
Open a file in an external viewer program.
Uses the ``xdg-open`` command on Linux, the ``open`` command on macOS, and
the default web browser on other systems.
Parameters
----------
fname : str
The file name of the file (preferably a full path).
"""
# Redirect stdout and stderr to devnull so that the terminal isn't filled
# with noise
run_args = dict(stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
# Open the file with the default viewer.
# Fall back to the browser if can't recognize the operating system.
os_name = sys.platform
if (
os_name.startswith("linux")
or os_name.startswith("freebsd")
and shutil.which("xdg-open")
):
subprocess.run(["xdg-open", fname], check=False, **run_args)
elif os_name == "darwin": # Darwin is macOS
subprocess.run(["open", fname], check=False, **run_args)
else:
webbrowser.open_new_tab("file://{}".format(fname))
|
def launch_external_viewer(fname):
"""
Open a file in an external viewer program.
Uses the ``xdg-open`` command on Linux, the ``open`` command on macOS, and
the default web browser on other systems.
Parameters
----------
fname : str
The file name of the file (preferably a full path).
"""
# Redirect stdout and stderr to devnull so that the terminal isn't filled
# with noise
run_args = dict(stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
# Open the file with the default viewer.
# Fall back to the browser if can't recognize the operating system.
os_name = sys.platform
if os_name.startswith(("linux", "freebsd")) and shutil.which("xdg-open"):
subprocess.run(["xdg-open", fname], check=False, **run_args)
elif os_name == "darwin": # Darwin is macOS
subprocess.run(["open", fname], check=False, **run_args)
else:
webbrowser.open_new_tab("file://{}".format(fname))
|
26,901 |
def get_resource_version(kube_client: client.CoreV1Api, namespace: str):
"""
List pods to get the latest resource version
See https://kubernetes.io/docs/reference/using-api/api-concepts/#efficient-detection-of-changes
"""
pod_list = kube_client.list_namespaced_pod(namespace)
resource_version = pod_list.metadata.resource_version
return resource_version
|
def get_latest_resource_version(kube_client: client.CoreV1Api, namespace: str):
"""
List pods to get the latest resource version
See https://kubernetes.io/docs/reference/using-api/api-concepts/#efficient-detection-of-changes
"""
pod_list = kube_client.list_namespaced_pod(namespace)
resource_version = pod_list.metadata.resource_version
return resource_version
|
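A hedged sketch of how the helper above (under either name) is typically used: seed a watch so that only changes newer than the listed state are streamed. It assumes the official kubernetes Python client and a cluster reachable via kubeconfig; the namespace and timeout are arbitrary.
from kubernetes import client, config, watch
config.load_kube_config()  # or config.load_incluster_config() inside a pod
v1 = client.CoreV1Api()
rv = get_resource_version(v1, "default")
w = watch.Watch()
for event in w.stream(v1.list_namespaced_pod, "default",
                      resource_version=rv, timeout_seconds=30):
    print(event["type"], event["object"].metadata.name)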
26,304 |
def _get_pip_versions():
# This fixture will attempt to detect if tests are being run without
# network connectivity and if so skip some tests
network = True
if not os.environ.get('NETWORK_REQUIRED', False): # pragma: nocover
try:
from urllib.request import urlopen
from urllib.error import URLError
except ImportError:
from urllib2 import urlopen, URLError # Python 2.7 compat
try:
urlopen('https://pypi.org', timeout=1)
except URLError:
# No network, disable most of these tests
network = False
network_versions = [
'pip==9.0.3',
'pip==10.0.1',
'pip==18.1',
# fails due to pypa/pip#6599
# 'pip==19.3.1',
'pip==20.0.2',
'https://github.com/pypa/pip/archive/master.zip',
]
versions = [None] + [
pytest.param(v, **({} if network else {'marks': pytest.mark.skip}))
for v in network_versions
]
return versions
|
def _get_pip_versions():
# This fixture will attempt to detect if tests are being run without
# network connectivity and if so skip some tests
network = True
if not os.environ.get('NETWORK_REQUIRED', False): # pragma: nocover
try:
from urllib.request import urlopen
from urllib.error import URLError
except ImportError:
from urllib2 import urlopen, URLError # Python 2.7 compat
try:
urlopen('https://pypi.org', timeout=1)
except URLError:
# No network, disable most of these tests
network = False
network_versions = [
'pip==9.0.3',
'pip==10.0.1',
'pip==18.1',
# fails due to pypa/pip#6599
pytest.mark.xfail('pip==19.3.1', reason='fails due to pypa/pip#6599'),
'pip==20.0.2',
'https://github.com/pypa/pip/archive/master.zip',
]
versions = [None] + [
pytest.param(v, **({} if network else {'marks': pytest.mark.skip}))
for v in network_versions
]
return versions
|
10,466 |
def check_pytest(args, version):
"""
Returns information about the pytest we are running.
Returns a dict with information, in case more information is added in the
future for other purposes.
:type args: EnvironmentConfig
:type version: str
"""
python = find_python(version)
stdout, _dummy = run_command(
args,
[python, os.path.join(ANSIBLE_TEST_DATA_ROOT, 'pytestcheck.py')],
capture=True,
always=True)
result = json.loads(stdout)
return result
|
def get_pytest_version(args, python_version): # type: (EnvironmentConfig, str) -> t.Optional[t.Tuple[int]]
"""
Returns information about the pytest we are running.
Returns a dict with information, in case more information is added in the
future for other purposes.
:type args: EnvironmentConfig
:type version: str
"""
python = find_python(version)
stdout, _dummy = run_command(
args,
[python, os.path.join(ANSIBLE_TEST_DATA_ROOT, 'pytestcheck.py')],
capture=True,
always=True)
result = json.loads(stdout)
return result
|
33,764 |
def _client(name, config):
return _resource(name, config).meta.client
|
def _configure_node_type_from_network_interface(config, node_kind):
return _resource(name, config).meta.client
|
41,659 |
def test_info_from_poetry_directory_fallback_on_poetry_create_error(
mocker: MockerFixture,
):
import poetry.inspection.info
mocker.patch(
"poetry.inspection.info.Factory.create_poetry", side_effect=RuntimeError
)
mock_get_poetry_package = mocker.spy(
poetry.inspection.info.PackageInfo, "_get_poetry_package"
)
mock_get_pep517_metadata = mocker.patch(
"poetry.inspection.info.get_pep517_metadata"
)
PackageInfo.from_directory(FIXTURE_DIR_INSPECTIONS / "demo_poetry_package")
assert mock_get_poetry_package.call_count == 1
assert mock_get_pep517_metadata.call_count == 1
|
def test_info_from_poetry_directory_fallback_on_poetry_create_error(
mocker: MockerFixture,
):
import poetry.inspection.info
mock_create_poetry = mocker.patch(
"poetry.inspection.info.Factory.create_poetry", side_effect=RuntimeError
)
mock_get_poetry_package = mocker.spy(
poetry.inspection.info.PackageInfo, "_get_poetry_package"
)
mock_get_pep517_metadata = mocker.patch(
"poetry.inspection.info.get_pep517_metadata"
)
PackageInfo.from_directory(FIXTURE_DIR_INSPECTIONS / "demo_poetry_package")
assert mock_get_poetry_package.call_count == 1
assert mock_get_pep517_metadata.call_count == 1
|
45,703 |
def _check_inputs(R, V, timesteps, ar_order):
if len(R.shape) != 3:
raise ValueError("R must be a three-dimensional array")
if R.shape[0] < ar_order + 1:
raise ValueError("R.shape[0] < ar_order+1")
if len(V.shape) != 3:
raise ValueError("V must be a three-dimensional array")
if R.shape[1:3] != V.shape[1:3]:
raise ValueError(
"dimension mismatch between R and V: shape(R)=%s, shape(V)=%s"
% (str(R.shape), str(V.shape))
)
if isinstance(timesteps, list) and not sorted(timesteps) == timesteps:
raise ValueError("timesteps is not in ascending order")
|
def _check_inputs(R, V, timesteps, ar_order):
if R.ndim != 3:
raise ValueError("R must be a three-dimensional array")
if R.shape[0] < ar_order + 1:
raise ValueError("R.shape[0] < ar_order+1")
if len(V.shape) != 3:
raise ValueError("V must be a three-dimensional array")
if R.shape[1:3] != V.shape[1:3]:
raise ValueError(
"dimension mismatch between R and V: shape(R)=%s, shape(V)=%s"
% (str(R.shape), str(V.shape))
)
if isinstance(timesteps, list) and not sorted(timesteps) == timesteps:
raise ValueError("timesteps is not in ascending order")
|
29,455 |
def parse_genome_results(self, f):
""" Parse the contents of the Qualimap BamQC genome_results.txt file """
regexes = {
'bam_file': r"bam file = (.+)",
'total_reads': r"number of reads = ([\d,]+)",
'mapped_reads': r"number of mapped reads = ([\d,]+)",
'mapped_bases': r"number of mapped bases = ([\d,]+)",
'sequenced_bases': r"number of sequenced bases = ([\d,]+)",
'mean_insert_size': r"mean insert size = ([\d,\.]+)",
'median_insert_size': r"median insert size = ([\d,\.]+)",
'mean_mapping_quality': r"mean mapping quality = ([\d,\.]+)",
'general_error_rate': r"general error rate = ([\d,\.]+)",
'mean_coverage': r"mean coverageData = ([\d]+\.[\d]+)"
}
d = dict()
for k, r in regexes.items():
r_search = re.search(r, f['f'], re.MULTILINE)
if r_search:
try:
d[k] = float(r_search.group(1).replace(',',''))
except ValueError:
d[k] = r_search.group(1)
# Check we have an input filename
if 'bam_file' not in d:
log.debug("Couldn't find an input filename in genome_results file {}".format(f['fn']))
return None
# Get a nice sample name
s_name = self.clean_s_name(d['bam_file'], f['root'])
# Add to general stats table & calculate a nice % aligned
try:
self.general_stats_data[s_name]['total_reads'] = d['total_reads']
self.general_stats_data[s_name]['mapped_reads'] = d['mapped_reads']
d['percentage_aligned'] = (d['mapped_reads'] / d['total_reads'])*100
self.general_stats_data[s_name]['percentage_aligned'] = d['percentage_aligned']
self.general_stats_data[s_name]['general_error_rate'] = d['general_error_rate']*100
self.general_stats_data[s_name]['mean_coverage'] = d['mean_coverage']
except KeyError:
pass
# Save results
if s_name in self.qualimap_bamqc_genome_results:
log.debug("Duplicate genome results sample name found! Overwriting: {}".format(s_name))
self.qualimap_bamqc_genome_results[s_name] = d
self.add_data_source(f, s_name=s_name, section='genome_results')
|
def parse_genome_results(self, f):
""" Parse the contents of the Qualimap BamQC genome_results.txt file """
regexes = {
'bam_file': r"bam file = (.+)",
'total_reads': r"number of reads = ([\d,]+)",
'mapped_reads': r"number of mapped reads = ([\d,]+)",
'mapped_bases': r"number of mapped bases = ([\d,]+)",
'sequenced_bases': r"number of sequenced bases = ([\d,]+)",
'mean_insert_size': r"mean insert size = ([\d,\.]+)",
'median_insert_size': r"median insert size = ([\d,\.]+)",
'mean_mapping_quality': r"mean mapping quality = ([\d,\.]+)",
'general_error_rate': r"general error rate = ([\d,\.]+)",
'mean_coverage': r"mean coverageData = ([\d,\.]+)"
}
d = dict()
for k, r in regexes.items():
r_search = re.search(r, f['f'], re.MULTILINE)
if r_search:
try:
d[k] = float(r_search.group(1).replace(',',''))
except ValueError:
d[k] = r_search.group(1)
# Check we have an input filename
if 'bam_file' not in d:
log.debug("Couldn't find an input filename in genome_results file {}".format(f['fn']))
return None
# Get a nice sample name
s_name = self.clean_s_name(d['bam_file'], f['root'])
# Add to general stats table & calculate a nice % aligned
try:
self.general_stats_data[s_name]['total_reads'] = d['total_reads']
self.general_stats_data[s_name]['mapped_reads'] = d['mapped_reads']
d['percentage_aligned'] = (d['mapped_reads'] / d['total_reads'])*100
self.general_stats_data[s_name]['percentage_aligned'] = d['percentage_aligned']
self.general_stats_data[s_name]['general_error_rate'] = d['general_error_rate']*100
self.general_stats_data[s_name]['mean_coverage'] = d['mean_coverage']
except KeyError:
pass
# Save results
if s_name in self.qualimap_bamqc_genome_results:
log.debug("Duplicate genome results sample name found! Overwriting: {}".format(s_name))
self.qualimap_bamqc_genome_results[s_name] = d
self.add_data_source(f, s_name=s_name, section='genome_results')
|
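A standalone demonstration of the regex scraping performed above, run against an invented fragment of a Qualimap genome_results.txt file; only the patterns are taken from the module.
import re
text = """bam file = sample1.bam
number of reads = 1,000,000
number of mapped reads = 950,000
mean coverageData = 29.46X"""
regexes = {
    "bam_file": r"bam file = (.+)",
    "total_reads": r"number of reads = ([\d,]+)",
    "mapped_reads": r"number of mapped reads = ([\d,]+)",
    "mean_coverage": r"mean coverageData = ([\d,\.]+)",
}
d = {}
for key, pattern in regexes.items():
    match = re.search(pattern, text, re.MULTILINE)
    if match:
        try:
            d[key] = float(match.group(1).replace(",", ""))
        except ValueError:
            d[key] = match.group(1)
print(d)  # {'bam_file': 'sample1.bam', 'total_reads': 1000000.0, 'mapped_reads': 950000.0, 'mean_coverage': 29.46}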
31,179 |
def create_context_from_network_artifacts(network_artifacts, ip_context):
domain_context = []
if network_artifacts:
for artifact in network_artifacts:
domain = artifact.get('network_domain')
if domain:
domain_context.append(
{
'Name': domain
}
)
network_ip_details = {
'Address': artifact.get('network_remote_ip'),
'GEO.Country': artifact.get('network_country')
}
remove_nulls_from_dictionary(network_ip_details)
if network_ip_details:
ip_context.append(network_ip_details)
return domain_context
|
def create_context_from_network_artifacts(network_artifacts, ip_context):
domain_context = []
if network_artifacts:
for artifact in network_artifacts:
domain = artifact.get('network_domain')
if domain:
domain_context.append(
{
'Name': domain
}
)
network_ip_details = {
'Address': artifact.get('network_remote_ip'),
'GEO.Country': artifact.get('network_country'),
}
remove_nulls_from_dictionary(network_ip_details)
if network_ip_details:
ip_context.append(network_ip_details)
return domain_context
|
30,461 |
def create_incident_from_offense(offense):
occured = epoch_to_ISO(offense['start_time'])
keys = offense.keys()
labels = []
for i in range(len(keys)):
labels.append({'type': keys[i], 'value': convert_to_str(offense[keys[i]])})
formatted_description = offense['description'].replace('\n', '') if offense['description'] else ''
return {
'name': '{id} {description}'.format(id=offense['id'], description=formatted_description),
'labels': labels,
'rawJSON': json.dumps(offense),
'occurred': occured
}
|
def create_incident_from_offense(offense):
occured = epoch_to_ISO(offense['start_time'])
keys = offense.keys()
labels = []
for i in range(len(keys)):
labels.append({'type': keys[i], 'value': convert_to_str(offense[keys[i]])})
formatted_description = offense['description'].replace('\s\n', ' ').replace('\n', ' ') if offense['description'] else ''
return {
'name': '{id} {description}'.format(id=offense['id'], description=formatted_description),
'labels': labels,
'rawJSON': json.dumps(offense),
'occurred': occured
}
|
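For illustration only, a standalone sketch of what an epoch-to-ISO helper could look like, assuming the offense start_time is a millisecond epoch; the helper name and the unit are assumptions, not taken from the integration code above.
from datetime import datetime, timezone

def epoch_ms_to_iso(epoch_ms: int) -> str:
    # Convert a millisecond Unix epoch to an ISO-8601 UTC timestamp.
    return datetime.fromtimestamp(epoch_ms / 1000, tz=timezone.utc).isoformat()

print(epoch_ms_to_iso(1609459200000))  # 2021-01-01T00:00:00+00:00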
31,454 |
def is_the_only_rn_in_block(release_notes_dir: str, version: str, changelog: dict):
"""
Check if the given version is a key of an aggregated changelog block, as in its value in the changelog
    doesn't contain other release notes that have been aggregated in previous uploads.
If that is the case, the adjacent previous release note in the changelog will be equal to the one in the
release notes directory, and this function asserts that.
Args:
release_notes_dir: the path to the release notes dir.
version (str): the wanted version.
changelog (dict): the changelog from the production bucket.
Returns:
True if this version's value in the changelog is not an aggregated release notes block. False otherwise.
"""
if changelog.get(version):
all_rn_versions = []
lowest_version = [LooseVersion('0.0.0')]
for filename in sorted(os.listdir(release_notes_dir)):
_current_version = filename.replace('.md', '')
current_version = _current_version.replace('_', '.')
all_rn_versions.append(LooseVersion(current_version))
lower_versions_all_versions = [item for item in all_rn_versions if item < version] + lowest_version
lower_versions_in_changelog = [LooseVersion(item) for item in changelog.keys() if
LooseVersion(item) < version] + lowest_version
return max(lower_versions_all_versions) == max(lower_versions_in_changelog)
return False
|
def is_the_only_rn_in_block(release_notes_dir: str, version: str, changelog: dict):
"""
Check if the given version is a key of an aggregated changelog block, as in its value in the changelog
    doesn't contain other release notes that have been aggregated in previous uploads.
If that is the case, the adjacent previous release note in the changelog will be equal to the one in the
release notes directory, and this function asserts that.
Args:
release_notes_dir: the path to the release notes dir.
version (str): the wanted version.
changelog (dict): the changelog from the production bucket.
Returns:
True if this version's value in the changelog is not an aggregated release notes block. False otherwise.
"""
if changelog.get(version):
all_rn_versions = []
lowest_version = [LooseVersion('0.0.0')]
for filename in os.listdir(release_notes_dir):
_current_version = filename.replace('.md', '')
current_version = _current_version.replace('_', '.')
all_rn_versions.append(LooseVersion(current_version))
lower_versions_all_versions = [item for item in all_rn_versions if item < version] + lowest_version
lower_versions_in_changelog = [LooseVersion(item) for item in changelog.keys() if
LooseVersion(item) < version] + lowest_version
return max(lower_versions_all_versions) == max(lower_versions_in_changelog)
return False
|
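A small self-contained sketch of the version bookkeeping the function above relies on, using invented release-note filenames and the same distutils LooseVersion (deprecated on newer Pythons): names such as 1_0_1.md become comparable version objects that can be reduced with max().
from distutils.version import LooseVersion

filenames = ["1_0_0.md", "1_0_1.md", "1_1_0.md"]
versions = [LooseVersion(name.replace(".md", "").replace("_", ".")) for name in filenames]

target = LooseVersion("1.1.0")
# Versions strictly below the target, padded with a sentinel so max() never sees an empty list.
lower = [v for v in versions if v < target] + [LooseVersion("0.0.0")]
print(max(lower))  # 1.0.1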
25,002 |
def _is_relative_to(self: Path, *other: Path) -> bool:
"""Checks if self is relative to other.
Backport of pathlib.Path.is_relative_to for Python <3.9
TODO: py39: Remove this backport and use stdlib function.
"""
try:
self.relative_to(*other)
return True
except ValueError:
return False
|
def pathlib_relative_to_is_callable(self: Path, *other: Path) -> bool:
"""Checks if self is relative to other.
Backport of pathlib.Path.is_relative_to for Python <3.9
TODO: py39: Remove this backport and use stdlib function.
"""
try:
self.relative_to(*other)
return True
except ValueError:
return False
|
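A quick, self-contained sanity check of the backported behaviour, using the same try/except ValueError idiom (written as its own function so it does not depend on either name above):
from pathlib import Path

def is_relative_to(path: Path, *other: Path) -> bool:
    # relative_to() raises ValueError when the path is not inside the given base.
    try:
        path.relative_to(*other)
        return True
    except ValueError:
        return False

print(is_relative_to(Path("/etc/passwd"), Path("/etc")))  # True
print(is_relative_to(Path("/etc/passwd"), Path("/usr")))  # False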
44,323 |
def validate_algorithm_spec(algorithm_settings: list[api_pb2.AlgorithmSetting]) -> (bool, str):
for s in algorithm_settings:
try:
if s.name == "num_epochs":
if not int(s.value) > 0:
return False, "{} should be greate than zero".format(s.name)
# Validate learning rate
if s.name in ["w_lr", "w_lr_min", "alpha_lr"]:
if not float(s.value) >= 0.0:
return False, "{} should be greate or equal than zero".format(s.name)
# Validate weight decay
if s.name in ["w_weight_decay", "alpha_weight_decay"]:
if not float(s.value) >= 0.0:
return False, "{} should be greate or equal than zero".format(s.name)
# Validate w_momentum and w_grad_clip
if s.name in ["w_momentum", "w_grad_clip"]:
if not float(s.value) >= 0.0:
return False, "{} should be greate or equal than zero".format(s.name)
if s.name == "batch_size":
if s.value is not "None":
if not int(s.value) >= 1:
return False, "batch_size should be greate or equal than one"
if s.name == "num_workers":
if not int(s.value) >= 0:
return False, "num_workers should be greate or equal than zero"
# Validate "init_channels", "print_step", "num_nodes" and "stem_multiplier"
if s.name in ["init_channels", "print_step", "num_nodes", "stem_multiplier"]:
if not int(s.value) >= 1:
return False, "{} should be greate or equal than one".format(s.name)
except Exception as e:
return False, "failed to validate {name}({value}): {exception}".format(name=s.name, value=s.value,
exception=e)
return True, ""
|
def validate_algorithm_spec(algorithm_settings: list[api_pb2.AlgorithmSetting]) -> (bool, str):
for s in algorithm_settings:
try:
            if s.name == "num_epochs":
                if not int(s.value) > 0:
                    return False, "{} should be greater than zero".format(s.name)
            # Validate learning rate
            if s.name in ["w_lr", "w_lr_min", "alpha_lr"]:
                if not float(s.value) >= 0.0:
                    return False, "{} should be greater than or equal to zero".format(s.name)
            # Validate weight decay
            if s.name in ["w_weight_decay", "alpha_weight_decay"]:
                if not float(s.value) >= 0.0:
                    return False, "{} should be greater than or equal to zero".format(s.name)
            # Validate w_momentum and w_grad_clip
            if s.name in ["w_momentum", "w_grad_clip"]:
                if not float(s.value) >= 0.0:
                    return False, "{} should be greater than or equal to zero".format(s.name)
            if s.name == "batch_size":
                if s.value != "None":
                    if not int(s.value) >= 1:
                        return False, "batch_size should be greater than or equal to one"
            if s.name == "num_workers":
                if not int(s.value) >= 0:
                    return False, "num_workers should be greater than or equal to zero"
            # Validate "init_channels", "print_step", "num_nodes" and "stem_multiplier"
            if s.name in ["init_channels", "print_step", "num_nodes", "stem_multiplier"]:
                if not int(s.value) >= 1:
                    return False, "{} should be greater than or equal to one".format(s.name)
except Exception as e:
return False, "failed to validate {name}({value}): {exception}".format(name=s.name, value=s.value,
exception=e)
return True, ""
|
23,072 |
def test_slice_array_3d_with_bool_numpy_array():
# https://github.com/ask/dask/issues/6089
array = da.arange(0, 24).reshape((4, 3, 2))
mask = np.arange(0, 24).reshape((4, 3, 2)) > 12
actual = array[mask].compute()
expected = np.arange(13, 24)
assert_eq(actual, expected)
|
def test_slice_array_3d_with_bool_numpy_array():
# https://github.com/dask/dask/issues/6089
array = da.arange(0, 24).reshape((4, 3, 2))
mask = np.arange(0, 24).reshape((4, 3, 2)) > 12
actual = array[mask].compute()
expected = np.arange(13, 24)
assert_eq(actual, expected)
|
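The behaviour being asserted mirrors NumPy's boolean fancy indexing, where a 3-D mask yields a flattened selection; a NumPy-only sketch of the same expectation:
import numpy as np

array = np.arange(0, 24).reshape((4, 3, 2))
mask = np.arange(0, 24).reshape((4, 3, 2)) > 12
# Boolean indexing with a mask of equal shape flattens the selected elements.
print(array[mask])  # [13 14 15 16 17 18 19 20 21 22 23]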
2,230 |
def fastica(
X,
n_components=None,
*,
algorithm="parallel",
whiten=None,
fun="logcosh",
fun_args=None,
max_iter=200,
tol=1e-04,
w_init=None,
random_state=None,
return_X_mean=False,
compute_sources=True,
return_n_iter=False,
):
"""Perform Fast Independent Component Analysis.
The implementation is based on [1]_.
Read more in the :ref:`User Guide <ICA>`.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training vector, where `n_samples` is the number of samples and
`n_features` is the number of features.
n_components : int, default=None
Number of components to extract. If None no dimension reduction
is performed.
algorithm : {'parallel', 'deflation'}, default='parallel'
Apply a parallel or deflational FASTICA algorithm.
whiten : str or bool, default=None
Specify the whitening strategy to use.
If 'arbitrary-variance', a whitening with variance arbitrary is used.
If 'unit-variance', the whitening variance is adjusted to be unitary.
If False, the data is already considered to be whitened, and no
whitening is performed.
If None (default), 'arbitrary-variance' is used.
.. deprecated:: 1.1
From version 1.3 whiten='unit-variance' will be used by default.
`whiten=True` is deprecated from 1.1 and will be removed in 1.3.
Use `whiten=arbitrary-variance` instead.
fun : {'logcosh', 'exp', 'cube'} or callable, default='logcosh'
The functional form of the G function used in the
approximation to neg-entropy. Could be either 'logcosh', 'exp',
or 'cube'.
You can also provide your own function. It should return a tuple
containing the value of the function, and of its derivative, in the
point. The derivative should be averaged along its last dimension.
Example:
def my_g(x):
return x ** 3, np.mean(3 * x ** 2, axis=-1)
fun_args : dict, default=None
Arguments to send to the functional form.
If empty or None and if fun='logcosh', fun_args will take value
{'alpha' : 1.0}
max_iter : int, default=200
Maximum number of iterations to perform.
tol : float, default=1e-04
A positive scalar giving the tolerance at which the
un-mixing matrix is considered to have converged.
w_init : ndarray of shape (n_components, n_components), default=None
Initial un-mixing array of dimension (n.comp,n.comp).
If None (default) then an array of normal r.v.'s is used.
random_state : int, RandomState instance or None, default=None
Used to initialize ``w_init`` when not specified, with a
normal distribution. Pass an int, for reproducible results
across multiple function calls.
See :term:`Glossary <random_state>`.
return_X_mean : bool, default=False
If True, X_mean is returned too.
compute_sources : bool, default=True
If False, sources are not computed, but only the rotation matrix.
This can save memory when working with big data. Defaults to True.
return_n_iter : bool, default=False
Whether or not to return the number of iterations.
Returns
-------
K : ndarray of shape (n_components, n_features) or None
If whiten is 'True', K is the pre-whitening matrix that projects data
onto the first n_components principal components. If whiten is 'False',
K is 'None'.
W : ndarray of shape (n_components, n_components)
The square matrix that unmixes the data after whitening.
The mixing matrix is the pseudo-inverse of matrix ``W K``
if K is not None, else it is the inverse of W.
S : ndarray of shape (n_samples, n_components) or None
Estimated source matrix
X_mean : ndarray of shape (n_features,)
The mean over features. Returned only if return_X_mean is True.
n_iter : int
If the algorithm is "deflation", n_iter is the
maximum number of iterations run across all components. Else
they are just the number of iterations taken to converge. This is
returned only when return_n_iter is set to `True`.
Notes
-----
The data matrix X is considered to be a linear combination of
non-Gaussian (independent) components i.e. X = AS where columns of S
contain the independent components and A is a linear mixing
matrix. In short ICA attempts to `un-mix' the data by estimating an
un-mixing matrix W where ``S = W K X.``
While FastICA was proposed to estimate as many sources
as features, it is possible to estimate less by setting
    n_components < n_features. In this case K is not a square matrix
and the estimated A is the pseudo-inverse of ``W K``.
This implementation was originally made for data of shape
[n_features, n_samples]. Now the input is transposed
before the algorithm is applied. This makes it slightly
faster for Fortran-ordered input.
References
----------
.. [1] A. Hyvarinen and E. Oja, "Fast Independent Component Analysis",
Algorithms and Applications, Neural Networks, 13(4-5), 2000,
pp. 411-430.
"""
est = FastICA(
n_components=n_components,
algorithm=algorithm,
whiten=whiten,
fun=fun,
fun_args=fun_args,
max_iter=max_iter,
tol=tol,
w_init=w_init,
random_state=random_state,
)
S = est._fit(X, compute_sources=compute_sources)
if est.whiten_ in ["unitary-variance", "arbitrary-variance"]:
K = est.whitening_
X_mean = est.mean_
else:
K = None
X_mean = None
returned_values = [K, est._unmixing, S]
if return_X_mean:
returned_values.append(X_mean)
if return_n_iter:
returned_values.append(est.n_iter_)
return returned_values
|
def fastica(
X,
n_components=None,
*,
algorithm="parallel",
whiten=None,
fun="logcosh",
fun_args=None,
max_iter=200,
tol=1e-04,
w_init=None,
random_state=None,
return_X_mean=False,
compute_sources=True,
return_n_iter=False,
):
"""Perform Fast Independent Component Analysis.
The implementation is based on [1]_.
Read more in the :ref:`User Guide <ICA>`.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training vector, where `n_samples` is the number of samples and
`n_features` is the number of features.
n_components : int, default=None
Number of components to extract. If None no dimension reduction
is performed.
algorithm : {'parallel', 'deflation'}, default='parallel'
Apply a parallel or deflational FASTICA algorithm.
whiten : str or bool, default=None
Specify the whitening strategy to use.
If 'arbitrary-variance', a whitening with variance arbitrary is used.
If 'unit-variance', the whitening variance is adjusted to be unitary.
If False, the data is already considered to be whitened, and no
whitening is performed.
If None (default), 'arbitrary-variance' is used.
.. deprecated:: 1.1
From version 1.3, `whiten='unit-variance'` will be used by default.
`whiten=True` is deprecated from 1.1 and will be removed in 1.3.
Use `whiten=arbitrary-variance` instead.
fun : {'logcosh', 'exp', 'cube'} or callable, default='logcosh'
The functional form of the G function used in the
approximation to neg-entropy. Could be either 'logcosh', 'exp',
or 'cube'.
You can also provide your own function. It should return a tuple
containing the value of the function, and of its derivative, in the
point. The derivative should be averaged along its last dimension.
Example:
def my_g(x):
return x ** 3, np.mean(3 * x ** 2, axis=-1)
fun_args : dict, default=None
Arguments to send to the functional form.
If empty or None and if fun='logcosh', fun_args will take value
{'alpha' : 1.0}
max_iter : int, default=200
Maximum number of iterations to perform.
tol : float, default=1e-04
A positive scalar giving the tolerance at which the
un-mixing matrix is considered to have converged.
w_init : ndarray of shape (n_components, n_components), default=None
Initial un-mixing array of dimension (n.comp,n.comp).
If None (default) then an array of normal r.v.'s is used.
random_state : int, RandomState instance or None, default=None
Used to initialize ``w_init`` when not specified, with a
normal distribution. Pass an int, for reproducible results
across multiple function calls.
See :term:`Glossary <random_state>`.
return_X_mean : bool, default=False
If True, X_mean is returned too.
compute_sources : bool, default=True
If False, sources are not computed, but only the rotation matrix.
This can save memory when working with big data. Defaults to True.
return_n_iter : bool, default=False
Whether or not to return the number of iterations.
Returns
-------
K : ndarray of shape (n_components, n_features) or None
If whiten is 'True', K is the pre-whitening matrix that projects data
onto the first n_components principal components. If whiten is 'False',
K is 'None'.
W : ndarray of shape (n_components, n_components)
The square matrix that unmixes the data after whitening.
The mixing matrix is the pseudo-inverse of matrix ``W K``
if K is not None, else it is the inverse of W.
S : ndarray of shape (n_samples, n_components) or None
Estimated source matrix
X_mean : ndarray of shape (n_features,)
The mean over features. Returned only if return_X_mean is True.
n_iter : int
If the algorithm is "deflation", n_iter is the
maximum number of iterations run across all components. Else
they are just the number of iterations taken to converge. This is
returned only when return_n_iter is set to `True`.
Notes
-----
The data matrix X is considered to be a linear combination of
non-Gaussian (independent) components i.e. X = AS where columns of S
contain the independent components and A is a linear mixing
matrix. In short ICA attempts to `un-mix' the data by estimating an
un-mixing matrix W where ``S = W K X.``
While FastICA was proposed to estimate as many sources
as features, it is possible to estimate less by setting
    n_components < n_features. In this case K is not a square matrix
and the estimated A is the pseudo-inverse of ``W K``.
This implementation was originally made for data of shape
[n_features, n_samples]. Now the input is transposed
before the algorithm is applied. This makes it slightly
faster for Fortran-ordered input.
References
----------
.. [1] A. Hyvarinen and E. Oja, "Fast Independent Component Analysis",
Algorithms and Applications, Neural Networks, 13(4-5), 2000,
pp. 411-430.
"""
est = FastICA(
n_components=n_components,
algorithm=algorithm,
whiten=whiten,
fun=fun,
fun_args=fun_args,
max_iter=max_iter,
tol=tol,
w_init=w_init,
random_state=random_state,
)
S = est._fit(X, compute_sources=compute_sources)
if est.whiten_ in ["unitary-variance", "arbitrary-variance"]:
K = est.whitening_
X_mean = est.mean_
else:
K = None
X_mean = None
returned_values = [K, est._unmixing, S]
if return_X_mean:
returned_values.append(X_mean)
if return_n_iter:
returned_values.append(est.n_iter_)
return returned_values
|
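A hedged usage sketch, assuming a scikit-learn build that matches the signature above (the 1.1-era API): mix two non-Gaussian sources and recover the whitening and unmixing matrices.
import numpy as np
from sklearn.decomposition import fastica

rng = np.random.RandomState(0)
S = rng.laplace(size=(2000, 2))            # two independent non-Gaussian sources
A = np.array([[1.0, 0.5], [0.5, 1.0]])     # mixing matrix
X = S @ A.T

K, W, S_est = fastica(X, n_components=2, whiten="arbitrary-variance", random_state=0)
print(K.shape, W.shape, S_est.shape)       # (2, 2) (2, 2) (2000, 2)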
56,377 |
def raises(
expected_exception: Union[Type[E], Tuple[Type[E], ...]], *args: Any, **kwargs: Any
) -> Union["RaisesContext[E]", _pytest._code.ExceptionInfo[E]]:
r"""Assert that a code block/function call raises ``expected_exception``
or raise a failure exception otherwise.
:kwparam match:
If specified, a string containing a regular expression,
or a regular expression object, that is tested against the string
representation of the exception using :py:func:`re.search`. To match a literal
string that may contain :std:ref:`special characters <re-syntax>`, the pattern can
first be escaped with :py:func:`re.escape`.
(This is only used when :py:func:`pytest.raises` is used as a context manager,
and passed through to the function otherwise.
When using :py:func:`pytest.raises` as a function, you can use:
``pytest.raises(Exc, func, match="passed on").match("my pattern")``.)
.. currentmodule:: _pytest._code
Use ``pytest.raises`` as a context manager, which will capture the exception of the given
type::
>>> import pytest
>>> with pytest.raises(ZeroDivisionError):
... 1/0
If the code block does not raise the expected exception (``ZeroDivisionError`` in the example
above), or no exception at all, the check will fail instead.
You can also use the keyword argument ``match`` to assert that the
exception matches a text or regex::
>>> with pytest.raises(ValueError, match='must be 0 or None'):
... raise ValueError("value must be 0 or None")
>>> with pytest.raises(ValueError, match=r'must be \d+$'):
... raise ValueError("value must be 42")
The context manager produces an :class:`ExceptionInfo` object which can be used to inspect the
details of the captured exception::
>>> with pytest.raises(ValueError) as exc_info:
... raise ValueError("value must be 42")
>>> assert exc_info.type is ValueError
>>> assert exc_info.value.args[0] == "value must be 42"
.. note::
When using ``pytest.raises`` as a context manager, it's worthwhile to
note that normal context manager rules apply and that the exception
raised *must* be the final line in the scope of the context manager.
Lines of code after that, within the scope of the context manager will
not be executed. For example::
>>> value = 15
>>> with pytest.raises(ValueError) as exc_info:
... if value > 10:
... raise ValueError("value must be <= 10")
... assert exc_info.type is ValueError # this will not execute
Instead, the following approach must be taken (note the difference in
scope)::
>>> with pytest.raises(ValueError) as exc_info:
... if value > 10:
... raise ValueError("value must be <= 10")
...
>>> assert exc_info.type is ValueError
**Using with** ``pytest.mark.parametrize``
When using :ref:`pytest.mark.parametrize ref`
it is possible to parametrize tests such that
some runs raise an exception and others do not.
See :ref:`parametrizing_conditional_raising` for an example.
**Legacy form**
It is possible to specify a callable by passing a to-be-called lambda::
>>> raises(ZeroDivisionError, lambda: 1/0)
<ExceptionInfo ...>
or you can specify an arbitrary callable with arguments::
>>> def f(x): return 1/x
...
>>> raises(ZeroDivisionError, f, 0)
<ExceptionInfo ...>
>>> raises(ZeroDivisionError, f, x=0)
<ExceptionInfo ...>
The form above is fully supported but discouraged for new code because the
context manager form is regarded as more readable and less error-prone.
.. note::
Similar to caught exception objects in Python, explicitly clearing
local references to returned ``ExceptionInfo`` objects can
help the Python interpreter speed up its garbage collection.
Clearing those references breaks a reference cycle
(``ExceptionInfo`` --> caught exception --> frame stack raising
the exception --> current frame stack --> local variables -->
``ExceptionInfo``) which makes Python keep all objects referenced
from that cycle (including all local variables in the current
frame) alive until the next cyclic garbage collection run.
More detailed information can be found in the official Python
documentation for :ref:`the try statement <python:try>`.
"""
__tracebackhide__ = True
if expected_exception == ():
raise ValueError(
"Passing expected_exception=() is an error, because it's impossible to "
"raise an exception which is not an instance of any type. Raising exceptions "
"is already understood as failing the test, so you don't need any special "
"code to say 'this should never raise an exception'."
)
if isinstance(expected_exception, type):
excepted_exceptions: Tuple[Type[E], ...] = (expected_exception,)
else:
excepted_exceptions = expected_exception
for exc in excepted_exceptions:
if not isinstance(exc, type) or not issubclass(exc, BaseException):
msg = "expected exception must be a BaseException type, not {}" # type: ignore[unreachable]
not_a = exc.__name__ if isinstance(exc, type) else type(exc).__name__
raise TypeError(msg.format(not_a))
message = f"DID NOT RAISE {expected_exception}"
if not args:
match: Optional[Union[str, Pattern[str]]] = kwargs.pop("match", None)
if kwargs:
msg = "Unexpected keyword arguments passed to pytest.raises: "
msg += ", ".join(sorted(kwargs))
msg += "\nUse context-manager form instead?"
raise TypeError(msg)
return RaisesContext(expected_exception, message, match)
else:
func = args[0]
if not callable(func):
raise TypeError(f"{func!r} object (type: {type(func)}) must be callable")
try:
func(*args[1:], **kwargs)
except expected_exception as e:
# We just caught the exception - there is a traceback.
assert e.__traceback__ is not None
return _pytest._code.ExceptionInfo.from_exc_info(
(type(e), e, e.__traceback__)
)
fail(message)
|
def raises(
expected_exception: Union[Type[E], Tuple[Type[E], ...]], *args: Any, **kwargs: Any
) -> Union["RaisesContext[E]", _pytest._code.ExceptionInfo[E]]:
r"""Assert that a code block/function call raises ``expected_exception``
or raise a failure exception otherwise.
:kwparam match:
If specified, a string containing a regular expression,
or a regular expression object, that is tested against the string
representation of the exception using :py:func:`re.search`. To match a literal
string that may contain :std:ref:`special characters <re-syntax>`, the pattern can
first be escaped with :py:func:`re.escape`.
(This is only used when :py:func:`pytest.raises` is used as a context manager,
and passed through to the function otherwise.
When using :py:func:`pytest.raises` as a function, you can use:
``pytest.raises(Exc, func, match="passed on").match("my pattern")``.)
.. currentmodule:: _pytest._code
Use ``pytest.raises`` as a context manager, which will capture the exception of the given
type::
>>> import pytest
>>> with pytest.raises(ZeroDivisionError):
... 1/0
If the code block does not raise the expected exception (``ZeroDivisionError`` in the example
above), or no exception at all, the check will fail instead.
You can also use the keyword argument ``match`` to assert that the
exception matches a text or regex::
>>> with pytest.raises(ValueError, match='must be 0 or None'):
... raise ValueError("value must be 0 or None")
>>> with pytest.raises(ValueError, match=r'must be \d+$'):
... raise ValueError("value must be 42")
The context manager produces an :class:`ExceptionInfo` object which can be used to inspect the
details of the captured exception::
>>> with pytest.raises(ValueError) as exc_info:
... raise ValueError("value must be 42")
>>> assert exc_info.type is ValueError
>>> assert exc_info.value.args[0] == "value must be 42"
.. note::
When using ``pytest.raises`` as a context manager, it's worthwhile to
note that normal context manager rules apply and that the exception
raised *must* be the final line in the scope of the context manager.
Lines of code after that, within the scope of the context manager will
not be executed. For example::
>>> value = 15
>>> with pytest.raises(ValueError) as exc_info:
... if value > 10:
... raise ValueError("value must be <= 10")
... assert exc_info.type is ValueError # this will not execute
Instead, the following approach must be taken (note the difference in
scope)::
>>> with pytest.raises(ValueError) as exc_info:
... if value > 10:
... raise ValueError("value must be <= 10")
...
>>> assert exc_info.type is ValueError
**Using with** ``pytest.mark.parametrize``
When using :ref:`pytest.mark.parametrize ref`
it is possible to parametrize tests such that
some runs raise an exception and others do not.
See :ref:`parametrizing_conditional_raising` for an example.
**Legacy form**
It is possible to specify a callable by passing a to-be-called lambda::
>>> raises(ZeroDivisionError, lambda: 1/0)
<ExceptionInfo ...>
or you can specify an arbitrary callable with arguments::
>>> def f(x): return 1/x
...
>>> raises(ZeroDivisionError, f, 0)
<ExceptionInfo ...>
>>> raises(ZeroDivisionError, f, x=0)
<ExceptionInfo ...>
The form above is fully supported but discouraged for new code because the
context manager form is regarded as more readable and less error-prone.
.. note::
Similar to caught exception objects in Python, explicitly clearing
local references to returned ``ExceptionInfo`` objects can
help the Python interpreter speed up its garbage collection.
Clearing those references breaks a reference cycle
(``ExceptionInfo`` --> caught exception --> frame stack raising
the exception --> current frame stack --> local variables -->
``ExceptionInfo``) which makes Python keep all objects referenced
from that cycle (including all local variables in the current
frame) alive until the next cyclic garbage collection run.
More detailed information can be found in the official Python
documentation for :ref:`the try statement <python:try>`.
"""
__tracebackhide__ = True
if expected_exception == ():
        raise ValueError(
            "The parameter ``expected_exception`` was ``()`` instead of an exception type or a tuple of exception types.\n"
            "Please pass an Exception subclass or a tuple of them to pytest.raises."
        )
if isinstance(expected_exception, type):
excepted_exceptions: Tuple[Type[E], ...] = (expected_exception,)
else:
excepted_exceptions = expected_exception
for exc in excepted_exceptions:
if not isinstance(exc, type) or not issubclass(exc, BaseException):
msg = "expected exception must be a BaseException type, not {}" # type: ignore[unreachable]
not_a = exc.__name__ if isinstance(exc, type) else type(exc).__name__
raise TypeError(msg.format(not_a))
message = f"DID NOT RAISE {expected_exception}"
if not args:
match: Optional[Union[str, Pattern[str]]] = kwargs.pop("match", None)
if kwargs:
msg = "Unexpected keyword arguments passed to pytest.raises: "
msg += ", ".join(sorted(kwargs))
msg += "\nUse context-manager form instead?"
raise TypeError(msg)
return RaisesContext(expected_exception, message, match)
else:
func = args[0]
if not callable(func):
raise TypeError(f"{func!r} object (type: {type(func)}) must be callable")
try:
func(*args[1:], **kwargs)
except expected_exception as e:
# We just caught the exception - there is a traceback.
assert e.__traceback__ is not None
return _pytest._code.ExceptionInfo.from_exc_info(
(type(e), e, e.__traceback__)
)
fail(message)
|
58,542 |
def parse_request_item(request_item):
if len(request_item.args) <= 1:
arg = request_item.args[0] if len(request_item.args) == 1 else None
# If the input data from handle is web request, we don't need to wrap
# it in ServeRequest.
if isinstance(arg, starlette.requests.Request):
return (arg, ), {}
elif isinstance(arg, HTTPRequestWrapper):
return (build_starlette_request(arg.scope, arg.body), ), {}
elif request_item.metadata.use_serve_request:
return (ServeRequest(
arg,
request_item.kwargs,
headers=request_item.metadata.http_headers,
method=request_item.metadata.http_method,
), ), {}
return request_item.args, request_item.kwargs
|
def parse_request_item(request_item):
if len(request_item.args) == 1:
arg = request_item.args[0]
# If the input data from handle is web request, we don't need to wrap
# it in ServeRequest.
if isinstance(arg, starlette.requests.Request):
return (arg, ), {}
elif isinstance(arg, HTTPRequestWrapper):
return (build_starlette_request(arg.scope, arg.body), ), {}
elif request_item.metadata.use_serve_request:
return (ServeRequest(
arg,
request_item.kwargs,
headers=request_item.metadata.http_headers,
method=request_item.metadata.http_method,
), ), {}
return request_item.args, request_item.kwargs
|
8,299 |
def backrelation(obj, attribute, restricted=True):
"""Get relating object.
This makes sense when only one item has a relation of this type to obj.
One example is parent -> child where only one parent can exist.
"""
if not attribute:
raise RuntimeError(u'Missing parameter "attribute"')
items = get_relations(obj, attribute=attribute, backrels=True, restricted=restricted)
if len(items) > 1:
raise RuntimeError(u'Multiple incoming relations of type {}.'.format(attribute))
if items:
source_obj = items[0]
check_for_relationchoice(source_obj, attribute)
return source_obj
|
def backrelation(obj, attribute, restricted=True):
"""Get relating object.
This makes sense when only one item has a relation of this type to obj.
One example is parent -> child where only one parent can exist.
"""
if not attribute:
raise RuntimeError(u'Missing parameter "attribute"')
items = get_relations(obj, attribute=attribute, backrels=True, restricted=restricted)
if len(items) > 1:
raise ValueError(f'Multiple incoming relations of type {attribute}.')
if items:
source_obj = items[0]
check_for_relationchoice(source_obj, attribute)
return source_obj
|
6,882 |
def update_field_types(columns):
for col in columns:
if col.fieldtype in ("Link", "Dynamic Link", "Currency") and col.options != "Currency":
col.fieldtype = "Data"
col.options = ""
return columns
|
def update_field_types(columns):
for col in columns:
if col.fieldtype in ("Link", "Dynamic Link") and col.options != "Currency":
col.fieldtype = "Data"
col.options = ""
return columns
|
44,817 |
def _improper_model_uri_msg(uri):
return (
"Not a proper models:/ URI: %s. " % uri
+ "Models URIs must be of the form 'models:/<model_name>/suffix' "
+ "where suffix is a model version, stage, or string '%s'." % _MODELS_URI_SUFFIX_LATEST
)
|
def _improper_model_uri_msg(uri):
return (
"Not a proper models:/ URI: %s. " % uri
+ "Models URIs must be of the form 'models:/<model_name>/suffix' "
+ "where suffix is a model version, stage, or the string '%s'." % _MODELS_URI_SUFFIX_LATEST
)
|
42,961 |
def energies(samples: np.ndarray, wp: np.ndarray) -> np.ndarray:
r"""Computes the energy of GBS samples in :math:`\text{cm}^{-1}` unit.
**Example usage:**
>>> samples = np.array([[1, 1, 0], [1, 0, 2]])
>>> wp = np.array([700.0, 600.0, 500.0])
>>> energies(samples, wp)
[1300.0, 1700.0]
Args:
samples (array): GBS samples
wp (array): normal mode frequencies in :math:`\text{cm}^{-1}`
Returns:
E (list): list of GBS sample energies in :math:`\text{cm}^{-1}`
"""
E = []
for sample in samples:
e = sum(sample * wp)
E.append(e)
return E
|
def energies(samples: list, wp: np.ndarray) -> list:
r"""Computes the energy of GBS samples in :math:`\text{cm}^{-1}` unit.
**Example usage:**
>>> samples = np.array([[1, 1, 0], [1, 0, 2]])
>>> wp = np.array([700.0, 600.0, 500.0])
>>> energies(samples, wp)
[1300.0, 1700.0]
Args:
samples (array): GBS samples
wp (array): normal mode frequencies in :math:`\text{cm}^{-1}`
Returns:
E (list): list of GBS sample energies in :math:`\text{cm}^{-1}`
"""
E = []
for sample in samples:
e = sum(sample * wp)
E.append(e)
return E
|
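The loop above is a per-row dot product, so a vectorized NumPy equivalent reproduces the docstring example:
import numpy as np

samples = np.array([[1, 1, 0], [1, 0, 2]])
wp = np.array([700.0, 600.0, 500.0])
# Each sample energy is sum(n_i * w_i) over the modes.
print((samples * wp).sum(axis=1).tolist())  # [1300.0, 1700.0]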
55,381 |
def _run(uri, experiment_id, entry_point, version, parameters,
docker_args, backend_name, backend_config, use_conda,
storage_dir, synchronous):
"""
Helper that delegates to the project-running method corresponding to the passed-in backend.
Returns a ``SubmittedRun`` corresponding to the project run.
"""
tracking_store_uri = tracking.get_tracking_uri()
backend_config[PROJECT_USE_CONDA] = use_conda
backend_config[PROJECT_SYNCHRONOUS] = synchronous
backend_config[PROJECT_DOCKER_ARGS] = docker_args
backend_config[PROJECT_STORAGE_DIR] = storage_dir
# TODO: remove this check once local, databricks, kubernetes execution have been refactored
# into their own built-in project execution backends.
if backend_name not in {"databricks", "kubernetes"}:
backend = loader.load_backend(backend_name)
if backend:
submitted_run = backend.run(uri, entry_point, parameters,
version, backend_config, tracking_store_uri, experiment_id)
tracking.MlflowClient().set_tag(submitted_run.run_id, MLFLOW_PROJECT_BACKEND,
backend_name)
return submitted_run
work_dir = fetch_and_validate_project(uri, version, entry_point, parameters)
project = load_project(work_dir)
_validate_execution_environment(project, backend_name)
active_run = get_or_create_run(None, uri, experiment_id, work_dir, version,
entry_point, parameters)
if backend_name == "databricks":
tracking.MlflowClient().set_tag(active_run.info.run_id, MLFLOW_PROJECT_BACKEND,
"databricks")
from mlflow.projects.databricks import run_databricks
return run_databricks(
remote_run=active_run,
uri=uri, entry_point=entry_point, work_dir=work_dir, parameters=parameters,
experiment_id=experiment_id, cluster_spec=backend_config)
supported_backends = ["databricks"] + list(loader.MLFLOW_BACKENDS.keys())
raise ExecutionException("Got unsupported execution mode %s. Supported "
"values: %s" % (backend_name, supported_backends))
|
def _run(uri, experiment_id, entry_point, version, parameters,
docker_args, backend_name, backend_config, use_conda,
storage_dir, synchronous):
"""
Helper that delegates to the project-running method corresponding to the passed-in backend.
Returns a ``SubmittedRun`` corresponding to the project run.
"""
tracking_store_uri = tracking.get_tracking_uri()
backend_config[PROJECT_USE_CONDA] = use_conda
backend_config[PROJECT_SYNCHRONOUS] = synchronous
backend_config[PROJECT_DOCKER_ARGS] = docker_args
backend_config[PROJECT_STORAGE_DIR] = storage_dir
# TODO: remove this check once databricks execution has been refactored into its own built-in
    # project execution backend.
if backend_name not in {"databricks", "kubernetes"}:
backend = loader.load_backend(backend_name)
if backend:
submitted_run = backend.run(uri, entry_point, parameters,
version, backend_config, tracking_store_uri, experiment_id)
tracking.MlflowClient().set_tag(submitted_run.run_id, MLFLOW_PROJECT_BACKEND,
backend_name)
return submitted_run
work_dir = fetch_and_validate_project(uri, version, entry_point, parameters)
project = load_project(work_dir)
_validate_execution_environment(project, backend_name)
active_run = get_or_create_run(None, uri, experiment_id, work_dir, version,
entry_point, parameters)
if backend_name == "databricks":
tracking.MlflowClient().set_tag(active_run.info.run_id, MLFLOW_PROJECT_BACKEND,
"databricks")
from mlflow.projects.databricks import run_databricks
return run_databricks(
remote_run=active_run,
uri=uri, entry_point=entry_point, work_dir=work_dir, parameters=parameters,
experiment_id=experiment_id, cluster_spec=backend_config)
supported_backends = ["databricks"] + list(loader.MLFLOW_BACKENDS.keys())
raise ExecutionException("Got unsupported execution mode %s. Supported "
"values: %s" % (backend_name, supported_backends))
|
40,428 |
def test_graph_store_conversion():
graph_store = MyGraphStore()
edge_index = get_edge_index(100, 100, 300)
edge_index = sort_edge_index(edge_index, sort_by_row=False)
adj = SparseTensor.from_edge_index(edge_index, sparse_sizes=(100, 100))
coo = (edge_index[0], edge_index[1])
csr = adj.csr()[:2]
csc = adj.csc()[-2::-1]
# Put all edge indices:
graph_store.put_edge_index(edge_index=coo, edge_type=('v', '1', 'v'),
layout='coo', num_nodes=(100, 100),
is_sorted=True)
graph_store.put_edge_index(edge_index=csr, edge_type=('v', '2', 'v'),
layout='csr', num_nodes=(100, 100))
graph_store.put_edge_index(edge_index=csc, edge_type=('v', '3', 'v'),
layout='csc', num_nodes=(100, 100))
def assert_edge_index_equal(expected: torch.Tensor, actual: torch.Tensor):
assert torch.equal(sort_edge_index(expected), sort_edge_index(actual))
# Convert to COO:
row_dict, col_dict, perm_dict = graph_store.coo()
assert len(row_dict) == len(col_dict) == len(perm_dict) == 3
for key in row_dict.keys():
actual = torch.stack((row_dict[key], col_dict[key]))
assert_edge_index_equal(actual, edge_index)
assert perm_dict[key] is None
# Convert to CSR:
row_dict, col_dict, perm_dict = graph_store.csr()
assert len(row_dict) == len(col_dict) == len(perm_dict) == 3
for key in row_dict:
assert torch.equal(row_dict[key], csr[0])
assert torch.equal(col_dict[key], csr[1])
if key == ('v', '1', 'v'):
assert perm_dict[key] is not None
# Convert to CSC:
row_dict, col_dict, perm_dict = graph_store.csc()
assert len(row_dict) == len(col_dict) == len(perm_dict) == 3
for key in row_dict:
assert torch.equal(row_dict[key], csc[0])
assert torch.equal(col_dict[key], csc[1])
assert perm_dict[key] is None
|
def test_graph_store_conversion():
graph_store = MyGraphStore()
edge_index = get_edge_index(100, 100, 300)
edge_index = sort_edge_index(edge_index, sort_by_row=False)
adj = SparseTensor.from_edge_index(edge_index, sparse_sizes=(100, 100))
coo = (edge_index[0], edge_index[1])
csr = adj.csr()[:2]
csc = adj.csc()[-2::-1]
# Put all edge indices:
graph_store.put_edge_index(edge_index=coo, edge_type=('v', '1', 'v'),
layout='coo', num_nodes=(100, 100),
is_sorted=True)
graph_store.put_edge_index(edge_index=csr, edge_type=('v', '2', 'v'),
layout='csr', num_nodes=(100, 100))
graph_store.put_edge_index(edge_index=csc, edge_type=('v', '3', 'v'),
layout='csc', num_nodes=(100, 100))
def assert_edge_index_equal(expected: torch.Tensor, actual: torch.Tensor):
assert torch.equal(sort_edge_index(expected), sort_edge_index(actual))
# Convert to COO:
row_dict, col_dict, perm_dict = graph_store.coo()
assert len(row_dict) == len(col_dict) == len(perm_dict) == 3
for key in row_dict.keys():
actual = torch.stack((row_dict[key], col_dict[key]))
assert_edge_index_equal(actual, edge_index)
assert perm_dict[key] is None
# Convert to CSR:
row_dict, col_dict, perm_dict = graph_store.csr()
assert len(row_dict) == len(col_dict) == len(perm_dict) == 3
for key in row_dict:
assert torch.equal(row_dict[key], csr[0])
        assert torch.equal(col_dict[key], csr[1])
if key == ('v', '1', 'v'):
assert perm_dict[key] is not None
# Convert to CSC:
row_dict, col_dict, perm_dict = graph_store.csc()
assert len(row_dict) == len(col_dict) == len(perm_dict) == 3
for key in row_dict:
assert torch.equal(row_dict[key], csc[0])
assert torch.equal(col_dict[key], csc[1])
assert perm_dict[key] is None
|
46,464 |
def granger_causality_tests(ts_cause: TimeSeries,
ts_effect: TimeSeries,
maxlag: int,
addconst: bool = True,
verbose: bool = True
) -> None:
"""
Provides four tests for granger non causality of 2 time series using `statsmodels.tsa.stattools.grangercausalitytests`.
Parameters
----------
ts_cause
An univariate time series. The statistical test determines if this time series
'Granger causes' the time series ts_effect (second parameter). Missing values are not supported.
        If H_0 (non causality) is rejected (p near 0), then there is a 'granger causality'.
ts_effect
Univariate time series 'Granger caused' by ts_cause.
maxlag
If an integer, computes the test for all lags up to maxlag.
If an iterable, computes the tests only for the lags in maxlag.
addconst
Include a constant in the model.
verbose
Print results.
Returns
-------
Dict
All test results, dictionary keys are the number of lags. For each lag the values are a tuple,
with the first element a dictionary with test statistic, pvalues, degrees of freedom, the second element are
the OLS estimation results for the restricted model, the unrestricted model and the restriction (contrast)
matrix for the parameter f_test.
"""
ts_cause._assert_univariate()
ts_effect._assert_univariate()
raise_if(not ts_cause.has_same_time_as(ts_effect),
'ts_cause and ts_effect time series have different time index.')
return grangercausalitytests(
np.concatenate((ts_effect.values(), ts_cause.values()), axis=1),
maxlag,
addconst,
verbose
)
|
def granger_causality_tests(ts_cause: TimeSeries,
ts_effect: TimeSeries,
maxlag: int,
addconst: bool = True,
verbose: bool = True
) -> None:
"""
Provides four tests for granger non causality of 2 time series using `statsmodels.tsa.stattools.grangercausalitytests`.
Parameters
----------
ts_cause
A univariate deterministic time series. The statistical test determines if this time series
'Granger causes' the time series ts_effect (second parameter). Missing values are not supported.
        If H_0 (non causality) is rejected (p near 0), then there is a 'granger causality'.
ts_effect
Univariate time series 'Granger caused' by ts_cause.
maxlag
If an integer, computes the test for all lags up to maxlag.
If an iterable, computes the tests only for the lags in maxlag.
addconst
Include a constant in the model.
verbose
Print results.
Returns
-------
Dict
All test results, dictionary keys are the number of lags. For each lag the values are a tuple,
with the first element a dictionary with test statistic, pvalues, degrees of freedom, the second element are
the OLS estimation results for the restricted model, the unrestricted model and the restriction (contrast)
matrix for the parameter f_test.
"""
ts_cause._assert_univariate()
ts_effect._assert_univariate()
raise_if(not ts_cause.has_same_time_as(ts_effect),
'ts_cause and ts_effect time series have different time index.')
return grangercausalitytests(
np.concatenate((ts_effect.values(), ts_cause.values()), axis=1),
maxlag,
addconst,
verbose
)
|
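A self-contained sketch of the underlying statsmodels call on synthetic data, with the effect column first and the candidate cause second, matching the concatenation order used above (the toy series are invented):
import numpy as np
from statsmodels.tsa.stattools import grangercausalitytests

rng = np.random.default_rng(0)
cause = rng.normal(size=200)
# The effect trails the cause by one step, so small p-values are expected.
effect = np.roll(cause, 1) + 0.1 * rng.normal(size=200)

data = np.column_stack([effect, cause])
results = grangercausalitytests(data, maxlag=2, verbose=False)
print(sorted(results.keys()))  # [1, 2]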
4,279 |
def vertex_depths(inst, info=None, picks=None, trans=None, mode='dist',
verbose=None):
"""Compute source depths as distances between vertices and nearest sensor.
Parameters
----------
inst : instance of Forward | instance of SourceSpaces
The object to select vertices from.
info : instance of Info | None
The info structure that contains information about the channels with
respect to which to compute distances.
picks : array-like of int | None
        Indices of sensors to include in distance calculations. If ``None``
(default) then only MEG channels are used.
trans : str | instance of Transform | None
Either the full path to the head<->MRI transform ``*-trans.fif`` file
produced during coregistration, or the Transformation itself. If trans
is None, an identity matrix is assumed. Only needed when ``inst`` is a
source space in MRI coordinates.
mode : str
How to compute source depth. 'dist' computes Euclidean distance
between vertices and nearest sensors.
verbose : bool | str | int | None
If not None, override default verbose level (see :func:`mne.verbose`
and :ref:`Logging documentation <tut_logging>` for more).
Returns
-------
    depth : array of shape (n_vertices,)
The depths of source space vertices with respect to sensors.
"""
from .forward import Forward
if isinstance(inst, Forward):
info = inst['info']
src = inst['src']
elif isinstance(inst, SourceSpaces):
src = inst
if info is None:
raise ValueError('You need to specify an Info object with '
'information about the channels.')
src = inst
# Load the head<->MRI transform if necessary
if src[0]['coord_frame'] == FIFF.FIFFV_COORD_MRI:
if trans is None:
raise ValueError('Source space is in MRI coordinates, but no '
                             'head<->MRI transform was given. Please specify '
'the full path to the appropriate *-trans.fif '
'file as the "trans" parameter.')
if isinstance(trans, string_types):
trans = read_trans(trans, return_all=True)
last_exp = None
for trans in trans: # we got at least 1
try:
trans = _ensure_trans(trans, 'head', 'mri')
except Exception as exp:
last_exp = exp
else:
break
else:
raise last_exp
src_trans = invert_transform(_ensure_trans(trans, 'head', 'mri'))
print('Transform!')
else:
src_trans = Transform('head', 'head') # Identity transform
dev_to_head = _ensure_trans(info['dev_head_t'], 'meg', 'head')
# Select channels to be used for distance calculations
if picks is None:
picks = pick_types(info, meg=True)
if len(picks) > 0:
logger.info('Using MEG channels')
else:
logger.info('Using EEG channels')
picks = pick_types(info, eeg=True)
# get vertex position in same coordinates as for sensors below
src_pos = np.vstack([
apply_trans(src_trans, s['rr'][s['inuse'].astype(np.bool)])
for s in src
])
# get sensor positions
sensor_pos = []
for ch in picks:
# MEG channels are in device coordinates, translate them to head
if channel_type(info, ch) in ['mag', 'grad']:
sensor_pos.append(apply_trans(dev_to_head,
info['chs'][ch]['loc'][:3]))
else:
sensor_pos.append(info['chs'][ch]['loc'][:3])
sensor_pos = np.array(sensor_pos)
# minimum distances per vertex
depths = distance.cdist(sensor_pos, src_pos).min(axis=0)
return depths
|
def vertex_depths(inst, info=None, picks=None, trans=None, mode='dist',
verbose=None):
"""Compute source depths as distances between vertices and nearest sensor.
Parameters
----------
inst : instance of Forward | instance of SourceSpaces
The object to select vertices from.
info : instance of Info | None
The info structure that contains information about the channels with
respect to which to compute distances.
picks : array-like of int | None
        Indices of sensors to include in distance calculations. If ``None``
(default) then only MEG channels are used.
trans : str | instance of Transform | None
Either the full path to the head<->MRI transform ``*-trans.fif`` file
produced during coregistration, or the Transformation itself. If trans
is None, an identity matrix is assumed. Only needed when ``inst`` is a
source space in MRI coordinates.
mode : str
How to compute source depth. 'dist' computes Euclidean distance
between vertices and nearest sensors.
verbose : bool | str | int | None
If not None, override default verbose level (see :func:`mne.verbose`
and :ref:`Logging documentation <tut_logging>` for more).
Returns
-------
    depth : array of shape (n_vertices,)
The depths of source space vertices with respect to sensors.
"""
from .forward import Forward
if isinstance(inst, Forward):
info = inst['info']
src = inst['src']
elif isinstance(inst, SourceSpaces):
src = inst
if info is None:
raise ValueError('You need to specify an Info object with '
'information about the channels.')
src = inst
# Load the head<->MRI transform if necessary
if src[0]['coord_frame'] == FIFF.FIFFV_COORD_MRI:
if trans is None:
raise ValueError('Source space is in MRI coordinates, but no '
                             'head<->MRI transform was given. Please specify '
'the full path to the appropriate *-trans.fif '
'file as the "trans" parameter.')
if isinstance(trans, string_types):
trans = read_trans(trans, return_all=True)
last_exp = None
for trans in trans: # we got at least 1
try:
trans = _ensure_trans(trans, 'head', 'mri')
except Exception as exp:
last_exp = exp
else:
break
else:
raise last_exp
src_trans = invert_transform(_ensure_trans(trans, 'head', 'mri'))
print('Transform!')
else:
src_trans = Transform('head', 'head') # Identity transform
dev_to_head = _ensure_trans(info['dev_head_t'], 'meg', 'head')
# Select channels to be used for distance calculations
picks = _picks_to_idx(info, picks, 'data', exclude=())
# get vertex position in same coordinates as for sensors below
src_pos = np.vstack([
apply_trans(src_trans, s['rr'][s['inuse'].astype(np.bool)])
for s in src
])
# get sensor positions
sensor_pos = []
for ch in picks:
# MEG channels are in device coordinates, translate them to head
if channel_type(info, ch) in ['mag', 'grad']:
sensor_pos.append(apply_trans(dev_to_head,
info['chs'][ch]['loc'][:3]))
else:
sensor_pos.append(info['chs'][ch]['loc'][:3])
sensor_pos = np.array(sensor_pos)
# minimum distances per vertex
depths = distance.cdist(sensor_pos, src_pos).min(axis=0)
return depths
|
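The heart of the computation is a sensor-to-vertex distance matrix reduced over the sensor axis; a standalone sketch with made-up coordinates:
import numpy as np
from scipy.spatial import distance

sensor_pos = np.array([[0.0, 0.0, 0.1], [0.1, 0.0, 0.1]])   # two sensors (metres)
src_pos = np.array([[0.0, 0.0, 0.0], [0.05, 0.0, 0.05]])    # two source vertices

# One value per vertex: the Euclidean distance to the nearest sensor.
depths = distance.cdist(sensor_pos, src_pos).min(axis=0)
print(depths)  # approximately [0.1, 0.0707]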
6,494 |
def get_chart_data(emp_att_map, days):
labels = []
datasets = [
{"name": "Absent", "values": []},
{"name": "Present", "values": []},
{"name": "Leave", "values": []},
]
att_abbr_map = get_attendance_status_abbr_map(get_abbr_map=1)
half_day_leave_abbr, full_day_leave_abbr = get_leave_type_abbr(get_abbr=1)
for idx, day in enumerate(days, start=0):
labels.append(day.replace("::100", ""))
total_absent_on_day = 0
total_leave_on_day = 0
total_present_on_day = 0
total_holiday = 0
for emp in emp_att_map.keys():
if emp_att_map[emp][idx]:
abbr = emp_att_map[emp][idx].split(" + ")
if len(abbr) == 1 and abbr[0] not in ["<b>WO</b>", "<b>H</b>"]:
if abbr[0] in full_day_leave_abbr or abbr[0] == "L":
total_leave_on_day +=1
elif att_abbr_map[abbr[0]]["is_present"]:
total_present_on_day += 1
else:
total_absent_on_day += 1
#means half day having two attendance on same day
if len(abbr) == 2:
if abbr[0] in half_day_leave_abbr:
total_leave_on_day +=0.5
if att_abbr_map[abbr[1]]["is_present"]:
total_present_on_day += 0.5
else:
total_absent_on_day += 0.5
datasets[0]["values"].append(total_absent_on_day)
datasets[1]["values"].append(total_present_on_day)
datasets[2]["values"].append(total_leave_on_day)
chart = {
"data": {
'labels': labels,
'datasets': datasets
}
}
chart["type"] = "line"
return chart
|
def get_chart_data(emp_att_map, days):
labels = []
datasets = [
{"name": "Absent", "values": []},
{"name": "Present", "values": []},
{"name": "Leave", "values": []},
]
att_abbr_map = get_attendance_status_abbr_map(get_abbr_map=1)
half_day_leave_abbr, full_day_leave_abbr = get_leave_type_abbr(get_abbr=1)
for idx, day in enumerate(days, start=0):
labels.append(day.replace("::100", ""))
total_absent_on_day = 0
total_leave_on_day = 0
total_present_on_day = 0
total_holiday = 0
for emp in emp_att_map.keys():
if emp_att_map[emp][idx]:
abbr = emp_att_map[emp][idx].split(" + ")
if len(abbr) and abbr[0] not in ["<b>WO</b>", "<b>H</b>"]:
if abbr[0] in full_day_leave_abbr or abbr[0] == "L":
total_leave_on_day +=1
elif att_abbr_map[abbr[0]]["is_present"]:
total_present_on_day += 1
else:
total_absent_on_day += 1
#means half day having two attendance on same day
if len(abbr) == 2:
if abbr[0] in half_day_leave_abbr:
total_leave_on_day +=0.5
if att_abbr_map[abbr[1]]["is_present"]:
total_present_on_day += 0.5
else:
total_absent_on_day += 0.5
datasets[0]["values"].append(total_absent_on_day)
datasets[1]["values"].append(total_present_on_day)
datasets[2]["values"].append(total_leave_on_day)
chart = {
"data": {
'labels': labels,
'datasets': datasets
}
}
chart["type"] = "line"
return chart
|
49,600 |
def test_to_dataframe_optimize_graph():
x = db.from_sequence(
[{"name": "test1", "v1": 1}, {"name": "test2", "v1": 2}], npartitions=2
)
# linear operations will be fused by graph optimization
y = x.map(lambda a: dict(**a, v2=a["v1"] + 1))
y = y.map(lambda a: dict(**a, v3=a["v2"] + 1))
# with optimizations
d = y.to_dataframe()["v3"]
assert len([k for k in d.dask if k[0].startswith("getitem")]) == 2
# no optimizations
d2 = y.to_dataframe(optimize_graph=False)["v3"]
# due to fusing the unoptimized graph will be larger
assert len(dict(d2.dask)) > len(dict(d.dask))
assert (d.compute() == d2.compute()).all()
|
def test_to_dataframe_optimize_graph():
x = db.from_sequence(
[{"name": "test1", "v1": 1}, {"name": "test2", "v1": 2}], npartitions=2
)
# linear operations will be fused by graph optimization
y = x.map(lambda a: dict(**a, v2=a["v1"] + 1))
y = y.map(lambda a: dict(**a, v3=a["v2"] + 1))
# with optimizations
d = y.to_dataframe()["v3"]
assert len([k for k in d.dask if k[0].startswith("getitem")]) == 2
# no optimizations
d2 = y.to_dataframe(optimize_graph=False)["v3"]
# without fusing, both `map` calls will still be there
assert len([k for k in d.dask if k[0].startswith("lambda")]) == d.npartitions * 2
assert (d.compute() == d2.compute()).all()
|
43,697 |
def edge_driver(graph, reward):
r"""Returns the edge-driver cost Hamiltonian component.
Given some graph, :math:`G`, this method will return a Hamiltonian that "rewards"
bitstrings encoding graph colourings of :math:`G` (assigns them a lower
energy) with edges that end in nodes with colourings supplied in ``reward``.
See usage details for more information.
Args:
graph (nx.Graph): The graph on which the Hamiltonian is defined
reward (list[str]): The list of two-bit bitstrings that are "rewarded" by the Hamiltonian
Returns:
.Hamiltonian
**Example**
>>> graph = nx.Graph([(0, 1), (1, 2)])
>>> hamiltonian = qaoa.edge_driver(graph, ["11", "10", "01"])
>>> print(hamiltonian)
(1.0) [Z0 Z1] + (1.0) [Z0] + (2.0) [Z1] + (1.0) [Z1 Z2] + (1.0) [Z2]
"""
allowed = ["00", "01", "10", "11"]
if not all([e in allowed for e in reward]):
raise ValueError("Encountered invalid entry in 'reward', expected 2-bit bitstrings.")
if "01" in reward and "10" not in reward or "10" in reward and "01" not in reward:
raise ValueError(
"'reward' cannot contain either '10' or '01', must contain neither, or both."
)
if not isinstance(graph, nx.Graph):
raise ValueError("Input graph must be a nx.Graph, got {}".format(type(graph).__name__))
coeffs = []
ops = []
if len(reward) == 0 or len(reward) == 4:
coeffs = [1 for _ in graph.nodes]
ops = [qml.Identity(v) for v in graph.nodes]
else:
reward = list(set(reward) - {"01"})
sign = -1
if len(reward) == 2:
reward = list({"00", "10", "11"} - set(reward))
sign = 1
reward = reward[0]
if reward == "00":
for e in graph.edges:
coeffs.extend([0.5 * sign, 0.5 * sign, 0.5 * sign])
ops.extend(
[qml.PauliZ(e[0]) @ qml.PauliZ(e[1]), qml.PauliZ(e[0]), qml.PauliZ(e[1])]
)
if reward == "10":
for e in graph.edges:
coeffs.append(-1 * sign)
ops.append(qml.PauliZ(e[0]) @ qml.PauliZ(e[1]))
if reward == "11":
for e in graph.edges:
coeffs.extend([0.5 * sign, -0.5 * sign, -0.5 * sign])
ops.extend(
[qml.PauliZ(e[0]) @ qml.PauliZ(e[1]), qml.PauliZ(e[0]), qml.PauliZ(e[1])]
)
return qml.Hamiltonian(coeffs, ops)
|
def edge_driver(graph, reward):
r"""Returns the edge-driver cost Hamiltonian.
Given some graph, :math:`G`, this method will return a Hamiltonian that "rewards"
bitstrings encoding graph colourings of :math:`G` (assigns them a lower
energy) with edges that end in nodes with colourings supplied in ``reward``.
See usage details for more information.
Args:
graph (nx.Graph): The graph on which the Hamiltonian is defined
reward (list[str]): The list of two-bit bitstrings that are "rewarded" by the Hamiltonian
Returns:
.Hamiltonian
**Example**
>>> graph = nx.Graph([(0, 1), (1, 2)])
>>> hamiltonian = qaoa.edge_driver(graph, ["11", "10", "01"])
>>> print(hamiltonian)
(1.0) [Z0 Z1] + (1.0) [Z0] + (2.0) [Z1] + (1.0) [Z1 Z2] + (1.0) [Z2]
"""
allowed = ["00", "01", "10", "11"]
if not all([e in allowed for e in reward]):
raise ValueError("Encountered invalid entry in 'reward', expected 2-bit bitstrings.")
if "01" in reward and "10" not in reward or "10" in reward and "01" not in reward:
raise ValueError(
"'reward' cannot contain either '10' or '01', must contain neither, or both."
)
if not isinstance(graph, nx.Graph):
raise ValueError("Input graph must be a nx.Graph, got {}".format(type(graph).__name__))
coeffs = []
ops = []
if len(reward) == 0 or len(reward) == 4:
coeffs = [1 for _ in graph.nodes]
ops = [qml.Identity(v) for v in graph.nodes]
else:
reward = list(set(reward) - {"01"})
sign = -1
if len(reward) == 2:
reward = list({"00", "10", "11"} - set(reward))
sign = 1
reward = reward[0]
if reward == "00":
for e in graph.edges:
coeffs.extend([0.5 * sign, 0.5 * sign, 0.5 * sign])
ops.extend(
[qml.PauliZ(e[0]) @ qml.PauliZ(e[1]), qml.PauliZ(e[0]), qml.PauliZ(e[1])]
)
if reward == "10":
for e in graph.edges:
coeffs.append(-1 * sign)
ops.append(qml.PauliZ(e[0]) @ qml.PauliZ(e[1]))
if reward == "11":
for e in graph.edges:
coeffs.extend([0.5 * sign, -0.5 * sign, -0.5 * sign])
ops.extend(
[qml.PauliZ(e[0]) @ qml.PauliZ(e[1]), qml.PauliZ(e[0]), qml.PauliZ(e[1])]
)
return qml.Hamiltonian(coeffs, ops)
|
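Working through the branches above for a single edge with reward=['11']: sign stays -1, so the edge contributes coefficients [-0.5, 0.5, 0.5] on Z0@Z1, Z0 and Z1. A short usage sketch, assuming a PennyLane install that exposes the qaoa module:
import networkx as nx
from pennylane import qaoa

graph = nx.Graph([(0, 1)])
hamiltonian = qaoa.edge_driver(graph, ["11"])
# Expected terms, read off the branches above: -0.5 Z0@Z1 + 0.5 Z0 + 0.5 Z1.
print(hamiltonian)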
23,516 |
def get_passed_tests():
"""
Get the list of passed tests by inspecting the log generated by pytest.
This is useful on CIs to restart the test suite from the point where a
segfault was thrown by it.
"""
# This assumes the pytest log is placed next to this file. That's where
# we put it on CIs.
if osp.isfile('pytest_log.txt'):
with open('pytest_log.txt') as f:
logfile = f.readlines()
# All lines that start with 'spyder' are tests. The rest are
# informative messages.
test_re = re.compile(r'(spyder.*) (SKIPPED|PASSED|XFAIL) ')
tests = []
for line in logfile:
match = test_re.match(line)
if match:
tests.append(match.group(1))
return tests
else:
return []
|
def get_passed_tests():
"""
Get the list of passed tests by inspecting the log generated by pytest.
This is useful on CIs to restart the test suite from the point where a
segfault was thrown by it.
"""
# This assumes the pytest log is placed next to this file. That's where
# we put it on CIs.
if osp.isfile('pytest_log.txt'):
with open('pytest_log.txt') as f:
logfile = f.readlines()
# Detect all tests that passed before.
test_re = re.compile(r'(spyder.*) (SKIPPED|PASSED|XFAIL) ')
tests = []
for line in logfile:
match = test_re.match(line)
if match:
tests.append(match.group(1))
return tests
else:
return []
|
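For illustration, a self-contained sketch of what the regex in get_passed_tests extracts; the log lines below are hypothetical and only mimic the "name STATUS" layout the pattern expects:

import re

# Hypothetical pytest log lines (not from a real run).
sample_log = [
    "spyder/app/tests/test_mainwindow.py::test_window_title PASSED  [  1%]\n",
    "---------- coverage report ----------\n",
    "spyder/plugins/editor/tests/test_editor.py::test_go_to_line SKIPPED  [  2%]\n",
]

test_re = re.compile(r'(spyder.*) (SKIPPED|PASSED|XFAIL) ')
tests = []
for line in sample_log:
    match = test_re.match(line)
    if match:
        tests.append(match.group(1))

# Only lines that start with 'spyder' and carry a trailing status are kept.
print(tests)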
47,289 |
def load_tf_weights_in_electra(model, config, tf_checkpoint_path, discriminator_or_generator="discriminator"):
"""Load tf checkpoints in a pytorch model."""
try:
import re
import numpy as np
import tensorflow as tf
except ImportError:
logger.error(
"Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. Please see "
"https://www.tensorflow.org/install/ for installation instructions."
)
raise
tf_path = os.path.abspath(tf_checkpoint_path)
logger.info(f"Converting TensorFlow checkpoint from {tf_path}")
# Load weights from TF model
init_vars = tf.train.list_variables(tf_path)
names = []
arrays = []
for name, shape in init_vars:
logger.info(f"Loading TF weight {name} with shape {shape}")
array = tf.train.load_variable(tf_path, name)
names.append(name)
arrays.append(array)
for name, array in zip(names, arrays):
original_name: str = name
try:
if isinstance(model, ElectraForMaskedLM):
name = name.replace("electra/embeddings/", "generator/embeddings/")
if discriminator_or_generator == "generator":
name = name.replace("electra/", "discriminator/")
name = name.replace("generator/", "electra/")
name = name.replace("dense_1", "dense_prediction")
name = name.replace("generator_predictions/output_bias", "generator_lm_head/bias")
name = name.split("/")
# print(original_name, name)
            # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculate m and v,
            # which are not required for using the pretrained model
if any(n in ["global_step", "temperature"] for n in name):
logger.info(f"Skipping {original_name}")
continue
pointer = model
for m_name in name:
if re.fullmatch(r"[A-Za-z]+_\d+", m_name):
scope_names = re.split(r"_(\d+)", m_name)
else:
scope_names = [m_name]
if scope_names[0] == "kernel" or scope_names[0] == "gamma":
pointer = getattr(pointer, "weight")
elif scope_names[0] == "output_bias" or scope_names[0] == "beta":
pointer = getattr(pointer, "bias")
elif scope_names[0] == "output_weights":
pointer = getattr(pointer, "weight")
elif scope_names[0] == "squad":
pointer = getattr(pointer, "classifier")
else:
pointer = getattr(pointer, scope_names[0])
if len(scope_names) >= 2:
num = int(scope_names[1])
pointer = pointer[num]
if m_name.endswith("_embeddings"):
pointer = getattr(pointer, "weight")
elif m_name == "kernel":
array = np.transpose(array)
try:
if not pointer.shape == array.shape:
raise ValueError(f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched")
except AssertionError as e:
e.args += (pointer.shape, array.shape)
raise
print(f"Initialize PyTorch weight {name}", original_name)
pointer.data = torch.from_numpy(array)
except AttributeError as e:
print(f"Skipping {original_name}", name, e)
continue
return model
|
def load_tf_weights_in_electra(model, config, tf_checkpoint_path, discriminator_or_generator="discriminator"):
"""Load tf checkpoints in a pytorch model."""
try:
import re
import numpy as np
import tensorflow as tf
except ImportError:
logger.error(
"Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. Please see "
"https://www.tensorflow.org/install/ for installation instructions."
)
raise
tf_path = os.path.abspath(tf_checkpoint_path)
logger.info(f"Converting TensorFlow checkpoint from {tf_path}")
# Load weights from TF model
init_vars = tf.train.list_variables(tf_path)
names = []
arrays = []
for name, shape in init_vars:
logger.info(f"Loading TF weight {name} with shape {shape}")
array = tf.train.load_variable(tf_path, name)
names.append(name)
arrays.append(array)
for name, array in zip(names, arrays):
original_name: str = name
try:
if isinstance(model, ElectraForMaskedLM):
name = name.replace("electra/embeddings/", "generator/embeddings/")
if discriminator_or_generator == "generator":
name = name.replace("electra/", "discriminator/")
name = name.replace("generator/", "electra/")
name = name.replace("dense_1", "dense_prediction")
name = name.replace("generator_predictions/output_bias", "generator_lm_head/bias")
name = name.split("/")
# print(original_name, name)
            # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculate m and v,
            # which are not required for using the pretrained model
if any(n in ["global_step", "temperature"] for n in name):
logger.info(f"Skipping {original_name}")
continue
pointer = model
for m_name in name:
if re.fullmatch(r"[A-Za-z]+_\d+", m_name):
scope_names = re.split(r"_(\d+)", m_name)
else:
scope_names = [m_name]
if scope_names[0] == "kernel" or scope_names[0] == "gamma":
pointer = getattr(pointer, "weight")
elif scope_names[0] == "output_bias" or scope_names[0] == "beta":
pointer = getattr(pointer, "bias")
elif scope_names[0] == "output_weights":
pointer = getattr(pointer, "weight")
elif scope_names[0] == "squad":
pointer = getattr(pointer, "classifier")
else:
pointer = getattr(pointer, scope_names[0])
if len(scope_names) >= 2:
num = int(scope_names[1])
pointer = pointer[num]
if m_name.endswith("_embeddings"):
pointer = getattr(pointer, "weight")
elif m_name == "kernel":
array = np.transpose(array)
try:
if pointer.shape != array.shape:
raise ValueError(f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched")
except AssertionError as e:
e.args += (pointer.shape, array.shape)
raise
print(f"Initialize PyTorch weight {name}", original_name)
pointer.data = torch.from_numpy(array)
except AttributeError as e:
print(f"Skipping {original_name}", name, e)
continue
return model
|
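The trickiest part of the loader above is translating TF scope names into PyTorch attribute lookups. The standalone snippet below, using made-up scope names, demonstrates how the two regular expressions split an indexed scope such as "layer_11" into an attribute name plus an index:

import re

# Made-up scope names illustrating the name-splitting logic used above.
for m_name in ["layer_11", "kernel", "output_bias", "word_embeddings"]:
    if re.fullmatch(r"[A-Za-z]+_\d+", m_name):
        scope_names = re.split(r"_(\d+)", m_name)
    else:
        scope_names = [m_name]
    print(m_name, "->", scope_names)

# layer_11        -> ['layer', '11', '']   attribute 'layer', then index 11
# kernel          -> ['kernel']            mapped to 'weight' and transposed
# output_bias     -> ['output_bias']       mapped to 'bias'
# word_embeddings -> ['word_embeddings']   the '_embeddings' suffix selects 'weight'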