id (int64, 11 to 59.9k) | original (string, lengths 33 to 150k) | modified (string, lengths 37 to 150k) |
---|---|---|
12,965 |
def validate_one_of_args_is_in_query(*args):
# split args into 2-element tuples
splitted_args = [args[i : i + 2] for i in range(0, len(args), 2)] # noqa: E203
# filter truthy values from each tuple
filter_args = list(filter(lambda item: bool(item[1]) is True, splitted_args))
if len(filter_args) > 1:
rest_args = ", ".join([f"'{item[0]}'" for item in filter_args[1:]])
raise GraphQLError(
f"Argument '{filter_args[0][0]}' cannot be combined with {rest_args}"
)
if not filter_args:
required_args = ", ".join([f"'{item[0]}'" for item in splitted_args])
raise GraphQLError(f"At least one of arguments is required: {required_args}.")
|
def validate_one_of_args_is_in_query(*args):
# split args into a list with 2-element tuples: [(arg1_name, arg1_value), (arg2_name, arg2_value), ...]
splitted_args = [args[i : i + 2] for i in range(0, len(args), 2)] # noqa: E203
# filter truthy values from each tuple
filter_args = list(filter(lambda item: bool(item[1]) is True, splitted_args))
if len(filter_args) > 1:
rest_args = ", ".join([f"'{item[0]}'" for item in filter_args[1:]])
raise GraphQLError(
f"Argument '{filter_args[0][0]}' cannot be combined with {rest_args}"
)
if not filter_args:
required_args = ", ".join([f"'{item[0]}'" for item in splitted_args])
raise GraphQLError(f"At least one of arguments is required: {required_args}.")
|
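A short usage sketch of the validator above, assuming the function is in scope; the argument names and values are invented, and the exception type is whichever GraphQLError the surrounding module imports.
# exactly one lookup argument is truthy, so this returns silently
validate_one_of_args_is_in_query("id", "UHJvZHVjdDox", "slug", None)
# both lookups are truthy, so the first branch raises
try:
    validate_one_of_args_is_in_query("id", "UHJvZHVjdDox", "slug", "blue-hoodie")
except Exception as err:  # GraphQLError in the real codebase
    print(err)  # Argument 'id' cannot be combined with 'slug'
# no lookup is given, so the second branch raises
try:
    validate_one_of_args_is_in_query("id", None, "slug", None)
except Exception as err:
    print(err)  # At least one of arguments is required: 'id', 'slug'.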
53,871 |
def draw_termite_plot(
values_mat,
col_labels,
row_labels,
*,
highlight_cols=None,
highlight_colors=None,
save=False,
rc_params=None,
):
"""
Make a "termite" plot, typically used for assessing topic models with a tabular
layout that promotes comparison of terms both within and across topics.
Args:
values_mat (:class:`np.ndarray` or matrix): matrix of values with shape
(# row labels, # col labels) used to size the dots on the grid
col_labels (seq[str]): labels used to identify x-axis ticks on the grid
row_labels(seq[str]): labels used to identify y-axis ticks on the grid
highlight_cols (int or seq[int], optional): indices for columns
to visually highlight in the plot with contrasting colors
highlight_colors (tuple of 2-tuples): each 2-tuple corresponds to a pair
of (light/dark) matplotlib-friendly colors used to highlight a single
column; if not specified (default), a good set of 6 pairs are used
save (str, optional): give the full /path/to/fname on disk to save figure
rc_params (dict, optional): allow passing parameters to rc_context in matplotlib.pyplot,
details in https://matplotlib.org/3.1.0/api/_as_gen/matplotlib.pyplot.rc_context.html
Returns:
:obj:`matplotlib.axes.Axes.axis`: Axis on which termite plot is plotted.
Raises:
ValueError: if more columns are selected for highlighting than colors
or if any of the inputs' dimensions don't match
References:
Chuang, Jason, Christopher D. Manning, and Jeffrey Heer. "Termite:
Visualization techniques for assessing textual topic models."
Proceedings of the International Working Conference on Advanced
Visual Interfaces. ACM, 2012.
See Also:
:meth:`TopicModel.termite_plot() <textacy.tm.topic_model.TopicModel.termite_plot>`
"""
try:
plt
except NameError:
raise ImportError(
"`matplotlib` is not installed, so `textacy.viz` won't work; "
"install it individually via `$ pip install matplotlib`, or "
"along with textacy via `pip install textacy[viz]`."
)
n_rows, n_cols = values_mat.shape
max_val = np.max(values_mat)
if n_rows != len(row_labels):
msg = "values_mat and row_labels dimensions don't match: {} vs. {}".format(
n_rows, len(row_labels)
)
raise ValueError(msg)
if n_cols != len(col_labels):
msg = "values_mat and col_labels dimensions don't match: {} vs. {}".format(
n_cols, len(col_labels)
)
raise ValueError(msg)
if highlight_colors is None:
highlight_colors = COLOR_PAIRS
if highlight_cols is not None:
if isinstance(highlight_cols, int):
highlight_cols = (highlight_cols,)
elif len(highlight_cols) > len(highlight_colors):
msg = "no more than {} columns may be highlighted at once".format(
len(highlight_colors)
)
raise ValueError(msg)
highlight_colors = {hc: COLOR_PAIRS[i] for i, hc in enumerate(highlight_cols)}
_rc_params = RC_PARAMS.copy()
if rc_params:
_rc_params.update(rc_params)
with plt.rc_context(RC_PARAMS):
fig, ax = plt.subplots(figsize=(pow(n_cols, 0.8), pow(n_rows, 0.66)))
_ = ax.set_yticks(range(n_rows))
yticklabels = ax.set_yticklabels(row_labels, fontsize=14, color="gray")
if highlight_cols is not None:
for i, ticklabel in enumerate(yticklabels):
max_tick_val = max(values_mat[i, hc] for hc in range(n_cols))
for hc in highlight_cols:
if max_tick_val > 0 and values_mat[i, hc] == max_tick_val:
ticklabel.set_color(highlight_colors[hc][1])
ax.get_xaxis().set_ticks_position("top")
_ = ax.set_xticks(range(n_cols))
xticklabels = ax.set_xticklabels(
col_labels, fontsize=14, color="gray", rotation=-60, ha="right"
)
# Create offset transform by 10 points in x direction
dx = 10/72.; dy = 0/72.
offset = matplotlib.transforms.ScaledTranslation(dx, dy, fig.dpi_scale_trans)
for label in ax.xaxis.get_majorticklabels():
label.set_transform(label.get_transform() + offset)
if highlight_cols is not None:
gridlines = ax.get_xgridlines()
for i, ticklabel in enumerate(xticklabels):
if i in highlight_cols:
ticklabel.set_color(highlight_colors[i][1])
gridlines[i].set_color(highlight_colors[i][0])
gridlines[i].set_alpha(0.5)
for col_ind in range(n_cols):
if highlight_cols is not None and col_ind in highlight_cols:
ax.scatter(
[col_ind for _ in range(n_rows)],
[i for i in range(n_rows)],
s=600 * (values_mat[:, col_ind] / max_val),
alpha=0.5,
linewidth=1,
color=highlight_colors[col_ind][0],
edgecolor=highlight_colors[col_ind][1],
)
else:
ax.scatter(
[col_ind for _ in range(n_rows)],
[i for i in range(n_rows)],
s=600 * (values_mat[:, col_ind] / max_val),
alpha=0.5,
linewidth=1,
color="lightgray",
edgecolor="gray",
)
_ = ax.set_xlim(left=-1, right=n_cols)
_ = ax.set_ylim(bottom=-1, top=n_rows)
ax.invert_yaxis() # otherwise, values/labels go from bottom to top
if save:
fig.savefig(save, bbox_inches="tight", dpi=100)
return ax
|
def draw_termite_plot(
values_mat,
col_labels,
row_labels,
*,
highlight_cols=None,
highlight_colors=None,
save=False,
rc_params=None,
):
"""
Make a "termite" plot, typically used for assessing topic models with a tabular
layout that promotes comparison of terms both within and across topics.
Args:
values_mat (:class:`np.ndarray` or matrix): matrix of values with shape
(# row labels, # col labels) used to size the dots on the grid
col_labels (seq[str]): labels used to identify x-axis ticks on the grid
row_labels(seq[str]): labels used to identify y-axis ticks on the grid
highlight_cols (int or seq[int], optional): indices for columns
to visually highlight in the plot with contrasting colors
highlight_colors (tuple of 2-tuples): each 2-tuple corresponds to a pair
of (light/dark) matplotlib-friendly colors used to highlight a single
column; if not specified (default), a good set of 6 pairs are used
save (str, optional): give the full /path/to/fname on disk to save figure
rc_params (dict, optional): allow passing parameters to rc_context in matplotlib.pyplot,
details in https://matplotlib.org/3.1.0/api/_as_gen/matplotlib.pyplot.rc_context.html
Returns:
:obj:`matplotlib.axes.Axes.axis`: Axis on which termite plot is plotted.
Raises:
ValueError: if more columns are selected for highlighting than colors
or if any of the inputs' dimensions don't match
References:
Chuang, Jason, Christopher D. Manning, and Jeffrey Heer. "Termite:
Visualization techniques for assessing textual topic models."
Proceedings of the International Working Conference on Advanced
Visual Interfaces. ACM, 2012.
See Also:
:meth:`TopicModel.termite_plot() <textacy.tm.topic_model.TopicModel.termite_plot>`
"""
try:
plt
except NameError:
raise ImportError(
"`matplotlib` is not installed, so `textacy.viz` won't work; "
"install it individually via `$ pip install matplotlib`, or "
"along with textacy via `pip install textacy[viz]`."
)
n_rows, n_cols = values_mat.shape
max_val = np.max(values_mat)
if n_rows != len(row_labels):
msg = "values_mat and row_labels dimensions don't match: {} vs. {}".format(
n_rows, len(row_labels)
)
raise ValueError(msg)
if n_cols != len(col_labels):
msg = "values_mat and col_labels dimensions don't match: {} vs. {}".format(
n_cols, len(col_labels)
)
raise ValueError(msg)
if highlight_colors is None:
highlight_colors = COLOR_PAIRS
if highlight_cols is not None:
if isinstance(highlight_cols, int):
highlight_cols = (highlight_cols,)
elif len(highlight_cols) > len(highlight_colors):
msg = "no more than {} columns may be highlighted at once".format(
len(highlight_colors)
)
raise ValueError(msg)
highlight_colors = {hc: COLOR_PAIRS[i] for i, hc in enumerate(highlight_cols)}
_rc_params = RC_PARAMS.copy()
if rc_params:
_rc_params.update(rc_params)
with plt.rc_context(RC_PARAMS):
fig, ax = plt.subplots(figsize=(pow(n_cols, 0.8), pow(n_rows, 0.66)))
_ = ax.set_yticks(range(n_rows))
yticklabels = ax.set_yticklabels(row_labels, fontsize=14, color="gray")
if highlight_cols is not None:
for i, ticklabel in enumerate(yticklabels):
max_tick_val = max(values_mat[i, hc] for hc in range(n_cols))
for hc in highlight_cols:
if max_tick_val > 0 and values_mat[i, hc] == max_tick_val:
ticklabel.set_color(highlight_colors[hc][1])
ax.get_xaxis().set_ticks_position("top")
_ = ax.set_xticks(range(n_cols))
xticklabels = ax.set_xticklabels(
col_labels, fontsize=14, color="gray", rotation=-60, ha="right"
)
# Create offset transform by 10 points in x direction
dx = 10 / 72
dy = 0
offset = matplotlib.transforms.ScaledTranslation(dx, dy, fig.dpi_scale_trans)
for label in ax.xaxis.get_majorticklabels():
label.set_transform(label.get_transform() + offset)
if highlight_cols is not None:
gridlines = ax.get_xgridlines()
for i, ticklabel in enumerate(xticklabels):
if i in highlight_cols:
ticklabel.set_color(highlight_colors[i][1])
gridlines[i].set_color(highlight_colors[i][0])
gridlines[i].set_alpha(0.5)
for col_ind in range(n_cols):
if highlight_cols is not None and col_ind in highlight_cols:
ax.scatter(
[col_ind for _ in range(n_rows)],
[i for i in range(n_rows)],
s=600 * (values_mat[:, col_ind] / max_val),
alpha=0.5,
linewidth=1,
color=highlight_colors[col_ind][0],
edgecolor=highlight_colors[col_ind][1],
)
else:
ax.scatter(
[col_ind for _ in range(n_rows)],
[i for i in range(n_rows)],
s=600 * (values_mat[:, col_ind] / max_val),
alpha=0.5,
linewidth=1,
color="lightgray",
edgecolor="gray",
)
_ = ax.set_xlim(left=-1, right=n_cols)
_ = ax.set_ylim(bottom=-1, top=n_rows)
ax.invert_yaxis() # otherwise, values/labels go from bottom to top
if save:
fig.savefig(save, bbox_inches="tight", dpi=100)
return ax
|
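A hedged usage sketch of draw_termite_plot with random data; the import path is an assumption that may differ across textacy versions, and matplotlib must be installed.
import numpy as np
from textacy.viz.termite_plot import draw_termite_plot  # import path is an assumption

values = np.random.rand(8, 4)                    # shape (# row labels, # col labels)
row_labels = [f"term_{i}" for i in range(8)]
col_labels = [f"topic_{j}" for j in range(4)]
# highlight the second topic column; save=False (default) keeps the figure in memory only
ax = draw_termite_plot(values, col_labels, row_labels, highlight_cols=[1])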
25,065 |
def install_local_package(package_root, path):
"""This installs a local dependency of a package."""
# Becaus of these bugs:
# - pip https://github.com/pypa/pip/issues/4390
# - setuptools https://github.com/pypa/setuptools/issues/392
# we cannot just call `pip install --target $folder --editable $package`.
# Hence the workaround of first installing only the package and then its dependencies
# XXX: windows
env = dict(os.environ)
env["PYTHONPATH"] = package_root
# Step 1: generate egg info and link us into the target folder.
rv = portable_popen(
[
sys.executable,
"-m",
"pip",
"install",
"--editable",
path,
"--install-option=--install-dir=%s" % package_root,
"--no-deps",
],
env=env,
).wait()
if rv != 0:
raise RuntimeError("Failed to install local package")
# Step 2: generate the egg info into a temp folder to find the
# requirements.
tmp = tempfile.mkdtemp()
try:
rv = portable_popen(
[
sys.executable,
"setup.py",
"--quiet",
"egg_info",
"--quiet",
"--egg-base",
tmp,
],
cwd=path,
).wait()
dirs = os.listdir(tmp)
if rv != 0 or len(dirs) != 1:
raise RuntimeError("Failed to create egg info for local package.")
requires_path = os.path.join(tmp, dirs[0], "requires.txt")
if os.path.isfile(requires_path):
# We have dependencies, install them!
requirements_path = requiriements_txt_from_requires_file_in_same_directory(
requires_path
)
download_and_install_package(
package_root, requirements_file=requirements_path
)
finally:
shutil.rmtree(tmp)
|
def install_local_package(package_root, path):
"""This installs a local dependency of a package."""
# Because of these bugs:
# - pip https://github.com/pypa/pip/issues/4390
# - setuptools https://github.com/pypa/setuptools/issues/392
# we cannot just call `pip install --target $folder --editable $package`.
# Hence the workaround of first installing only the package and then its dependencies
# XXX: windows
env = dict(os.environ)
env["PYTHONPATH"] = package_root
# Step 1: generate egg info and link us into the target folder.
rv = portable_popen(
[
sys.executable,
"-m",
"pip",
"install",
"--editable",
path,
"--install-option=--install-dir=%s" % package_root,
"--no-deps",
],
env=env,
).wait()
if rv != 0:
raise RuntimeError("Failed to install local package")
# Step 2: generate the egg info into a temp folder to find the
# requirements.
tmp = tempfile.mkdtemp()
try:
rv = portable_popen(
[
sys.executable,
"setup.py",
"--quiet",
"egg_info",
"--quiet",
"--egg-base",
tmp,
],
cwd=path,
).wait()
dirs = os.listdir(tmp)
if rv != 0 or len(dirs) != 1:
raise RuntimeError("Failed to create egg info for local package.")
requires_path = os.path.join(tmp, dirs[0], "requires.txt")
if os.path.isfile(requires_path):
# We have dependencies, install them!
requirements_path = requiriements_txt_from_requires_file_in_same_directory(
requires_path
)
download_and_install_package(
package_root, requirements_file=requirements_path
)
finally:
shutil.rmtree(tmp)
|
42,120 |
def test_tell_invalid() -> None:
study = create_study()
# Missing values for completions.
with pytest.raises(ValueError):
study.tell(study.ask(), state=TrialState.COMPLETE)
# `state` must be None or finished state
with pytest.raises(ValueError):
study.tell(study.ask(), state=TrialState.RUNNING)
# `state` must be None or finished state
with pytest.raises(ValueError):
study.tell(study.ask(), state=TrialState.WAITING)
# `value` must be None for `TrialState.PRUNED`
with pytest.raises(ValueError):
study.tell(study.ask(), values=1, state=TrialState.PRUNED)
# `value` must be None for `TrialState.FAIL`
with pytest.raises(ValueError):
study.tell(study.ask(), values=1, state=TrialState.FAIL)
# Trial that has not been asked for cannot be told.
with pytest.raises(ValueError):
study.tell(study.ask().number + 1, 1.0)
# Waiting trial cannot be told.
with pytest.raises(ValueError):
study.enqueue_trial({})
study.tell(len(study.trials) - 1, 1.0)
# It must be Trial or int for trial.
with pytest.raises(TypeError):
study.tell("1", 1.0) # type: ignore
|
def test_tell_invalid() -> None:
study = create_study()
# Missing values for completions.
with pytest.raises(ValueError):
study.tell(study.ask(), state=TrialState.COMPLETE)
# `state` must be None or finished state
with pytest.raises(ValueError):
study.tell(study.ask(), state=TrialState.RUNNING)
# `state` must be None or finished state
with pytest.raises(ValueError):
study.tell(study.ask(), state=TrialState.WAITING)
# `value` must be None for `TrialState.PRUNED`
with pytest.raises(ValueError):
study.tell(study.ask(), values=1, state=TrialState.PRUNED)
# `value` must be None for `TrialState.FAIL`
with pytest.raises(ValueError):
study.tell(study.ask(), values=1, state=TrialState.FAIL)
# Trial that has not been asked for cannot be told.
with pytest.raises(ValueError):
study.tell(study.ask().number + 1, 1.0)
# Waiting trial cannot be told.
with pytest.raises(ValueError):
study.enqueue_trial({})
study.tell(study.trials[-1].number, 1.0)
# It must be Trial or int for trial.
with pytest.raises(TypeError):
study.tell("1", 1.0) # type: ignore
|
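For contrast with the invalid calls tested above, a minimal sketch of the valid Optuna ask-and-tell flow; the objective value is arbitrary.
import optuna
from optuna.trial import TrialState

study = optuna.create_study()
trial = study.ask()
x = trial.suggest_float("x", -10, 10)
study.tell(trial, (x - 2) ** 2)            # completes the trial with a value
failed = study.ask()
study.tell(failed, state=TrialState.FAIL)  # a finished state needs no value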
57,865 |
def get_files_from_github(username: str, branch: str, pr_number: str) -> List[str]:
"""
Write the changed files' content into the local content repo
Args:
username: The username of the contributor (e.g. demisto / xsoar-bot)
branch: The contributor branch
pr_number: The contrib PR
Returns:
A list of pack names, if found.
"""
content_path = os.getcwd()
files_list = set()
chunk_size = 1024 * 500 # 500 Kb
base_url = f'https://raw.githubusercontent.com/{username}/content/{branch}'
for file_path in get_pr_files(pr_number):
file_path_parts = file_path.split('/')
file_dir = os.path.sep.join(file_path_parts[:-1])
abs_dir = os.path.join(content_path, file_dir)
if not os.path.isdir(abs_dir):
os.makedirs(abs_dir)
with open(os.path.join(content_path, file_path), 'wb') as changed_file:
with requests.get(urljoin(base_url, file_path), stream=True) as file_content:
for data in file_content.iter_content(chunk_size=chunk_size):
changed_file.write(data)
files_list.add(file_path_parts[1])
return list(files_list)
|
def get_files_from_github(username: str, branch: str, pr_number: str) -> List[str]:
"""
Write the changed files' content into the local content repo
Args:
username: The username of the contributor (e.g. demisto / xsoar-bot)
branch: The contributor branch
pr_number: The contrib PR
Returns:
A list of pack names, if found.
"""
content_path = os.getcwd()
files_list = set()
chunk_size = 1024 * 500 # 500 Kb
base_url = f'https://raw.githubusercontent.com/{username}/content/{branch}'
for file_path in get_pr_files(pr_number):
full_file_path = os.path.join(content_path, file_path)
file_dir = os.path.dirname(full_file_path)
if not os.path.isdir(file_dir):
os.makedirs(file_dir)
with open(full_file_path, 'wb') as changed_file:
with requests.get(urljoin(base_url, file_path), stream=True) as file_content:
for data in file_content.iter_content(chunk_size=chunk_size):
changed_file.write(data)
files_list.add(file_path_parts[1])
return list(files_list)
|
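The download loop above follows the standard requests streaming pattern; here is a self-contained sketch of that pattern with a throwaway URL and destination file.
import os
import requests

url = "https://raw.githubusercontent.com/octocat/Hello-World/master/README"
dest = os.path.join(os.getcwd(), "README.downloaded")
with requests.get(url, stream=True) as resp:
    resp.raise_for_status()
    with open(dest, "wb") as fh:
        for chunk in resp.iter_content(chunk_size=1024 * 500):  # 500 KiB chunks
            fh.write(chunk)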
25,548 |
def test_handle_offchain_secretreveal_after_lock_expired():
"""Test that getting the secret revealed after lock expiration for the
target does not end up continuoysly emitting EventUnlockClaimFailed
Target part for https://github.com/raiden-network/raiden/issues/3086
"""
amount = 3
block_number = 1
expiration = block_number + factories.UNIT_REVEAL_TIMEOUT
initiator = factories.HOP1
our_address = factories.ADDR
secret = factories.UNIT_SECRET
pseudo_random_generator = random.Random()
channel_state, state = make_target_state(
our_address,
amount,
block_number,
initiator,
expiration,
)
lock_expiration = state.transfer.lock.expiration
lock_expiration_block_number = lock_expiration + DEFAULT_NUMBER_OF_BLOCK_CONFIRMATIONS * 2
lock_expiration_block = Block(
block_number=lock_expiration_block_number,
gas_limit=1,
block_hash=factories.make_transaction_hash(),
)
iteration = target.state_transition(
target_state=state,
state_change=lock_expiration_block,
channel_state=channel_state,
pseudo_random_generator=pseudo_random_generator,
block_number=lock_expiration_block_number,
)
state = iteration.new_state
msg = 'At the expiration block we should get an EventUnlockClaimFailed'
assert must_contain_entry(iteration.events, EventUnlockClaimFailed, {}), msg
iteration = target.state_transition(
target_state=state,
state_change=ReceiveSecretReveal(secret, initiator),
channel_state=channel_state,
pseudo_random_generator=pseudo_random_generator,
block_number=lock_expiration_block_number + 1,
)
state = iteration.new_state
next_block = Block(
block_number=lock_expiration_block_number + 1,
gas_limit=1,
block_hash=factories.make_transaction_hash(),
)
iteration = target.state_transition(
target_state=state,
state_change=next_block,
channel_state=channel_state,
pseudo_random_generator=pseudo_random_generator,
block_number=lock_expiration_block_number + 1,
)
msg = 'At the next block we should not get the same event'
assert not must_contain_entry(iteration.events, EventUnlockClaimFailed, {}), msg
|
def test_handle_offchain_secretreveal_after_lock_expired():
"""Test that getting the secret revealed after lock expiration for the
target does not end up continuously emitting EventUnlockClaimFailed
Target part for https://github.com/raiden-network/raiden/issues/3086
"""
amount = 3
block_number = 1
expiration = block_number + factories.UNIT_REVEAL_TIMEOUT
initiator = factories.HOP1
our_address = factories.ADDR
secret = factories.UNIT_SECRET
pseudo_random_generator = random.Random()
channel_state, state = make_target_state(
our_address,
amount,
block_number,
initiator,
expiration,
)
lock_expiration = state.transfer.lock.expiration
lock_expiration_block_number = lock_expiration + DEFAULT_NUMBER_OF_BLOCK_CONFIRMATIONS * 2
lock_expiration_block = Block(
block_number=lock_expiration_block_number,
gas_limit=1,
block_hash=factories.make_transaction_hash(),
)
iteration = target.state_transition(
target_state=state,
state_change=lock_expiration_block,
channel_state=channel_state,
pseudo_random_generator=pseudo_random_generator,
block_number=lock_expiration_block_number,
)
state = iteration.new_state
msg = 'At the expiration block we should get an EventUnlockClaimFailed'
assert must_contain_entry(iteration.events, EventUnlockClaimFailed, {}), msg
iteration = target.state_transition(
target_state=state,
state_change=ReceiveSecretReveal(secret, initiator),
channel_state=channel_state,
pseudo_random_generator=pseudo_random_generator,
block_number=lock_expiration_block_number + 1,
)
state = iteration.new_state
next_block = Block(
block_number=lock_expiration_block_number + 1,
gas_limit=1,
block_hash=factories.make_transaction_hash(),
)
iteration = target.state_transition(
target_state=state,
state_change=next_block,
channel_state=channel_state,
pseudo_random_generator=pseudo_random_generator,
block_number=lock_expiration_block_number + 1,
)
msg = 'At the next block we should not get the same event'
assert not must_contain_entry(iteration.events, EventUnlockClaimFailed, {}), msg
|
46,523 |
def test_finality_from_genesis_rule_4(state):
test_state = deepcopy(state)
blocks = []
for epoch in range(6):
prev_state, new_blocks, test_state = next_epoch_with_attestations(test_state, True, False)
blocks += new_blocks
if epoch == 0:
check_finality(test_state, prev_state, False, False, False)
elif epoch == 1:
check_finality(test_state, prev_state, False, False, False)
elif epoch == 2:
check_finality(test_state, prev_state, True, False, False)
elif epoch >= 3:
# rule 4 of finality
check_finality(test_state, prev_state, True, True, True)
assert test_state.finalized_epoch == prev_state.current_justified_epoch
assert test_state.finalized_root == prev_state.current_justified_root
return state, blocks, test_state
|
def test_finality_from_genesis_rule_4(state):
test_state = deepcopy(state)
blocks = []
for epoch in range(4):
prev_state, new_blocks, test_state = next_epoch_with_attestations(test_state, True, False)
blocks += new_blocks
if epoch == 0:
check_finality(test_state, prev_state, False, False, False)
elif epoch == 1:
check_finality(test_state, prev_state, False, False, False)
elif epoch == 2:
check_finality(test_state, prev_state, True, False, False)
elif epoch >= 3:
# rule 4 of finality
check_finality(test_state, prev_state, True, True, True)
assert test_state.finalized_epoch == prev_state.current_justified_epoch
assert test_state.finalized_root == prev_state.current_justified_root
return state, blocks, test_state
|
40 |
def normalize_ddc(ddc):
"""
:param str ddc:
:rtype: list of str
"""
ddc = collapse_multiple_space(ddc.strip()).replace('/', '').replace("'", '')
results = []
for match in DDC_RE.finditer(ddc):
parts = match.groupdict()
prefix = ''
suffix = ''
# DDCs should start at word boundaries
start = match.start()
if start > 0 and re.search(r'\b', ddc[start - 1]):
continue
# And end at them
end = match.end()
if end < (len(ddc) - 1) and re.search(r'\b', ddc[end]):
continue
# Some old standard which isn't used anymore; might need to filter these
# out, but they should sort OK so let's keep them.
if parts['neg']:
prefix += '-'
# Juvenile prefix
if parts['j']:
prefix += 'j'
# Star should be at end
if parts['prestar'] or parts['poststar']:
suffix = '*'
# Series suffix
if parts['s']:
suffix += ' s'
# Biographical
if parts['B']:
suffix += ' B'
# Not at all sure
if parts['ninetwo']:
suffix += parts['ninetwo']
# And now the actual number!
if parts['number']:
# Numbers in parenthesis are "series" numbers
end = match.end('number')
if end < len(ddc) and ddc[end] == ')':
suffix += ' s'
# pad the integer part of the number
number_parts = parts['number'].split('.')
integer = number_parts[0]
# Copy decimal without losing precision
decimal = '.' + number_parts[1] if len(number_parts) > 1 else ''
number = '%03d%s' % (int(integer), decimal)
# Handle [Fic] or [E]
elif parts['fic']:
number = '[%s]' % parts['fic'].title()
else:
continue
results.append(prefix + number + suffix)
return results
|
def normalize_ddc(ddc):
"""
:param str ddc:
:rtype: list of str
"""
ddc = collapse_multiple_space(ddc.strip()).replace('/', '').replace("'", '')
results = []
for match in DDC_RE.finditer(ddc):
parts = match.groupdict()
prefix = ''
suffix = ''
# DDCs should start at word boundaries
start = match.start()
if start > 0 and re.search(r'\b', ddc[start - 1]):
return ''
# And end at them
end = match.end()
if end < (len(ddc) - 1) and re.search(r'\b', ddc[end]):
continue
# Some old standard which isn't used anymore; might need to filter these
# out, but they should sort OK so let's keep them.
if parts['neg']:
prefix += '-'
# Juvenile prefix
if parts['j']:
prefix += 'j'
# Star should be at end
if parts['prestar'] or parts['poststar']:
suffix = '*'
# Series suffix
if parts['s']:
suffix += ' s'
# Biographical
if parts['B']:
suffix += ' B'
# Not at all sure
if parts['ninetwo']:
suffix += parts['ninetwo']
# And now the actual number!
if parts['number']:
# Numbers in parenthesis are "series" numbers
end = match.end('number')
if end < len(ddc) and ddc[end] == ')':
suffix += ' s'
# pad the integer part of the number
number_parts = parts['number'].split('.')
integer = number_parts[0]
# Copy decimal without losing precision
decimal = '.' + number_parts[1] if len(number_parts) > 1 else ''
number = '%03d%s' % (int(integer), decimal)
# Handle [Fic] or [E]
elif parts['fic']:
number = '[%s]' % parts['fic'].title()
else:
continue
results.append(prefix + number + suffix)
return results
|
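A stand-alone illustration of the padding step inside the loop above: the integer part of the DDC number is zero-padded to three digits and any decimal part is carried over verbatim (the input value is invented).
number_parts = "92.54".split(".")
integer = number_parts[0]
decimal = "." + number_parts[1] if len(number_parts) > 1 else ""
print("%03d%s" % (int(integer), decimal))  # prints 092.54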
14,882 |
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the Openhome platform."""
if not discovery_info:
return True
if DATA_OPENHOME not in hass.data:
hass.data[DATA_OPENHOME] = []
name = discovery_info.get("name")
description = discovery_info.get("ssdp_description")
_LOGGER.info("Openhome device found: %s", name)
device = Device(description)
# if device has already been discovered
if device.Uuid() in [x.unique_id for x in hass.data[DATA_OPENHOME]]:
return True
hass_device = OpenhomeDevice(hass, device)
add_entities([hass_device], True)
hass.data[DATA_OPENHOME].append(hass_device)
def service_handle(call):
"""Handle the service call."""
entity_id = call.data.get(ATTR_ENTITY_ID)
device = next(
[
device
for device in hass.data[DATA_OPENHOME]
if device.entity_id == entity_id
].__iter__(),
None,
)
if not device:
return
if call.service == SERVICE_INVOKE_PIN:
index = call.data.get(ATTR_PIN_INDEX)
_LOGGER.info("Openhome invoking pin %s on %s", index, entity_id)
if index:
device.invoke_pin(index)
hass.services.register(
DOMAIN, SERVICE_INVOKE_PIN, service_handle, schema=OPENHOME_PIN_SCHEMA
)
return True
|
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the Openhome platform."""
if not discovery_info:
return True
if DATA_OPENHOME not in hass.data:
hass.data[DATA_OPENHOME] = []
name = discovery_info.get("name")
description = discovery_info.get("ssdp_description")
_LOGGER.info("Openhome device found: %s", name)
device = Device(description)
# if device has already been discovered
if device.Uuid() in [x.unique_id for x in hass.data[DATA_OPENHOME]]:
return True
entity = OpenhomeDevice(hass, device)
add_entities([hass_device], True)
hass.data[DATA_OPENHOME].append(hass_device)
def service_handle(call):
"""Handle the service call."""
entity_id = call.data.get(ATTR_ENTITY_ID)
device = next(
[
device
for device in hass.data[DATA_OPENHOME]
if device.entity_id == entity_id
].__iter__(),
None,
)
if not device:
return
if call.service == SERVICE_INVOKE_PIN:
index = call.data.get(ATTR_PIN_INDEX)
_LOGGER.info("Openhome invoking pin %s on %s", index, entity_id)
if index:
device.invoke_pin(index)
hass.services.register(
DOMAIN, SERVICE_INVOKE_PIN, service_handle, schema=OPENHOME_PIN_SCHEMA
)
return True
|
42,339 |
def get_role_list(collection=None, playbook_dir=None, **kwargs):
'''
Run an ``ansible-doc`` command to get list of installed collection roles.
Only roles that have an argument specification defined are returned.
.. note:: Version added: 2.2
:param str collection: A fully qualified collection name used to filter the results.
:param str playbook_dir: This parameter is used to set the relative path to handle playbook adjacent installed roles.
:param str runner_mode: The applicable values are ``pexpect`` and ``subprocess``. Default is set to ``subprocess``.
:param str host_cwd: The host current working directory to be mounted within the container (if enabled) and will be
the work directory within container.
:param dict envvars: Environment variables to be used when running Ansible. Environment variables will also be
read from ``env/envvars`` in ``private_data_dir``
:param dict passwords: A dictionary containing password prompt patterns and response values used when processing output from
Ansible. Passwords will also be read from ``env/passwords`` in ``private_data_dir``.
:param dict settings: A dictionary containing settings values for the ``ansible-runner`` runtime environment. These will also
be read from ``env/settings`` in ``private_data_dir``.
:param str ssh_key: The ssh private key passed to ``ssh-agent`` as part of the ansible-playbook run.
:param bool quiet: Disable all output
:param bool json_mode: Store event data in place of stdout on the console and in the stdout file
:param str artifact_dir: The path to the directory where artifacts should live, this defaults to 'artifacts' under the private data dir
:param str project_dir: The path to the playbook content, this defaults to 'project' within the private data dir
:param int rotate_artifacts: Keep at most n artifact directories, disable with a value of 0 which is the default
:param int timeout: The timeout value in seconds that will be passed to either the ``pexpect`` or ``subprocess`` invocation
(based on the ``runner_mode`` selected) while executing the command. If the timeout is triggered it will force cancel the execution.
:param bool process_isolation: Enable process isolation, using a container engine (e.g. podman).
:param str process_isolation_executable: Process isolation executable or container engine used to isolate execution. (default: podman)
:param str container_image: Container image to use when running an ansible task (default: quay.io/ansible/ansible-runner:devel)
:param list container_volume_mounts: List of bind mounts in the form 'host_dir:/container_dir:labels. (default: None)
:param list container_options: List of container options to pass to execution engine.
:param str container_workdir: The working directory within the container.
:param str fact_cache: A string that will be used as the name for the subdirectory of the fact cache in artifacts directory.
This is only used for 'jsonfile' type fact caches.
:param str fact_cache_type: A string of the type of fact cache to use. Defaults to 'jsonfile'.
:param str private_data_dir: The directory containing all runner metadata needed to invoke the runner
module. Output artifacts will also be stored here for later consumption.
:param str ident: The run identifier for this invocation of Runner. Will be used to create and name
the artifact directory holding the results of the invocation.
:param function event_handler: An optional callback that will be invoked any time an event is received by Runner itself, return True to keep the event
:param function cancel_callback: An optional callback that can inform runner to cancel (returning True) or not (returning False)
:param function finished_callback: An optional callback that will be invoked at shutdown after process cleanup.
:param function status_handler: An optional callback that will be invoked any time the status changes (e.g. started, running, failed, successful, timeout)
:param function artifacts_handler: An optional callback that will be invoked at the end of the run to deal with the artifacts from the run.
:param bool check_job_event_data: Check if job events data is completely generated. If event data is not completely generated and if
value is set to 'True' it will raise 'AnsibleRunnerException' exception, if set to 'False' it logs a debug message and continues execution.
Default value is 'False'
:returns: A tuple of response and error string. The response is a python dictionary object
(as returned by ansible-doc JSON output) containing each role found, or an empty dict
if none are found.
'''
event_callback_handler = kwargs.pop('event_handler', None)
status_callback_handler = kwargs.pop('status_handler', None)
artifacts_handler = kwargs.pop('artifacts_handler', None)
cancel_callback = kwargs.pop('cancel_callback', None)
finished_callback = kwargs.pop('finished_callback', None)
rd = DocConfig(**kwargs)
rd.prepare_role_list_command(collection, playbook_dir)
r = Runner(rd,
event_handler=event_callback_handler,
status_handler=status_callback_handler,
artifacts_handler=artifacts_handler,
cancel_callback=cancel_callback,
finished_callback=finished_callback)
r.run()
response = r.stdout.read()
error = r.stderr.read()
if response:
response = json.loads(sanitize_json_response(response))
return response, error
|
def get_role_list(collection=None, playbook_dir=None, **kwargs):
'''
Run an ``ansible-doc`` command to get list of installed collection roles.
Only roles that have an argument specification defined are returned.
.. note:: Version added: 2.2
:param str collection: A fully qualified collection name used to filter the results.
:param str playbook_dir: This parameter is used to set the relative path to handle playbook adjacent installed roles.
:param str runner_mode: The applicable values are ``pexpect`` and ``subprocess``. Default is set to ``subprocess``.
:param str host_cwd: The host current working directory to be mounted within the container (if enabled) and will be
the work directory within container.
:param dict envvars: Environment variables to be used when running Ansible. Environment variables will also be
read from ``env/envvars`` in ``private_data_dir``
:param dict passwords: A dictionary containing password prompt patterns and response values used when processing output from
Ansible. Passwords will also be read from ``env/passwords`` in ``private_data_dir``.
:param dict settings: A dictionary containing settings values for the ``ansible-runner`` runtime environment. These will also
be read from ``env/settings`` in ``private_data_dir``.
:param str ssh_key: The ssh private key passed to ``ssh-agent`` as part of the ansible-playbook run.
:param bool quiet: Disable all output
:param bool json_mode: Store event data in place of stdout on the console and in the stdout file
:param str artifact_dir: The path to the directory where artifacts should live, this defaults to 'artifacts' under the private data dir
:param str project_dir: The path to the playbook content, this defaults to 'project' within the private data dir
:param int rotate_artifacts: Keep at most n artifact directories, disable with a value of 0 which is the default
:param int timeout: The timeout value in seconds that will be passed to either the ``pexpect`` or ``subprocess`` invocation
(based on the ``runner_mode`` selected) while executing the command. If the timeout is triggered it will force cancel the execution.
:param bool process_isolation: Enable process isolation, using a container engine (e.g. podman).
:param str process_isolation_executable: Process isolation executable or container engine used to isolate execution. (default: podman)
:param str container_image: Container image to use when running an Ansible task (default: quay.io/ansible/ansible-runner:devel)
:param list container_volume_mounts: List of bind mounts in the form 'host_dir:/container_dir:labels. (default: None)
:param list container_options: List of container options to pass to execution engine.
:param str container_workdir: The working directory within the container.
:param str fact_cache: A string that will be used as the name for the subdirectory of the fact cache in artifacts directory.
This is only used for 'jsonfile' type fact caches.
:param str fact_cache_type: A string of the type of fact cache to use. Defaults to 'jsonfile'.
:param str private_data_dir: The directory containing all runner metadata needed to invoke the runner
module. Output artifacts will also be stored here for later consumption.
:param str ident: The run identifier for this invocation of Runner. Will be used to create and name
the artifact directory holding the results of the invocation.
:param function event_handler: An optional callback that will be invoked any time an event is received by Runner itself, return True to keep the event
:param function cancel_callback: An optional callback that can inform runner to cancel (returning True) or not (returning False)
:param function finished_callback: An optional callback that will be invoked at shutdown after process cleanup.
:param function status_handler: An optional callback that will be invoked any time the status changes (e.g. started, running, failed, successful, timeout)
:param function artifacts_handler: An optional callback that will be invoked at the end of the run to deal with the artifacts from the run.
:param bool check_job_event_data: Check if job events data is completely generated. If event data is not completely generated and if
value is set to 'True' it will raise 'AnsibleRunnerException' exception, if set to 'False' it logs a debug message and continues execution.
Default value is 'False'
:returns: A tuple of response and error string. The response is a python dictionary object
(as returned by ansible-doc JSON output) containing each role found, or an empty dict
if none are found.
'''
event_callback_handler = kwargs.pop('event_handler', None)
status_callback_handler = kwargs.pop('status_handler', None)
artifacts_handler = kwargs.pop('artifacts_handler', None)
cancel_callback = kwargs.pop('cancel_callback', None)
finished_callback = kwargs.pop('finished_callback', None)
rd = DocConfig(**kwargs)
rd.prepare_role_list_command(collection, playbook_dir)
r = Runner(rd,
event_handler=event_callback_handler,
status_handler=status_callback_handler,
artifacts_handler=artifacts_handler,
cancel_callback=cancel_callback,
finished_callback=finished_callback)
r.run()
response = r.stdout.read()
error = r.stderr.read()
if response:
response = json.loads(sanitize_json_response(response))
return response, error
|
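A hedged usage sketch; it assumes ansible-runner 2.2+ exposes get_role_list at the package level and that the named collection is installed locally.
import ansible_runner

response, error = ansible_runner.get_role_list(collection="community.general")
if response:
    print(sorted(response))  # role names keyed in the ansible-doc JSON output
else:
    print(error)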
20,026 |
def image_fusion(img1, img2, wvs1, wvs2, array_type = None, filename = None):
""" Fuse two images of the same size together with given wavelengths representing and make a Spectral_data instance
img1: 1st image to be fused
img2: 2nd image to be fused
wvs1: list of wavelengths represent bands in img1
wvs2: list of wavelengths represent bands in img2
array_type: (optional) description of the fused array
filename: (optional) desired filename of the fused array
:param img1: np.ndarray
:param img2: np.ndarray
:param wvs1: list
:param wvs2: list
:param array_type: str
:param filename: str
:return: fused_array (a Spectral_data instance)
"""
if len(img1.shape) == 2:
img1 = np.expand_dims(img1,axis=2)
r1, c1, b1 = img1.shape
if len(img2.shape) == 2:
img2 = np.expand_dims(img2,axis=2)
r2, c2, b2 = img2.shape
if (r1,c1) != (r2,c2):
fatal_error("Input images should have the same image size")
array_data = np.concatenate((img1, img2), axis=2)
# sort all wavelengths
wavelengths = np.array(wvs1 + wvs2)
ind = np.argsort(wavelengths)
wavelengths = wavelengths[ind]
wavelength_dict = dict()
for (idx, wv) in enumerate(wavelengths):
wavelength_dict[wv] = float(idx)
# sort array_data based on wavelengths
array_data = array_data[:,:,ind]
array_data = (array_data / 255).astype(np.float32)
max_pixel = float(np.amax(array_data))
min_pixel = float(np.amin(array_data))
d_type = array_data.dtype
r, c, b = array_data.shape
fused_array = Spectral_data(array_data=array_data,
max_wavelength=float(max(wavelengths)),
min_wavelength=float(min(wavelengths)),
max_value=max_pixel, min_value=min_pixel,
d_type=d_type,
wavelength_dict=wavelength_dict, samples=int(r * c),
lines=int(b), interleave="bil",
wavelength_units="nm", array_type=array_type,
pseudo_rgb=None, filename=filename, default_bands=None)
# Make pseudo-rgb image and replace it inside the class instance object
pseudo_rgb = _make_pseudo_rgb(fused_array)
fused_array.pseudo_rgb = pseudo_rgb
_debug(visual=pseudo_rgb, filename=os.path.join(params.debug_outdir, str(params.device) + "_fused_pseudo_rgb.png"))
return fused_array
|
def image_fusion(img1, img2, wvs1, wvs2, array_type = None, filename = None):
""" Fuse two images of the same size together with given wavelengths representing and make a Spectral_data instance
img1: 1st image to be fused
img2: 2nd image to be fused
wvs1: list of wavelengths represent bands in img1
wvs2: list of wavelengths represent bands in img2
array_type: (optional) description of the fused array
filename: (optional) desired filename of the fused array
:param img1: np.ndarray
:param img2: np.ndarray
:param wvs1: list
:param wvs2: list
:param array_type: str
:param filename: str
:return: fused_array (a Spectral_data instance)
"""
if len(img1.shape) == 2:
img1 = np.expand_dims(img1, axis=2)
r1, c1, b1 = img1.shape
if len(img2.shape) == 2:
img2 = np.expand_dims(img2,axis=2)
r2, c2, b2 = img2.shape
if (r1,c1) != (r2,c2):
fatal_error("Input images should have the same image size")
array_data = np.concatenate((img1, img2), axis=2)
# sort all wavelengths
wavelengths = np.array(wvs1 + wvs2)
ind = np.argsort(wavelengths)
wavelengths = wavelengths[ind]
wavelength_dict = dict()
for (idx, wv) in enumerate(wavelengths):
wavelength_dict[wv] = float(idx)
# sort array_data based on wavelengths
array_data = array_data[:,:,ind]
array_data = (array_data / 255).astype(np.float32)
max_pixel = float(np.amax(array_data))
min_pixel = float(np.amin(array_data))
d_type = array_data.dtype
r, c, b = array_data.shape
fused_array = Spectral_data(array_data=array_data,
max_wavelength=float(max(wavelengths)),
min_wavelength=float(min(wavelengths)),
max_value=max_pixel, min_value=min_pixel,
d_type=d_type,
wavelength_dict=wavelength_dict, samples=int(r * c),
lines=int(b), interleave="bil",
wavelength_units="nm", array_type=array_type,
pseudo_rgb=None, filename=filename, default_bands=None)
# Make pseudo-rgb image and replace it inside the class instance object
pseudo_rgb = _make_pseudo_rgb(fused_array)
fused_array.pseudo_rgb = pseudo_rgb
_debug(visual=pseudo_rgb, filename=os.path.join(params.debug_outdir, str(params.device) + "_fused_pseudo_rgb.png"))
return fused_array
|
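A hedged usage sketch with synthetic 8-bit images; the plantcv import path and the top-level exposure of image_fusion are assumptions that may vary by plantcv version.
import numpy as np
from plantcv import plantcv as pcv  # assumption: image_fusion is exposed here

vis = np.random.randint(0, 255, (50, 50, 3), dtype=np.uint8)  # three "VIS" bands
nir = np.random.randint(0, 255, (50, 50), dtype=np.uint8)     # one "NIR" band
fused = pcv.image_fusion(vis, nir, [480.0, 550.0, 670.0], [800.0],
                         array_type="vis-nir_fusion")
print(fused.array_data.shape)  # (50, 50, 4), bands sorted by wavelength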
24,934 |
def _write_message_page(messages_dict: MessagesDict) -> None:
"""Create or overwrite the file for each message."""
for category, messages in messages_dict.items():
category_dir = PYLINT_MESSAGES_PATH / category
if not category_dir.exists():
category_dir.mkdir(parents=True, exist_ok=True)
for message in messages:
messages_file = os.path.join(category_dir, f"{message.name}.rst")
with open(messages_file, "w", encoding="utf-8") as stream:
stream.write(
f""".. _{message.name}:
{get_rst_title(f"{message.name} / {message.id}", "=")}
**Message emitted:**
{message.definition.msg}
**Description:**
*{message.definition.description}*
{message.good_code}
{message.bad_code}
{message.details}
{message.related_links}
"""
)
if message.checker_module.startswith("pylint.extensions."):
stream.write(
f"""
.. note::
This check requires ``{message.checker_module}`` plugin to be loaded. See: :ref:`{message.checker_module}`
"""
)
stream.write(f"""Created by ``{message.checker}`` checker""")
|
def _write_message_page(messages_dict: MessagesDict) -> None:
"""Create or overwrite the file for each message."""
for category, messages in messages_dict.items():
category_dir = PYLINT_MESSAGES_PATH / category
if not category_dir.exists():
category_dir.mkdir(parents=True, exist_ok=True)
for message in messages:
messages_file = os.path.join(category_dir, f"{message.name}.rst")
with open(messages_file, "w", encoding="utf-8") as stream:
stream.write(
f""".. _{message.name}:
{get_rst_title(f"{message.name} / {message.id}", "=")}
**Message emitted:**
{message.definition.msg}
**Description:**
*{message.definition.description}*
{message.good_code}
{message.bad_code}
{message.details}
{message.related_links}
"""
)
if message.checker_module.startswith("pylint.extensions."):
stream.write(
f"""
.. note::
This check requires the ``{message.checker_module}`` plugin to be loaded. See: :ref:`{message.checker_module}`
"""
)
stream.write(f"""Created by ``{message.checker}`` checker""")
|
33,516 |
def save_for_retrospection(id: str, region: str, **kwargs: Dict[str, Any]):
"""Save a message for retrospection.
The email is saved to filesystem and is also made accessible via a service endpoint.
kwargs should consist of following keys related to the email:
- Body
- Destinations
- RawData
- Source
- Subject
- Template
- TemplateData
"""
ses_dir = os.path.join(config.dirs.data or config.dirs.tmp, "ses")
mkdir(ses_dir)
path = os.path.join(ses_dir, id + ".json")
email = {"Id": id, "Region": region, **kwargs}
EMAILS[id] = email
def _serialize(obj):
"""JSON serializer for timestamps."""
if isinstance(obj, (datetime, date, time)):
return obj.isoformat()
return obj.__dict__
with open(path, "w") as f:
f.write(json.dumps(email, default=_serialize))
LOGGER.debug(f"Email saved at: {path}")
|
def save_for_retrospection(id: str, region: str, **kwargs: Dict[str, Any]):
"""Save a message for retrospection.
The email is saved to filesystem and is also made accessible via a service endpoint.
kwargs should consist of following keys related to the email:
- Body
- Destinations
- RawData
- Source
- Subject
- Template
- TemplateData
"""
ses_dir = os.path.join(config.dirs.data or config.dirs.tmp, "ses")
mkdir(ses_dir)
path = os.path.join(ses_dir, id + ".json")
email = {"Id": id, "Region": region, **kwargs}
EMAILS[id] = email
def _serialize(obj):
"""JSON serializer for timestamps."""
if isinstance(obj, (datetime, date, time)):
return obj.isoformat()
return obj.__dict__
with open(path, "w") as f:
f.write(json.dumps(email, default=_serialize))
LOGGER.debug("Email saved at: %s", path)
|
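The default= hook is the standard way to let json.dumps handle datetime values; a self-contained sketch of the same pattern with a made-up payload.
import json
from datetime import date, datetime, time

def _serialize(obj):
    """JSON serializer for timestamps."""
    if isinstance(obj, (datetime, date, time)):
        return obj.isoformat()
    return obj.__dict__

payload = {"Id": "msg-1", "Region": "us-east-1", "Timestamp": datetime(2023, 1, 1, 12, 0)}
print(json.dumps(payload, default=_serialize))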
19,336 |
def remove_linked_tree(path):
"""Removes a directory and its contents.
If the directory is a symlink, follows the link and removes the real
directory before removing the link.
Parameters:
path (str): Directory to be removed
"""
if os.path.exists(path):
if os.path.islink(path):
shutil.rmtree(os.path.realpath(path), True)
os.unlink(path)
else:
shutil.rmtree(path, True)
|
def remove_linked_tree(path):
"""Removes a directory and its contents.
If the directory is a symlink, follows the link and removes the real
continue
Parameters:
path (str): Directory to be removed
"""
if os.path.exists(path):
if os.path.islink(path):
shutil.rmtree(os.path.realpath(path), True)
os.unlink(path)
else:
shutil.rmtree(path, True)
|
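A self-contained sketch of the symlink case handled above: the real directory behind the link is removed first, then the link itself. Paths are throwaway temp locations, creating symlinks may require extra privileges on Windows, and remove_linked_tree is assumed to be in scope.
import os
import shutil
import tempfile

base = tempfile.mkdtemp()
real_dir = os.path.join(base, "real")
link = os.path.join(base, "link")
os.mkdir(real_dir)
os.symlink(real_dir, link)
remove_linked_tree(link)
print(os.path.exists(real_dir), os.path.lexists(link))  # False False
shutil.rmtree(base, ignore_errors=True)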
30,799 |
def main():
params = demisto.params()
args = demisto.args()
proxies = handle_proxy()
verify = not params.get('insecure')
base_url = params.get('url')
client = Client(base_url, verify=verify, proxy=proxies)
command = demisto.command()
demisto.info(f"Command being executed is {command}")
try:
commands = {
'spacex-get-company-info': get_company_info_command,
'spacex-get-api-info': get_api_info_command,
'spacex-get-launches': get_launches_command,
'spacex-get-upcoming-launches': get_upcoming_launches_command,
'spacex-get-launch-details': get_launch_details_command,
'spacex-get-next-launch': get_next_launch_command,
'spacex-get-launch-images': get_launch_images_command,
'spacex-get-landing-pads': get_all_landing_pads_command,
'spacex-get-landing-pad': get_landing_pad_command,
'spacex-get-roadster': get_roadster_data_command,
'spacex-get-missions': get_all_missions_command,
'spacex-get-mission': get_mission_command,
'spacex-get-launch-pads': get_launch_pads_command,
'spacex-get-launch-pad': get_launch_pad_command,
'spacex-get-ships': get_ships_command,
'spacex-get-ship': get_ship_command,
'spacex-get-capsules': get_capsules_command,
'spacex-get-capsule': get_capsule_command,
'spacex-get-upcoming-capsules': get_upcoming_capsules_command,
'spacex-get-past-capsules': get_past_capsules_command,
'spacex-get-cores': get_cores_command,
'spacex-get-core': get_core_command,
'spacex-get-upcoming-cores': get_upcoming_cores_command,
'spacex-get-past-cores': get_past_cores_command,
'spacex-get-dragons': get_dragons_command,
'spacex-get-dragon': get_dragon_command,
'spacex-get-historical-events': get_historical_events_command,
'spacex-get-historical-event': get_historical_event_command,
'spacex-get-payloads': get_payloads_command,
'spacex-get-payload': get_payload_command,
'spacex-get-rockets': get_rockets_command,
'spacex-get-rocket': get_rocket_command,
}
if command == 'fetch-incidents':
fetch_incidents_command(client, params)
elif command == 'test-module':
test_module(client, params)
elif command == 'get-mapping-fields':
demisto.results(get_mapping_fields_command(client, args, params))
elif command == 'get-remote-data':
demisto.results(get_remote_data_command(client, args, params))
elif command == 'update-remote-system':
demisto.results(update_remote_system_command(client, args, params))
elif command in commands:
commands[command](client, args)
else:
return_error(f"{command} not recognised")
except Exception as err:
return_error(str(err))
|
def main():
params = demisto.params()
args = demisto.args()
proxies = handle_proxy()
verify = not params.get('insecure')
base_url = params.get('url')
client = Client(base_url, verify=verify, proxy=proxies)
command = demisto.command()
demisto.info(f"Command being executed is {command}")
try:
commands = {
'spacex-get-company-info': get_company_info_command,
'spacex-get-api-info': get_api_info_command,
'spacex-get-launches': get_launches_command,
'spacex-get-upcoming-launches': get_upcoming_launches_command,
'spacex-get-launch-details': get_launch_details_command,
'spacex-get-next-launch': get_next_launch_command,
'spacex-get-launch-images': get_launch_images_command,
'spacex-get-landing-pads': get_all_landing_pads_command,
'spacex-get-landing-pad': get_landing_pad_command,
'spacex-get-roadster': get_roadster_data_command,
'spacex-get-missions': get_all_missions_command,
'spacex-get-mission': get_mission_command,
'spacex-get-launch-pads': get_launch_pads_command,
'spacex-get-launch-pad': get_launch_pad_command,
'spacex-get-ships': get_ships_command,
'spacex-get-ship': get_ship_command,
'spacex-get-capsules': get_capsules_command,
'spacex-get-capsule': get_capsule_command,
'spacex-get-upcoming-capsules': get_upcoming_capsules_command,
'spacex-get-past-capsules': get_past_capsules_command,
'spacex-get-cores': get_cores_command,
'spacex-get-core': get_core_command,
'spacex-get-upcoming-cores': get_upcoming_cores_command,
'spacex-get-past-cores': get_past_cores_command,
'spacex-get-dragons': get_dragons_command,
'spacex-get-dragon': get_dragon_command,
'spacex-get-historical-events': get_historical_events_command,
'spacex-get-historical-event': get_historical_event_command,
'spacex-get-payloads': get_payloads_command,
'spacex-get-payload': get_payload_command,
'spacex-get-rockets': get_rockets_command,
'spacex-get-rocket': get_rocket_command,
}
if command == 'fetch-incidents':
fetch_incidents_command(client, params)
elif command == 'test-module':
test_module(client, params)
elif command == 'get-mapping-fields':
demisto.results(get_mapping_fields_command(client, args, params))
elif command == 'get-remote-data':
demisto.results(get_remote_data_command(client, args, params))
elif command == 'update-remote-system':
demisto.results(update_remote_system_command(client, args, params))
elif command in commands:
commands[command](client, args)
else:
return_error(f"{command} does not exist in SpaceX integration.")
except Exception as err:
return_error(str(err))
|
29,524 |
def _check_tcpdump():
"""
Return True if the tcpdump command can be started
"""
try:
proc = subprocess.Popen(
[conf.prog.tcpdump, "--version"],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT
)
output = proc.communicate()[0]
except OSError:
return False
# tcpdump acts strangely on some OSes and returns 1
# therefore we also check the output
return b"tcpdump" in output or proc.returncode == 0
|
def _check_tcpdump():
"""
Return True if the tcpdump command can be started
"""
try:
proc = subprocess.Popen(
[conf.prog.tcpdump, "--version"],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT
)
output = proc.communicate()[0]
except OSError:
return False
# tcpdump acts strangely on some OSes and returns 1
# therefore we also check the output
return proc.returncode == 0 or output.startswith(b'Usage: tcpdump ')
|
5,513 |
def test_user_edit_websites(wiki_user, wiki_user_github_account, user_client):
url = reverse("users.user_edit", args=(wiki_user.username,))
response = user_client.get(url)
assert response.status_code == 200
assert_no_cache_header(response)
doc = pq(response.content)
test_sites = {
"twitter": "http://twitter.com/lmorchard",
"stackoverflow": "http://stackoverflow.com/users/lmorchard",
"linkedin": "https://www.linkedin.com/in/testuser",
"pmo": "https://people.mozilla.org/u/testuser",
"facebook": "https://www.facebook.com/test.user",
}
form = _get_current_form_field_values(doc)
# Fill out the form with websites.
form.update(dict(("user-%s_url" % k, v) for k, v in test_sites.items()))
# Filter out keys with `None` values
form = {k: v for k, v in form.items() if v is not None}
# Submit the form, verify redirect to user detail
response = user_client.post(url, form, follow=True)
assert response.status_code == 200
doc = pq(response.content)
assert doc.find("#user-head").length == 1
wiki_user.refresh_from_db()
# Verify the websites are saved in the user.
for site, site_url in test_sites.items():
url_attr_name = "%s_url" % site
assert getattr(wiki_user, url_attr_name) == site_url
# Verify the saved websites appear in the editing form
response = user_client.get(url)
assert response.status_code == 200
doc = pq(response.content)
for k, v in test_sites.items():
assert doc.find('#user-edit *[name="user-%s_url"]' % k).val() == v
# Github is not an editable field
github_div = doc.find("#field_github_url div.field-account")
github_acct = wiki_user.socialaccount_set.get()
assert to_html(github_div).strip() == github_acct.get_profile_url()
# Come up with some bad sites, either invalid URL or bad URL prefix
bad_sites = {
"linkedin": "HAHAHA WHAT IS A WEBSITE",
"twitter": "http://facebook.com/lmorchard",
"stackoverflow": "http://overqueueblah.com/users/lmorchard",
}
form.update(dict(("user-%s_url" % k, v) for k, v in bad_sites.items()))
# Submit the form, verify errors for all of the bad sites
response = user_client.post(url, form, follow=True)
doc = pq(response.content)
assert doc.find("#user-edit").length == 1
tmpl = "#user-edit #users .%s .errorlist"
for n in ("linkedin", "twitter", "stackoverflow"):
assert doc.find(tmpl % n).length == 1
|
def test_user_edit_websites(wiki_user, wiki_user_github_account, user_client):
url = reverse("users.user_edit", args=(wiki_user.username,))
response = user_client.get(url)
assert response.status_code == 200
assert_no_cache_header(response)
doc = pq(response.content)
test_sites = {
"twitter": "http://twitter.com/lmorchard",
"stackoverflow": "http://stackoverflow.com/users/lmorchard",
"linkedin": "https://www.linkedin.com/in/testuser",
"pmo": "https://people.mozilla.org/p/testuser",
"facebook": "https://www.facebook.com/test.user",
}
form = _get_current_form_field_values(doc)
# Fill out the form with websites.
form.update(dict(("user-%s_url" % k, v) for k, v in test_sites.items()))
# Filter out keys with `None` values
form = {k: v for k, v in form.items() if v is not None}
# Submit the form, verify redirect to user detail
response = user_client.post(url, form, follow=True)
assert response.status_code == 200
doc = pq(response.content)
assert doc.find("#user-head").length == 1
wiki_user.refresh_from_db()
# Verify the websites are saved in the user.
for site, site_url in test_sites.items():
url_attr_name = "%s_url" % site
assert getattr(wiki_user, url_attr_name) == site_url
# Verify the saved websites appear in the editing form
response = user_client.get(url)
assert response.status_code == 200
doc = pq(response.content)
for k, v in test_sites.items():
assert doc.find('#user-edit *[name="user-%s_url"]' % k).val() == v
# Github is not an editable field
github_div = doc.find("#field_github_url div.field-account")
github_acct = wiki_user.socialaccount_set.get()
assert to_html(github_div).strip() == github_acct.get_profile_url()
# Come up with some bad sites, either invalid URL or bad URL prefix
bad_sites = {
"linkedin": "HAHAHA WHAT IS A WEBSITE",
"twitter": "http://facebook.com/lmorchard",
"stackoverflow": "http://overqueueblah.com/users/lmorchard",
}
form.update(dict(("user-%s_url" % k, v) for k, v in bad_sites.items()))
# Submit the form, verify errors for all of the bad sites
response = user_client.post(url, form, follow=True)
doc = pq(response.content)
assert doc.find("#user-edit").length == 1
tmpl = "#user-edit #users .%s .errorlist"
for n in ("linkedin", "twitter", "stackoverflow"):
assert doc.find(tmpl % n).length == 1
|
29,353 |
def main(args: Optional[Sequence[str]]=None) -> None:
"""Runs the script to clean temporary and installed files."""
unused_parsed_args = _PARSER.parse_args(args=args)
delete_directory_tree(OPPIA_TOOLS_DIR)
delete_directory_tree('node_modules/')
delete_directory_tree('third_party/')
delete_directory_tree('build/')
delete_directory_tree('backend_prod_files/')
delete_file('.coverage')
delete_directory_tree('local_compiled_js/')
delete_directory_tree('local_compiled_js_for_test/')
delete_directory_tree('readme_test_dir/')
delete_file('tsc_output_log.txt')
delete_file('dev_output.txt')
delete_file('.viminfo')
for filename in os.listdir(CURR_DIR):
if filename.startswith('tmpcompiledjs'):
delete_directory_tree(filename)
print('Temporary and installed files deleted')
|
def main(args: Optional[Sequence[str]] = None) -> None:
"""Runs the script to clean temporary and installed files."""
unused_parsed_args = _PARSER.parse_args(args=args)
delete_directory_tree(OPPIA_TOOLS_DIR)
delete_directory_tree('node_modules/')
delete_directory_tree('third_party/')
delete_directory_tree('build/')
delete_directory_tree('backend_prod_files/')
delete_file('.coverage')
delete_directory_tree('local_compiled_js/')
delete_directory_tree('local_compiled_js_for_test/')
delete_directory_tree('readme_test_dir/')
delete_file('tsc_output_log.txt')
delete_file('dev_output.txt')
delete_file('.viminfo')
for filename in os.listdir(CURR_DIR):
if filename.startswith('tmpcompiledjs'):
delete_directory_tree(filename)
print('Temporary and installed files deleted')
|
14,463 |
def run_ansible_lint(
cwd: os.PathLike = None,
role_path: os.PathLike = None,
bin: str = None,
env: Dict = None):
"""Run ansible-lint on a given path and returns its output.
If "." is passed to cwd, the root of the ansible-lint source repository will be used.
"""
command = '{} -v {}'.format(
bin or (sys.executable + " -m ansiblelint"),
role_path or "")
if cwd == ".":
        cwd = pathlib.Path(__file__).parents[1]
result, err = subprocess.Popen(
[command],
cwd=cwd,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
shell=True,
env=env,
).communicate()
return result
|
def run_ansible_lint(
cwd: os.PathLike = None,
role_path: os.PathLike = None,
bin: str = None,
env: Dict[str, str] = None):
"""Run ansible-lint on a given path and returns its output.
If "." is passed to cwd, the root of the ansible-lint source repository will be used.
"""
command = '{} -v {}'.format(
bin or (sys.executable + " -m ansiblelint"),
role_path or "")
if cwd == ".":
        cwd = pathlib.Path(__file__).parents[1]
result, err = subprocess.Popen(
[command],
cwd=cwd,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
shell=True,
env=env,
).communicate()
return result
|
6,579 |
def repost_future_sle(args=None, doc=None, voucher_type=None, voucher_no=None, allow_negative_stock=None, via_landed_cost_voucher=False):
if not args and voucher_type and voucher_no:
args = get_items_to_be_repost(voucher_type, voucher_no, doc)
distinct_item_warehouses = get_distinct_item_warehouse(args, doc)
i = get_current_index(doc) or 0
while i < len(args):
obj = update_entries_after({
"item_code": args[i].get('item_code'),
"warehouse": args[i].get('warehouse'),
"posting_date": args[i].get('posting_date'),
"posting_time": args[i].get('posting_time'),
"creation": args[i].get("creation"),
"distinct_item_warehouses": distinct_item_warehouses
}, allow_negative_stock=allow_negative_stock, via_landed_cost_voucher=via_landed_cost_voucher)
distinct_item_warehouses[(args[i].get('item_code'), args[i].get('warehouse'))].reposting_status = True
if obj.new_items_found:
for item_wh, data in iteritems(distinct_item_warehouses):
if ('args_idx' not in data and not data.reposting_status) or (data.sle_changed and data.reposting_status):
data.args_idx = len(args)
args.append(data.sle)
elif data.sle_changed and not data.reposting_status:
args[data.args_idx] = data.sle
data.sle_changed = False
i += 1
if doc and i % 2 == 0:
update_args_in_repost_item_valuation(doc, i, args, distinct_item_warehouses)
if doc and args:
update_args_in_repost_item_valuation(doc, i, args, distinct_item_warehouses)
|
def repost_future_sle(args=None, voucher_type=None, voucher_no=None, allow_negative_stock=None, via_landed_cost_voucher=False, doc=None):
if not args and voucher_type and voucher_no:
args = get_items_to_be_repost(voucher_type, voucher_no, doc)
distinct_item_warehouses = get_distinct_item_warehouse(args, doc)
i = get_current_index(doc) or 0
while i < len(args):
obj = update_entries_after({
"item_code": args[i].get('item_code'),
"warehouse": args[i].get('warehouse'),
"posting_date": args[i].get('posting_date'),
"posting_time": args[i].get('posting_time'),
"creation": args[i].get("creation"),
"distinct_item_warehouses": distinct_item_warehouses
}, allow_negative_stock=allow_negative_stock, via_landed_cost_voucher=via_landed_cost_voucher)
distinct_item_warehouses[(args[i].get('item_code'), args[i].get('warehouse'))].reposting_status = True
if obj.new_items_found:
for item_wh, data in iteritems(distinct_item_warehouses):
if ('args_idx' not in data and not data.reposting_status) or (data.sle_changed and data.reposting_status):
data.args_idx = len(args)
args.append(data.sle)
elif data.sle_changed and not data.reposting_status:
args[data.args_idx] = data.sle
data.sle_changed = False
i += 1
if doc and i % 2 == 0:
update_args_in_repost_item_valuation(doc, i, args, distinct_item_warehouses)
if doc and args:
update_args_in_repost_item_valuation(doc, i, args, distinct_item_warehouses)
|
24,141 |
def create_trello_card(pull_request_event):
querystring = {
"idList": os.environ[TRELLO_LIST_ID],
"keepFromSource":"all",
"name": pull_request_event.get('title'),
"desc": f"PR URL: {pull_request_event.get('url')} \n {pull_request_event.get('body', '')[:5000]}",
"key": os.environ[TRELLO_KEY_ENV_VAR],
"token": os.environ[TRELLO_TOKEN_ENV_VAR]
}
response = requests.post(TRELLO_API_URL, params=querystring)
response.raise_for_status()
|
def create_trello_card(pull_request_event):
querystring = {
"idList": os.environ[TRELLO_LIST_ID],
"keepFromSource":"all",
"name": pull_request_event.get('title'),
"desc": f"PR URL: {pull_request_event.get('url')}\n\n{pull_request_event.get('body', '')[:5000]}",
"key": os.environ[TRELLO_KEY_ENV_VAR],
"token": os.environ[TRELLO_TOKEN_ENV_VAR]
}
response = requests.post(TRELLO_API_URL, params=querystring)
response.raise_for_status()
|
49,062 |
def expectation(expr, condition=None, numsamples=None, evaluate=True, **kwargs):
"""
Returns the expected value of a random expression
Parameters
==========
expr : Expr containing RandomSymbols
The expression of which you want to compute the expectation value
given : Expr containing RandomSymbols
A conditional expression. E(X, X>0) is expectation of X given X > 0
numsamples : int
Enables sampling and approximates the expectation with this many samples
evalf : Bool (defaults to True)
If sampling return a number rather than a complex expression
evaluate : Bool (defaults to True)
In case of continuous systems return unevaluated integral
Examples
========
>>> from sympy.stats import E, Die
>>> X = Die('X', 6)
>>> E(X)
7/2
>>> E(2*X + 1)
8
>>> E(X, X > 3) # Expectation of X given that it is above 3
5
"""
if not is_random(expr): # expr isn't random?
return expr
kwargs['numsamples'] = numsamples
from sympy.stats.symbolic_probability import Expectation
if evaluate:
return Expectation(expr, condition).doit(**kwargs)
message = ("Using `evaluate=False` now returns `Expectation` object "
"since version 1.7. If you want unevaluated Integral/Sum use "
"`E(expr, condition, evaluate=False).rewrite(Integral)`")
warnings.warn(filldedent(message))
return Expectation(expr, condition)
|
def expectation(expr, condition=None, numsamples=None, evaluate=True, **kwargs):
"""
Returns the expected value of a random expression
Parameters
==========
expr : Expr containing RandomSymbols
The expression of which you want to compute the expectation value
given : Expr containing RandomSymbols
A conditional expression. E(X, X>0) is expectation of X given X > 0
numsamples : int
Enables sampling and approximates the expectation with this many samples
evalf : Bool (defaults to True)
If sampling return a number rather than a complex expression
evaluate : Bool (defaults to True)
In case of continuous systems return unevaluated integral
Examples
========
>>> from sympy.stats import E, Die
>>> X = Die('X', 6)
>>> E(X)
7/2
>>> E(2*X + 1)
8
>>> E(X, X > 3) # Expectation of X given that it is above 3
5
"""
if not is_random(expr): # expr isn't random?
return expr
kwargs['numsamples'] = numsamples
from sympy.stats.symbolic_probability import Expectation
if evaluate:
return Expectation(expr, condition).doit(**kwargs)
message = ("Since version 1.7, using `evaluate=False` returns `Expectation` object. "
"If you want unevaluated Integral/Sum use "
"`E(expr, condition, evaluate=False).rewrite(Integral)`")
warnings.warn(filldedent(message))
return Expectation(expr, condition)
|
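A minimal sketch of the evaluate=False path that the warning above points to, assuming a SymPy version where E forwards to this helper:

from sympy import Integral
from sympy.stats import E, Normal

X = Normal("X", 0, 1)
expr = E(X, evaluate=False)     # returns an unevaluated Expectation object
print(expr.rewrite(Integral))   # the underlying integral, as the warning message suggests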
13,693 |
def _get_and_map_code_owner(row, owner_map):
"""
    From a csv row, takes the theme and squad, creates ownership maps, and returns the code_owner.
Will also warn if the squad appears in multiple themes.
Arguments:
row: A csv row that should have 'owner.theme' and 'owner.squad'.
owner_map: A dict with 'theme_to_owners_map' and 'squad_to_theme_map' keys.
Returns:
The code_owner for the row. This is made from the theme+squad (or squad if there is no theme).
"""
theme = row.get('owner.theme')
squad = row.get('owner.squad')
assert squad, 'Csv row is missing required owner.squad: %s' % row
# use lower case names only
squad = squad.lower()
if theme:
theme = theme.lower()
owner = '{}-{}'.format(theme, squad) if theme else squad
theme = theme or squad
if squad not in owner_map['squad_to_theme_map']:
# store the theme for each squad for a later data integrity check
owner_map['squad_to_theme_map'][squad] = theme
# add to the list of owners for each theme
if theme not in owner_map['theme_to_owners_map']:
owner_map['theme_to_owners_map'][theme] = []
owner_map['theme_to_owners_map'][theme].append(owner)
    # assert that squads have a unique theme. otherwise we have a data integrity issue in the csv.
assert owner_map['squad_to_theme_map'][squad] == theme, \
'Squad %s is associated with theme %s in row %s, but theme %s elsewhere in the csv.' % \
(squad, theme, row, owner_map['squad_to_theme_map'][squad])
return owner
|
def _get_and_map_code_owner(row, owner_map):
"""
    From a csv row, takes the theme and squad, updates ownership maps, and returns the code_owner.
Will also warn if the squad appears in multiple themes.
Arguments:
row: A csv row that should have 'owner.theme' and 'owner.squad'.
owner_map: A dict with 'theme_to_owners_map' and 'squad_to_theme_map' keys.
Returns:
The code_owner for the row. This is made from the theme+squad (or squad if there is no theme).
"""
theme = row.get('owner.theme')
squad = row.get('owner.squad')
assert squad, 'Csv row is missing required owner.squad: %s' % row
# use lower case names only
squad = squad.lower()
if theme:
theme = theme.lower()
owner = '{}-{}'.format(theme, squad) if theme else squad
theme = theme or squad
if squad not in owner_map['squad_to_theme_map']:
# store the theme for each squad for a later data integrity check
owner_map['squad_to_theme_map'][squad] = theme
# add to the list of owners for each theme
if theme not in owner_map['theme_to_owners_map']:
owner_map['theme_to_owners_map'][theme] = []
owner_map['theme_to_owners_map'][theme].append(owner)
    # assert that squads have a unique theme. otherwise we have a data integrity issue in the csv.
assert owner_map['squad_to_theme_map'][squad] == theme, \
'Squad %s is associated with theme %s in row %s, but theme %s elsewhere in the csv.' % \
(squad, theme, row, owner_map['squad_to_theme_map'][squad])
return owner
|
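An illustrative call with a hypothetical csv row and a fresh owner_map, assuming the _get_and_map_code_owner shown above is in scope:

owner_map = {"theme_to_owners_map": {}, "squad_to_theme_map": {}}
row = {"owner.theme": "Platform", "owner.squad": "Arch"}

print(_get_and_map_code_owner(row, owner_map))   # 'platform-arch'
print(owner_map["theme_to_owners_map"])          # {'platform': ['platform-arch']}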
6,936 |
def safe_enqueue(cmd, queue='default', job_name=None, **kwargs):
'''
Enqueue method to be executed using a background worker if not already queued
	:param cmd: whitelisted method or server script api method
:param queue: should be either long, default or short
:param job_name: used to identify an enqueue call, used to prevent duplicate calls
'''
if not job_name:
job_name = cmd
site = frappe.local.site
queued_jobs = get_jobs(site=site, queue=queue, key='job_name').get(site) or []
if job_name not in queued_jobs:
enqueue(
'frappe.utils.safe_exec.execute_enqueued_cmd',
cmd=cmd, job_name=job_name, queue=queue, **kwargs
)
|
def safe_enqueue(cmd, queue='default', job_name=None, **kwargs):
'''
Enqueue method to be executed using a background worker if not already queued
	:param cmd: whitelisted method or server script api method
:param queue: should be either long, default or short
:param job_name: used to identify an enqueue call, used to prevent duplicate calls
'''
job_name = job_name or cmd
site = frappe.local.site
queued_jobs = get_jobs(site=site, queue=queue, key='job_name').get(site) or []
if job_name not in queued_jobs:
enqueue(
'frappe.utils.safe_exec.execute_enqueued_cmd',
cmd=cmd, job_name=job_name, queue=queue, **kwargs
)
|
5,310 |
def creds(provider):
'''
Return the credentials for AWS signing. This could be just the id and key
specified in the provider configuration, or if the id or key is set to the
literal string 'use-instance-role-credentials' creds will pull the instance
role credentials from the meta data, cache them, and provide them instead.
'''
# Declare globals
global __AccessKeyId__, __SecretAccessKey__, __Token__, __Expiration__
ret_credentials = ()
# if id or key is 'use-instance-role-credentials', pull them from meta-data
## if needed
if provider['id'] == IROLE_CODE or provider['key'] == IROLE_CODE:
# Check to see if we have cache credentials that are still good
if __Expiration__ == '' or datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%SZ') > __Expiration__:
# We don't have any cached credentials, or they are expired, get them
# Connections to instance meta-data must fail fast and never be proxied
try:
result = requests.get(
"http://169.254.169.254/latest/meta-data/iam/security-credentials/",
proxies={'http': ''}, timeout=AWS_METADATA_TIMEOUT,
)
result.raise_for_status()
role = result.text
except (requests.exceptions.HTTPError, requests.exceptions.ConnectionError):
return provider['id'], provider['key'], ''
try:
result = requests.get(
"http://169.254.169.254/latest/meta-data/iam/security-credentials/{0}".format(role),
proxies={'http': ''}, timeout=AWS_METADATA_TIMEOUT,
)
result.raise_for_status()
except (requests.exceptions.HTTPError, requests.exceptions.ConnectionError):
return provider['id'], provider['key'], ''
data = result.json()
__AccessKeyId__ = data['AccessKeyId']
__SecretAccessKey__ = data['SecretAccessKey']
__Token__ = data['Token']
__Expiration__ = data['Expiration']
ret_credentials = __AccessKeyId__, __SecretAccessKey__, __Token__
else:
ret_credentials = provider['id'], provider['key'], ''
if provider.get('role_arn') is not None:
provider_shadow = provider.copy()
provider_shadow.pop("role_arn", None)
log.info("Assuming the role: %s", provider.get('role_arn'))
ret_credentials = assumed_creds(provider_shadow, role_arn=provider.get('role_arn'), location='us-east-1')
return ret_credentials
|
def creds(provider):
'''
Return the credentials for AWS signing. This could be just the id and key
specified in the provider configuration, or if the id or key is set to the
literal string 'use-instance-role-credentials' creds will pull the instance
role credentials from the meta data, cache them, and provide them instead.
'''
# Declare globals
global __AccessKeyId__, __SecretAccessKey__, __Token__, __Expiration__
ret_credentials = ()
# if id or key is 'use-instance-role-credentials', pull them from meta-data
## if needed
if provider['id'] == IROLE_CODE or provider['key'] == IROLE_CODE:
# Check to see if we have cache credentials that are still good
if not __Expiration__ or __Expiration__ < datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%SZ'):
# We don't have any cached credentials, or they are expired, get them
# Connections to instance meta-data must fail fast and never be proxied
try:
result = requests.get(
"http://169.254.169.254/latest/meta-data/iam/security-credentials/",
proxies={'http': ''}, timeout=AWS_METADATA_TIMEOUT,
)
result.raise_for_status()
role = result.text
except (requests.exceptions.HTTPError, requests.exceptions.ConnectionError):
return provider['id'], provider['key'], ''
try:
result = requests.get(
"http://169.254.169.254/latest/meta-data/iam/security-credentials/{0}".format(role),
proxies={'http': ''}, timeout=AWS_METADATA_TIMEOUT,
)
result.raise_for_status()
except (requests.exceptions.HTTPError, requests.exceptions.ConnectionError):
return provider['id'], provider['key'], ''
data = result.json()
__AccessKeyId__ = data['AccessKeyId']
__SecretAccessKey__ = data['SecretAccessKey']
__Token__ = data['Token']
__Expiration__ = data['Expiration']
ret_credentials = __AccessKeyId__, __SecretAccessKey__, __Token__
else:
ret_credentials = provider['id'], provider['key'], ''
if provider.get('role_arn') is not None:
provider_shadow = provider.copy()
provider_shadow.pop("role_arn", None)
log.info("Assuming the role: %s", provider.get('role_arn'))
ret_credentials = assumed_creds(provider_shadow, role_arn=provider.get('role_arn'), location='us-east-1')
return ret_credentials
|
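The rewritten expiration check works because fixed-width ISO-8601 timestamps sort lexically in the same order as chronologically; a standalone illustration with a made-up cached value:

from datetime import datetime

cached_expiration = "2020-01-01T00:00:00Z"   # hypothetical cached value
now = datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%SZ')
print(cached_expiration < now)   # True once the expiration has passed, so the credentials get refreshed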
45,908 |
def gaussian_blur2d(input: torch.Tensor,
kernel_size: Tuple[int, int],
sigma: Tuple[float, float],
border_type: str = 'reflect',
separable: bool = True) -> torch.Tensor:
r"""Create an operator that blurs a tensor using a Gaussian filter.
.. image:: _static/img/gaussian_blur2d.png
The operator smooths the given tensor with a gaussian kernel by convolving
it to each channel. It supports batched operation.
Arguments:
input: the input tensor with shape :math:`(B,C,H,W)`.
kernel_size: the size of the kernel.
sigma: the standard deviation of the kernel.
border_type: the padding mode to be applied before convolving.
The expected modes are: ``'constant'``, ``'reflect'``,
``'replicate'`` or ``'circular'``. Default: ``'reflect'``.
separable: run as two 1d-convolutions
Returns:
the blurred tensor with shape :math:`(B, C, H, W)`.
.. note::
See a working example `here <https://kornia-tutorials.readthedocs.io/en/latest/
gaussian_blur.html>`__.
Examples:
>>> input = torch.rand(2, 4, 5, 5)
>>> output = gaussian_blur2d(input, (3, 3), (1.5, 1.5))
>>> output.shape
torch.Size([2, 4, 5, 5])
"""
if separable:
kernel_x: torch.Tensor = get_gaussian_kernel1d(kernel_size[1], sigma[1])
kernel_y: torch.Tensor = get_gaussian_kernel1d(kernel_size[0], sigma[0])
out = kornia.filters.separable_filter2d(input, kernel_x[None], kernel_y[None], border_type)
else:
kernel: torch.Tensor = torch.unsqueeze(get_gaussian_kernel2d(kernel_size, sigma), dim=0)
out = kornia.filter2d(input, kernel, border_type)
return out
|
def gaussian_blur2d(input: torch.Tensor,
kernel_size: Tuple[int, int],
sigma: Tuple[float, float],
border_type: str = 'reflect',
separable: bool = True) -> torch.Tensor:
r"""Create an operator that blurs a tensor using a Gaussian filter.
.. image:: _static/img/gaussian_blur2d.png
The operator smooths the given tensor with a gaussian kernel by convolving
it to each channel. It supports batched operation.
Arguments:
input: the input tensor with shape :math:`(B,C,H,W)`.
kernel_size: the size of the kernel.
sigma: the standard deviation of the kernel.
border_type: the padding mode to be applied before convolving.
The expected modes are: ``'constant'``, ``'reflect'``,
``'replicate'`` or ``'circular'``. Default: ``'reflect'``.
separable: run as two 1d-convolutions
Returns:
the blurred tensor with shape :math:`(B, C, H, W)`.
.. note::
See a working example `here <https://kornia-tutorials.readthedocs.io/en/latest/
gaussian_blur.html>`__.
Examples:
>>> input = torch.rand(2, 4, 5, 5)
>>> output = gaussian_blur2d(input, (3, 3), (1.5, 1.5))
>>> output.shape
torch.Size([2, 4, 5, 5])
"""
if separable:
kernel_x: torch.Tensor = get_gaussian_kernel1d(kernel_size[1], sigma[1])
kernel_y: torch.Tensor = get_gaussian_kernel1d(kernel_size[0], sigma[0])
out = kornia.filters.separable_filter2d(input, kernel_x[None], kernel_y[None], border_type)
else:
kernel: torch.Tensor = get_gaussian_kernel2d(kernel_size, sigma)
out = kornia.filter2d(input, kernel, border_type)
return out
|
12,326 |
def _write_localized_provider_docs():
for locale in AVAILABLE_LOCALES:
info = _get_localized_provider_info(locale)
with (DOCS_ROOT / 'locales' / '{}.rst'.format(locale)).open('wb') as fh:
_hide_edit_on_github(fh)
_write_title(fh, 'Locale {}'.format(locale))
_write_includes(fh)
for provider_class, standard_provider_name in info:
provider_methods = _get_provider_methods(provider_class)
_write_title(fh, '``{}``'.format(standard_provider_name), level=2)
_write(fh, PROVIDER_AUTODOC_TEMPLATE.format(
provider_class=provider_class,
provider_methods=provider_methods,
))
|
def _write_localized_provider_docs():
for locale in AVAILABLE_LOCALES:
info = _get_localized_provider_info(locale)
with (DOCS_ROOT / 'locales' / f'{locale}.rst').open('wb') as fh:
_hide_edit_on_github(fh)
_write_title(fh, 'Locale {}'.format(locale))
_write_includes(fh)
for provider_class, standard_provider_name in info:
provider_methods = _get_provider_methods(provider_class)
_write_title(fh, '``{}``'.format(standard_provider_name), level=2)
_write(fh, PROVIDER_AUTODOC_TEMPLATE.format(
provider_class=provider_class,
provider_methods=provider_methods,
))
|
22,799 |
def _parse_ssl_options(ssl_options: Optional[str]) -> List[Any]:
if ssl_options is not None:
try:
with io.open(ssl_options, "r", encoding="utf-8") as _file:
return nginxparser.load(_file)
except IOError:
logger.warning("Missing NGINX TLS options file: %s", ssl_options)
except UnicodeDecodeError:
logger.warning("Could not read file: %s due to invalid character. "
"Only UTF-8 encoding is supported.", ssl_options)
except pyparsing.ParseBaseException as err:
logger.warning("Could not parse file: %s due to %s", ssl_options, err)
return []
|
def _parse_ssl_options(ssl_options: Optional[str]) -> Union[UnspacedList, List[Any]]:
if ssl_options is not None:
try:
with io.open(ssl_options, "r", encoding="utf-8") as _file:
return nginxparser.load(_file)
except IOError:
logger.warning("Missing NGINX TLS options file: %s", ssl_options)
except UnicodeDecodeError:
logger.warning("Could not read file: %s due to invalid character. "
"Only UTF-8 encoding is supported.", ssl_options)
except pyparsing.ParseBaseException as err:
logger.warning("Could not parse file: %s due to %s", ssl_options, err)
return []
|
24,140 |
def should_create_card(pull_request_event):
pr_includes_changes = False
pr_is_merged = False
for label in pull_request_event.get('pull_request').get('labels', []):
label = label.get('name', '')
if label.startswith('changelog/') and label != 'changelog/no-changelog':
pr_includes_changes = True
if pull_request_event.get('pull_request').get('merged') and pull_request_event.get('action') == 'closed':
pr_is_merged = True
return pr_includes_changes and pr_is_merged
|
def should_create_card(pull_request_event):
pr_includes_changes = False
pr_is_merged = False
for label in pull_request_event.get('pull_request').get('labels', []):
label = label.get('name', '')
if label.startswith('changelog/') and label != 'changelog/no-changelog':
pr_includes_changes = True
if pull_request_event.get('pull_request', {}).get('merged') and pull_request_event.get('action') == 'closed':
pr_is_merged = True
return pr_includes_changes and pr_is_merged
|
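A minimal, hypothetical webhook payload showing the fields the check reads, assuming the should_create_card above is in scope:

event = {
    "action": "closed",
    "pull_request": {
        "merged": True,
        "labels": [{"name": "changelog/Added"}],
    },
}
print(should_create_card(event))   # True: merged, closed, and carries a changelog/* label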
27,213 |
def parse_target(args):
domain, username, password, address = re.compile('(?:(?:([^/@:]*)/)?([^@:]*)(?::([^@]*))?@)?(.*)').match(
args.target).groups('')
# In case the password contains '@'
if '@' in address:
password = password + '@' + address.rpartition('@')[0]
address = address.rpartition('@')[2]
if args.target_ip is None:
args.target_ip = address
if domain is None:
domain = ''
if password == '' and username != '' and args.hashes is None and args.no_pass is False and args.aesKey is None:
from getpass import getpass
password = getpass("Password:")
if args.aesKey is not None:
args.k = True
if args.hashes is not None:
lmhash, nthash = args.hashes.split(':')
else:
lmhash = ''
nthash = ''
return domain, username, password, address, lmhash, nthash
|
def parse_target(args):
domain, username, password, address = utils.parse_target(args.target)
if args.target_ip is None:
args.target_ip = address
if domain is None:
domain = ''
if password == '' and username != '' and args.hashes is None and args.no_pass is False and args.aesKey is None:
from getpass import getpass
password = getpass("Password:")
if args.aesKey is not None:
args.k = True
if args.hashes is not None:
lmhash, nthash = args.hashes.split(':')
else:
lmhash = ''
nthash = ''
return domain, username, password, address, lmhash, nthash
|
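For reference, a standalone run of the target-string regex that the original inlined and the modified version delegates to utils.parse_target; the credentials below are invented:

import re

target = "CONTOSO/alice:Secr3t@10.0.0.5"
pattern = re.compile('(?:(?:([^/@:]*)/)?([^@:]*)(?::([^@]*))?@)?(.*)')
print(pattern.match(target).groups(''))   # ('CONTOSO', 'alice', 'Secr3t', '10.0.0.5')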
36,345 |
def b64encode(s, altchars=None, padded=True):
"""Encode the bytes-like object s using Base64 and return a bytes object.
Optional altchars should be a byte string of length 2 which specifies an
alternative alphabet for the '+' and '/' characters. This allows an
application to e.g. generate url or filesystem safe Base64 strings.
    If padded is True (the default), padding will be applied to the
result bytes. If padding if False, no padding is applied.
"""
encoded = binascii.b2a_base64(s, newline=False)
if altchars is not None:
assert len(altchars) == 2, repr(altchars)
encoded = encoded.translate(bytes.maketrans(b'+/', altchars))
if not padded:
encoded = encoded.rstrip(b'=')
return encoded
|
def b64encode(s, altchars=None, padded=True):
"""Encode the bytes-like object s using Base64 and return a bytes object.
Optional altchars should be a byte string of length 2 which specifies an
alternative alphabet for the '+' and '/' characters. This allows an
application to e.g. generate url or filesystem safe Base64 strings.
    If padded is True (the default), padding will be applied to the
result bytes. If padding is False, no padding is applied.
"""
encoded = binascii.b2a_base64(s, newline=False)
if altchars is not None:
assert len(altchars) == 2, repr(altchars)
encoded = encoded.translate(bytes.maketrans(b'+/', altchars))
if not padded:
encoded = encoded.rstrip(b'=')
return encoded
|
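The proposed padded flag can be emulated with today's standard library, which is exactly what the new code path does internally:

import base64

data = b"any carnal pleasure."
padded = base64.b64encode(data)    # b'YW55IGNhcm5hbCBwbGVhc3VyZS4='
unpadded = padded.rstrip(b"=")     # what b64encode(data, padded=False) would return
print(padded, unpadded)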
6,447 |
def create_sample_collection(lab_test, template, patient, invoice):
if(frappe.db.get_value('Healthcare Settings', None, 'create_sample_collection_for_lab_test') == '1'):
sample_collection = create_sample_doc(template, patient, invoice, lab_test.company)
if(sample_collection):
lab_test.sample = sample_collection.name
return lab_test
|
def create_sample_collection(lab_test, template, patient, invoice):
if frappe.db.get_cached_value('Healthcare Settings', None, 'create_sample_collection_for_lab_test'):
sample_collection = create_sample_doc(template, patient, invoice, lab_test.company)
if(sample_collection):
lab_test.sample = sample_collection.name
return lab_test
|
26,064 |
def honor_list_request(
manifest: Manifest, global_config: Namespace
) -> Union[Manifest, int]:
"""If --list was passed, simply list the manifest and exit cleanly.
Args:
manifest (~.Manifest): The manifest of sessions to be run.
global_config (~nox.main.GlobalConfig): The global configuration.
Returns:
Union[~.Manifest,int]: ``0`` if a listing is all that is requested,
the manifest otherwise (to be sent to the next task).
"""
if not global_config.list_sessions:
return manifest
# If the user just asked for a list of sessions, print that
# and any docstring specified in noxfile.py and be done.
if manifest.module_docstring:
print(manifest.module_docstring)
print("Sessions defined in {noxfile}:\n".format(noxfile=global_config.noxfile))
reset = parse_colors("reset") if global_config.color else ""
selected_color = parse_colors("cyan") if global_config.color else ""
skipped_color = parse_colors("white") if global_config.color else ""
for session, selected in manifest.list_all_sessions():
output = "{marker} {color}{session}{reset}"
if selected:
marker = "*"
color = selected_color
else:
marker = "-"
color = skipped_color
if session.description is not None:
output += " -> {description}"
print(
output.format(
color=color,
reset=reset,
session=session.friendly_name,
description=session.description,
marker=marker,
)
)
print(
"\nsessions marked with {selected_color}*{reset} are selected, sessions marked with {skipped_color}-{reset} are skipped.".format(
selected_color=selected_color, skipped_color=skipped_color, reset=reset
)
)
return 0
|
def honor_list_request(
manifest: Manifest, global_config: Namespace
) -> Union[Manifest, int]:
"""If --list was passed, simply list the manifest and exit cleanly.
Args:
manifest (~.Manifest): The manifest of sessions to be run.
global_config (~nox.main.GlobalConfig): The global configuration.
Returns:
Union[~.Manifest,int]: ``0`` if a listing is all that is requested,
the manifest otherwise (to be sent to the next task).
"""
if not global_config.list_sessions:
return manifest
# If the user just asked for a list of sessions, print that
# and any docstring specified in noxfile.py and be done.
if manifest.module_docstring:
print(manifest.module_docstring.strip(), end="\n\n")
print("Sessions defined in {noxfile}:\n".format(noxfile=global_config.noxfile))
reset = parse_colors("reset") if global_config.color else ""
selected_color = parse_colors("cyan") if global_config.color else ""
skipped_color = parse_colors("white") if global_config.color else ""
for session, selected in manifest.list_all_sessions():
output = "{marker} {color}{session}{reset}"
if selected:
marker = "*"
color = selected_color
else:
marker = "-"
color = skipped_color
if session.description is not None:
output += " -> {description}"
print(
output.format(
color=color,
reset=reset,
session=session.friendly_name,
description=session.description,
marker=marker,
)
)
print(
"\nsessions marked with {selected_color}*{reset} are selected, sessions marked with {skipped_color}-{reset} are skipped.".format(
selected_color=selected_color, skipped_color=skipped_color, reset=reset
)
)
return 0
|
42,932 |
def edge_coords(graph: nx.Graph, l: dict) -> dict:
""" Provides the coordinates for the graph edges when given an input graph layout.
Args:
graph (nx.Graph): input graph
l (dict): dictionary of edges and their respective coordinates
Returns:
dict: x and y coordinates for beginning and end of each edge
"""
e_x = []
e_y = []
for e in graph.edges():
start_x, start_y = l[e[0]]
end_x, end_y = l[e[1]]
e_x.append(start_x)
e_x.append(end_x)
e_y.append(start_y)
e_y.append(end_y)
e_x.append(None)
e_y.append(None)
return {"x": e_x, "y": e_y}
|
def edge_coords(graph: nx.Graph, l: dict) -> dict:
""" Provides the coordinates for the graph edges when given an input graph layout.
Args:
graph (nx.Graph): input graph
l (dict): dictionary of nodes and their respective coordinates
Returns:
dict: x and y coordinates for beginning and end of each edge
"""
e_x = []
e_y = []
for e in graph.edges():
start_x, start_y = l[e[0]]
end_x, end_y = l[e[1]]
e_x.append(start_x)
e_x.append(end_x)
e_y.append(start_y)
e_y.append(end_y)
e_x.append(None)
e_y.append(None)
return {"x": e_x, "y": e_y}
|
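A quick sketch with networkx, assuming the edge_coords above is in scope; spring_layout returns exactly the node -> (x, y) mapping the second argument expects:

import networkx as nx

graph = nx.cycle_graph(4)
layout = nx.spring_layout(graph, seed=42)
coords = edge_coords(graph, layout)
print(len(coords["x"]))   # 12: three entries (start, end, None separator) per edge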
1,968 |
def _beta_divergence(X, W, H, beta, square_root=False):
"""Compute the beta-divergence of X and dot(W, H).
Parameters
----------
X : float or array-like of shape (n_samples, n_features)
W : float or dense array-like of shape (n_samples, n_components)
H : float or dense array-like of shape (n_components, n_features)
beta : float or string in {'frobenius', 'kullback-leibler', \
'itakura-saito'}
Parameter of the beta-divergence.
If beta == 2, this is half the Frobenius *squared* norm.
If beta == 1, this is the generalized Kullback-Leibler divergence.
If beta == 0, this is the Itakura-Saito divergence.
Else, this is the general beta-divergence.
square_root : bool, default=False
If True, return np.sqrt(2 * res)
For beta == 2, it corresponds to the Frobenius norm.
Returns
-------
res : float
Beta divergence of X and np.dot(X, H)
"""
beta = _beta_loss_to_float(beta)
# The method can be called with scalars
if not sp.issparse(X):
X = np.atleast_2d(X)
W = np.atleast_2d(W)
H = np.atleast_2d(H)
# Frobenius norm
if beta == 2:
# Avoid the creation of the dense np.dot(W, H) if X is sparse.
if sp.issparse(X):
norm_X = np.dot(X.data, X.data)
norm_WH = trace_dot(np.dot(np.dot(W.T, W), H), H)
cross_prod = trace_dot((X * H.T), W)
res = (norm_X + norm_WH - 2. * cross_prod) / 2.
else:
res = squared_norm(X - np.dot(W, H)) / 2.
if square_root:
return np.sqrt(res * 2)
else:
return res
if sp.issparse(X):
# compute np.dot(W, H) only where X is nonzero
WH_data = _special_sparse_dot(W, H, X).data
X_data = X.data
else:
WH = np.dot(W, H)
WH_data = WH.ravel()
X_data = X.ravel()
# do not affect the zeros: here 0 ** (-1) = 0 and not infinity
indices = X_data > EPSILON
WH_data = WH_data[indices]
X_data = X_data[indices]
# used to avoid division by zero
WH_data[WH_data == 0] = EPSILON
# generalized Kullback-Leibler divergence
if beta == 1:
# fast and memory efficient computation of np.sum(np.dot(W, H))
sum_WH = np.dot(np.sum(W, axis=0), np.sum(H, axis=1))
# computes np.sum(X * log(X / WH)) only where X is nonzero
div = X_data / WH_data
res = np.dot(X_data, np.log(div))
# add full np.sum(np.dot(W, H)) - np.sum(X)
res += sum_WH - X_data.sum()
# Itakura-Saito divergence
elif beta == 0:
div = X_data / WH_data
res = np.sum(div) - np.product(X.shape) - np.sum(np.log(div))
# beta-divergence, beta not in (0, 1, 2)
else:
if sp.issparse(X):
# slow loop, but memory efficient computation of :
# np.sum(np.dot(W, H) ** beta)
sum_WH_beta = 0
for i in range(X.shape[1]):
sum_WH_beta += np.sum(np.dot(W, H[:, i]) ** beta)
else:
sum_WH_beta = np.sum(WH ** beta)
sum_X_WH = np.dot(X_data, WH_data ** (beta - 1))
res = (X_data ** beta).sum() - beta * sum_X_WH
res += sum_WH_beta * (beta - 1)
res /= beta * (beta - 1)
if square_root:
return np.sqrt(2 * res)
else:
return res
|
def _beta_divergence(X, W, H, beta, square_root=False):
"""Compute the beta-divergence of X and dot(W, H).
Parameters
----------
X : float or array-like of shape (n_samples, n_features)
W : float or dense array-like of shape (n_samples, n_components)
H : float or dense array-like of shape (n_components, n_features)
beta : float or string in {'frobenius', 'kullback-leibler', \
'itakura-saito'}
Parameter of the beta-divergence.
If beta == 2, this is half the Frobenius *squared* norm.
If beta == 1, this is the generalized Kullback-Leibler divergence.
If beta == 0, this is the Itakura-Saito divergence.
Else, this is the general beta-divergence.
square_root : bool, default=False
If True, return np.sqrt(2 * res)
For beta == 2, it corresponds to the Frobenius norm.
Returns
-------
res : float
Beta divergence of X and np.dot(X, H)
"""
beta = _beta_loss_to_float(beta)
# The method can be called with scalars
if not sp.issparse(X):
X = np.atleast_2d(X)
W = np.atleast_2d(W)
H = np.atleast_2d(H)
# Frobenius norm
if beta == 2:
# Avoid the creation of the dense np.dot(W, H) if X is sparse.
if sp.issparse(X):
norm_X = np.dot(X.data, X.data)
norm_WH = trace_dot(np.dot(np.dot(W.T, W), H), H)
cross_prod = trace_dot((X * H.T), W)
res = (norm_X + norm_WH - 2. * cross_prod) / 2.
else:
res = squared_norm(X - np.dot(W, H)) / 2.
if square_root:
return np.sqrt(res * 2)
else:
return res
if sp.issparse(X):
# compute np.dot(W, H) only where X is nonzero
WH_data = _special_sparse_dot(W, H, X).data
X_data = X.data
else:
WH = np.dot(W, H)
WH_data = WH.ravel()
X_data = X.ravel()
# do not affect the zeros: here 0 ** (-1) = 0 and not infinity
indices = X_data > EPSILON
WH_data = WH_data[indices]
X_data = X_data[indices]
# used to avoid division by zero
WH_data[WH_data == 0] = EPSILON
# generalized Kullback-Leibler divergence
if beta == 1:
# fast and memory efficient computation of np.sum(np.dot(W, H))
sum_WH = np.dot(np.sum(W, axis=0), np.sum(H, axis=1))
# computes np.sum(X * log(X / WH)) only where X is nonzero
div = X_data / WH_data
res = np.dot(X_data, np.log(div))
# add full np.sum(np.dot(W, H)) - np.sum(X)
res += sum_WH - X_data.sum()
# Itakura-Saito divergence
elif beta == 0:
div = X_data / WH_data
res = np.sum(div) - np.product(X.shape) - np.sum(np.log(div))
# beta-divergence, beta not in (0, 1, 2)
else:
if sp.issparse(X):
# slow loop, but memory efficient computation of :
# np.sum(np.dot(W, H) ** beta)
sum_WH_beta = 0
for i in range(X.shape[1]):
sum_WH_beta += np.sum(np.dot(W, H[:, i]) ** beta)
else:
sum_WH_beta = np.sum(WH ** beta)
sum_X_WH = np.dot(X_data, WH_data ** (beta - 1))
res = (X_data ** beta).sum() - beta * sum_X_WH
res += sum_WH_beta * (beta - 1)
res /= beta * (beta - 1)
if square_root:
return np.sqrt(2 * res)
else:
return res
|
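A standalone numpy check of the beta == 2 branch (half the squared Frobenius norm), written directly rather than through the private sklearn helper above:

import numpy as np

rng = np.random.default_rng(0)
X = rng.random((4, 3))
W = rng.random((4, 2))
H = rng.random((2, 3))

res = 0.5 * np.sum((X - W @ H) ** 2)   # beta == 2 with square_root=False
print(res, np.sqrt(2 * res))           # the second value corresponds to square_root=True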
28,551 |
def test_rcparams_repr_str():
"""Check both repr and str print all keys."""
repr_str = rcParams.__repr__()
str_str = rcParams.__str__()
assert repr_str[:8] == "RcParams"
for string in (repr_str, str_str):
assert all([key in string for key in rcParams.keys()])
|
def test_rcparams_repr_str():
"""Check both repr and str print all keys."""
repr_str = rcParams.__repr__()
str_str = rcParams.__str__()
    assert repr_str.startswith("RcParams")
for string in (repr_str, str_str):
assert all([key in string for key in rcParams.keys()])
|
45,172 |
def Running(cls: Type[State] = State, **kwargs) -> State:
"""Convenience function for creating `Running` states.
Returns:
State: a Running state
"""
return schemas.states.Running(cls=cls, **kwargs)
|
def Running(cls: Type[State] = State, **kwargs) -> State:
"""Convenience function for creating `Running` states.
Returns:
State: a `Running` state
"""
return schemas.states.Running(cls=cls, **kwargs)
|
56,173 |
def fix(
sql: str,
dialect: str = "ansi",
rules: Optional[List[str]] = None,
exclude_rules: Optional[List[str]] = None,
config_path: Optional[str] = None,
) -> str:
"""Fix a SQL string.
Args:
sql (:obj:`str`): The SQL to be fixed.
dialect (:obj:`str`, optional): A reference to the dialect of the SQL
to be fixed. Defaults to `ansi`.
        rules (:obj:`Optional[List[str]]`, optional): A subset of rule
            references to fix for. Defaults to None.
        exclude_rules (:obj:`Optional[List[str]]`, optional): A subset of rule
references to avoid fixing for. Defaults to None.
config_path (:obj:`Optional[str]`, optional): A path to a .sqlfluff config.
Defaults to None.
Returns:
:obj:`str` for the fixed SQL if possible.
"""
cfg = get_simple_config(
dialect=dialect,
rules=rules,
exclude_rules=exclude_rules,
config_path=config_path,
)
linter = Linter(config=cfg)
result = linter.lint_string_wrapped(sql, fix=True)
fix_even_unparsable = cfg.get("fix_even_unparsable")
should_fix = fix_even_unparsable
if not should_fix:
# If fix_even_unparsable wasn't set, check for parse errors. Okay to fix
# if there are no parse errors.
_, num_filtered_parse_errors = result.check_parse_errors()
if not num_filtered_parse_errors:
should_fix = True
if should_fix:
sql = result.paths[0].files[0].fix_string()[0]
return sql
|
def fix(
sql: str,
dialect: str = "ansi",
rules: Optional[List[str]] = None,
exclude_rules: Optional[List[str]] = None,
config_path: Optional[str] = None,
) -> str:
"""Fix a SQL string.
Args:
sql (:obj:`str`): The SQL to be fixed.
dialect (:obj:`str`, optional): A reference to the dialect of the SQL
to be fixed. Defaults to `ansi`.
        rules (:obj:`Optional[List[str]]`, optional): A subset of rule
            references to fix for. Defaults to None.
        exclude_rules (:obj:`Optional[List[str]]`, optional): A subset of rule
references to avoid fixing for. Defaults to None.
config_path (:obj:`Optional[str]`, optional): A path to a .sqlfluff config.
Defaults to None.
Returns:
:obj:`str` for the fixed SQL if possible.
"""
cfg = get_simple_config(
dialect=dialect,
rules=rules,
exclude_rules=exclude_rules,
config_path=config_path,
)
linter = Linter(config=cfg)
result = linter.lint_string_wrapped(sql, fix=True)
fix_even_unparsable = cfg.get("fix_even_unparsable")
should_fix = fix_even_unparsable
if not should_fix:
# If fix_even_unparsable wasn't set, check for parse errors. Okay to fix
# if there are no parse errors.
_, num_filtered_parse_errors = result.check_parse_errors()
if num_filtered_parse_errors == 0:
should_fix = True
if should_fix:
sql = result.paths[0].files[0].fix_string()[0]
return sql
|
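If this is the fix entry point of the sqlfluff simple API, as its shape suggests, a typical call would look like the sketch below; the exact output depends on the active rules:

import sqlfluff

fixed = sqlfluff.fix("SELECT  1 from tbl", dialect="ansi")
print(fixed)   # e.g. 'SELECT 1 FROM tbl' with the default rule set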
28,018 |
def main(args):
"""
Store the defect results in the specified input list as bug reports in the
database.
"""
logger.setup_logger(args.verbose if 'verbose' in args else None)
try:
cmd_config.check_config_file(args)
except FileNotFoundError as fnerr:
LOG.error(fnerr)
sys.exit(1)
if not host_check.check_zlib():
raise Exception("zlib is not available on the system!")
# To ensure the help message prints the default folder properly,
# the 'default' for 'args.input' is a string, not a list.
# But we need lists for the foreach here to work.
if isinstance(args.input, str):
args.input = [args.input]
if 'name' not in args:
LOG.debug("Generating name for analysis...")
generated = __get_run_name(args.input)
if generated:
setattr(args, 'name', generated)
else:
LOG.error("No suitable name was found in the inputs for the "
"analysis run. Please specify one by passing argument "
"--name run_name in the invocation.")
sys.exit(2) # argparse returns error code 2 for bad invocations.
LOG.info("Storing analysis results for run '" + args.name + "'")
if 'force' in args:
LOG.info("argument --force was specified: the run with name '" +
args.name + "' will be deleted.")
# Setup connection to the remote server.
client = libclient.setup_client(args.product_url)
_, zip_file = tempfile.mkstemp('.zip')
LOG.debug("Will write mass store ZIP to '%s'...", zip_file)
try:
LOG.debug("Assembling zip file.")
try:
assemble_zip(args.input, zip_file, client)
except Exception as ex:
print(ex)
import traceback
traceback.print_stack()
LOG.error("Failed to asseble zip file.")
sys.exit(1)
zip_size = os.stat(zip_file).st_size
LOG.debug("Assembling zip done, size is %s",
sizeof_fmt(zip_size))
if zip_size > MAX_UPLOAD_SIZE:
LOG.error("The result list to upload is too big (max: %s).",
sizeof_fmt(MAX_UPLOAD_SIZE))
sys.exit(1)
b64zip = ""
with open(zip_file, 'rb') as zf:
b64zip = base64.b64encode(zf.read()).decode("utf-8")
if len(b64zip) == 0:
LOG.info("Zip content is empty, nothing to store!")
sys.exit(1)
context = webserver_context.get_context()
trim_path_prefixes = args.trim_path_prefix if \
'trim_path_prefix' in args else None
description = args.description if 'description' in args else None
LOG.info("Storing results to the server...")
client.massStoreRun(args.name,
args.tag if 'tag' in args else None,
str(context.version),
b64zip,
'force' in args,
trim_path_prefixes,
description)
# Storing analysis statistics if the server allows them.
if client.allowsStoringAnalysisStatistics():
storing_analysis_statistics(client, args.input, args.name)
LOG.info("Storage finished successfully.")
except RequestFailed as reqfail:
if reqfail.errorCode == ErrorCode.SOURCE_FILE:
header = ['File', 'Line', 'Checker name']
table = twodim.to_str(
'table', header, [c.split('|') for c in reqfail.extraInfo])
LOG.warning("Setting the review statuses for some reports failed "
"because of non valid source code comments: "
"%s\n %s", reqfail.message, table)
sys.exit(1)
except Exception as ex:
import traceback
traceback.print_stack()
LOG.info("Storage failed: %s", str(ex))
sys.exit(1)
finally:
os.remove(zip_file)
|
def main(args):
"""
Store the defect results in the specified input list as bug reports in the
database.
"""
logger.setup_logger(args.verbose if 'verbose' in args else None)
try:
cmd_config.check_config_file(args)
except FileNotFoundError as fnerr:
LOG.error(fnerr)
sys.exit(1)
if not host_check.check_zlib():
raise Exception("zlib is not available on the system!")
# To ensure the help message prints the default folder properly,
# the 'default' for 'args.input' is a string, not a list.
# But we need lists for the foreach here to work.
if isinstance(args.input, str):
args.input = [args.input]
if 'name' not in args:
LOG.debug("Generating name for analysis...")
generated = __get_run_name(args.input)
if generated:
setattr(args, 'name', generated)
else:
LOG.error("No suitable name was found in the inputs for the "
"analysis run. Please specify one by passing argument "
"--name run_name in the invocation.")
sys.exit(2) # argparse returns error code 2 for bad invocations.
LOG.info("Storing analysis results for run '" + args.name + "'")
if 'force' in args:
LOG.info("argument --force was specified: the run with name '" +
args.name + "' will be deleted.")
# Setup connection to the remote server.
client = libclient.setup_client(args.product_url)
_, zip_file = tempfile.mkstemp('.zip')
LOG.debug("Will write mass store ZIP to '%s'...", zip_file)
try:
LOG.debug("Assembling zip file.")
try:
assemble_zip(args.input, zip_file, client)
except Exception as ex:
print(ex)
import traceback
traceback.print_stack()
LOG.error("Failed to assemble zip file.")
sys.exit(1)
zip_size = os.stat(zip_file).st_size
LOG.debug("Assembling zip done, size is %s",
sizeof_fmt(zip_size))
if zip_size > MAX_UPLOAD_SIZE:
LOG.error("The result list to upload is too big (max: %s).",
sizeof_fmt(MAX_UPLOAD_SIZE))
sys.exit(1)
b64zip = ""
with open(zip_file, 'rb') as zf:
b64zip = base64.b64encode(zf.read()).decode("utf-8")
if len(b64zip) == 0:
LOG.info("Zip content is empty, nothing to store!")
sys.exit(1)
context = webserver_context.get_context()
trim_path_prefixes = args.trim_path_prefix if \
'trim_path_prefix' in args else None
description = args.description if 'description' in args else None
LOG.info("Storing results to the server...")
client.massStoreRun(args.name,
args.tag if 'tag' in args else None,
str(context.version),
b64zip,
'force' in args,
trim_path_prefixes,
description)
# Storing analysis statistics if the server allows them.
if client.allowsStoringAnalysisStatistics():
storing_analysis_statistics(client, args.input, args.name)
LOG.info("Storage finished successfully.")
except RequestFailed as reqfail:
if reqfail.errorCode == ErrorCode.SOURCE_FILE:
header = ['File', 'Line', 'Checker name']
table = twodim.to_str(
'table', header, [c.split('|') for c in reqfail.extraInfo])
LOG.warning("Setting the review statuses for some reports failed "
"because of non valid source code comments: "
"%s\n %s", reqfail.message, table)
sys.exit(1)
except Exception as ex:
import traceback
traceback.print_stack()
LOG.info("Storage failed: %s", str(ex))
sys.exit(1)
finally:
os.remove(zip_file)
|
17,712 |
def write_mcf(structure, filename, angle_style,
dihedral_style, lj14=None, coul14=None):
"""Output a Cassandra molecular connectivity file (MCF).
Outputs a Cassandra MCF from a Parmed structure object.
Parameters
----------
structure : parmed.Structure
ParmEd structure object
filename : str
Path of the output file
angle_style : str
Type of angles. 'fixed' and 'harmonic'
are valid choices
dihedral_style : str
Type of dihedrals. 'harmonic', 'OPLS', 'CHARMM',
and 'none' are valid choices
lj14 : float
Scaling factor for LJ interactions on 1-4 pairs
coul14 : float
Scaling factor for Coulombic interactions on 1-4 pairs
Notes
-----
See https://cassandra.nd.edu/index.php/documentation for
a complete description of the MCF format.
"""
if not isinstance(structure, pmd.Structure):
raise ValueError("MCF writer requires parmed structure.")
if not all ([a.type for a in structure.atoms]):
raise ValueError("MCF writing not supported without "
"parameterized forcefield.")
# Conversion factors
IG_CONSTANT_KCAL = 0.00198720425864083 # kcal/mol*K
KCAL_TO_KJ = 4.184
# Check some things before we start writing the MCF
# Only will write MCF for Cassandra-supported options
if (angle_style.casefold() != 'fixed' and
angle_style.casefold() != 'harmonic'):
raise ValueError("Invalid selection for angle_style. "
"Please choose 'fixed' or 'harmonic'")
if len(structure.urey_bradleys) > 0 :
raise ValueError("Urey bradley terms detected. Cassandra only "
"currently supports fixed or harmonic angles.")
if (dihedral_style.casefold() != 'opls' and
dihedral_style.casefold() != 'charmm' and
dihedral_style.casefold() != 'none'):
raise ValueError("Invalid selection for dihedral_style. "
"Please choose 'OPLS', 'CHARMM', or 'none'")
if dihedral_style.casefold() != 'none':
if (len(structure.rb_torsions) > 0 and
dihedral_style.casefold() != 'opls'):
raise ValueError("Dihedral style declared as {} but "
"RB torsions found.".format(dihedral_style))
if (len(structure.dihedrals) > 0 and
dihedral_style.casefold() != 'charmm'):
raise ValueError("Dihedral style declared as {} but "
"charmm-style dihedrals found.".format(dihedral_style))
if (len(structure.rb_torsions) > 0 and
len(structure.dihedrals) > 0):
raise ValueError("Multiple dihedral styles detected, check your "
"Forcefield XML and structure")
# Identify atoms in rings and Cassandra 'fragments'
in_ring,frag_list,frag_conn = _id_rings_fragments(structure)
# Infer 1-4 scaling if not specified
if coul14 is None:
if len(structure.adjusts) > 0:
coul14 = structure.adjusts[0].type.chgscale
else:
coul14 = 0.0
if (len(structure.dihedrals) > 0 or
len(structure.rb_torsions) > 0):
warnings.warn('Unable to infer coulombic 1-4 '
'scaling factor. Setting to 0.0')
if lj14 is None:
if len(structure.adjusts) > 0:
type1_eps = structure.adjusts[0].atom1.epsilon
type2_eps = structure.adjusts[0].atom2.epsilon
scaled_eps = structure.adjusts[0].type.epsilon
if (structure.combining_rule == 'geometric' or
structure.combining_rule == 'lorentz'):
combined_eps = sqrt(type1_eps*type2_eps)
lj14 = scaled_eps/combined_eps
else:
lj14 = 0.0
                warnings.warn('Unable to infer LJ 1-4 scaling '
'factor. Setting to 0.0')
else:
lj14 = 0.0
if (len(structure.dihedrals) > 0 or
len(structure.rb_torsions) > 0):
                warnings.warn('Unable to infer LJ 1-4 scaling '
'factor. Setting to 0.0')
if coul14 < 0.0 or coul14 > 1.0:
raise ValueError("Unreasonable value {} for "
"coul14 scaling.".format(coul14))
if lj14 < 0.0 or lj14 > 1.0:
raise ValueError("Unreasonable value {} for "
"lj14 scaling.".format(lj14))
# Now we write the MCF file
with open(filename, 'w') as mcf_file:
header = ( '!***************************************'
'****************************************\n'
'!Molecular connectivity file\n'
'!***************************************'
'****************************************\n'
'!'+filename+' - created by mBuild\n\n'
)
mcf_file.write(header)
_write_atom_information(mcf_file, structure, in_ring,
IG_CONSTANT_KCAL)
_write_bond_information(mcf_file, structure)
_write_angle_information(mcf_file, structure, angle_style,
IG_CONSTANT_KCAL)
_write_dihedral_information(mcf_file, structure, dihedral_style,
KCAL_TO_KJ)
_write_improper_information(mcf_file, structure, KCAL_TO_KJ)
_write_fragment_information(mcf_file, structure, frag_list, frag_conn)
_write_intrascaling_information(mcf_file, lj14, coul14)
# That's all, folks!
mcf_file.write('\n\nEND\n')
|
def write_mcf(structure, filename, angle_style,
dihedral_style, lj14=None, coul14=None):
"""Output a Cassandra molecular connectivity file (MCF).
Outputs a Cassandra MCF from a Parmed structure object.
Parameters
----------
structure : parmed.Structure
ParmEd structure object
filename : str
Path of the output file
angle_style : str
Type of angles. 'fixed' and 'harmonic'
are valid choices
dihedral_style : str
Type of dihedrals. 'harmonic', 'OPLS', 'CHARMM',
and 'none' are valid choices
lj14 : float
Scaling factor for LJ interactions on 1-4 pairs
coul14 : float
Scaling factor for Coulombic interactions on 1-4 pairs
Notes
-----
See https://cassandra.nd.edu/index.php/documentation for
a complete description of the MCF format.
"""
if not isinstance(structure, pmd.Structure):
raise ValueError("MCF writer requires parmed structure.")
if not all ([a.type for a in structure.atoms]):
raise ValueError("MCF writing not supported without "
"parameterized forcefield.")
# Conversion factors
IG_CONSTANT_KCAL = 0.00198720425864083 # kcal/mol*K
KCAL_TO_KJ = 4.184
# Check some things before we start writing the MCF
# Only will write MCF for Cassandra-supported options
if (angle_style.casefold() != 'fixed' and
angle_style.casefold() != 'harmonic'):
raise ValueError("Invalid selection for angle_style. "
"Please choose 'fixed' or 'harmonic'")
if len(structure.urey_bradleys) > 0 :
raise ValueError("Urey bradley terms detected. Cassandra only "
"currently supports fixed or harmonic angles.")
if (dihedral_style.casefold() != 'opls' and
dihedral_style.casefold() != 'charmm' and
dihedral_style.casefold() != 'none'):
raise ValueError("Invalid selection for dihedral_style. "
"Please choose 'OPLS', 'CHARMM', or 'none'")
if dihedral_style.casefold() != 'none':
if (len(structure.rb_torsions) > 0 and
dihedral_style.casefold() != 'opls'):
raise ValueError("Dihedral style declared as {} but "
"RB torsions found.".format(dihedral_style))
if (len(structure.dihedrals) > 0 and
dihedral_style.casefold() != 'charmm'):
raise ValueError("Dihedral style declared as {} but "
"charmm-style dihedrals found.".format(dihedral_style))
if (len(structure.rb_torsions) > 0 and
len(structure.dihedrals) > 0):
raise ValueError("Multiple dihedral styles detected, check your "
"Forcefield XML and structure")
# Identify atoms in rings and Cassandra 'fragments'
in_ring,frag_list,frag_conn = _id_rings_fragments(structure)
# Infer 1-4 scaling if not specified
if coul14 is None:
if len(structure.adjusts) > 0:
coul14 = structure.adjusts[0].type.chgscale
else:
coul14 = 0.0
if (len(structure.dihedrals) > 0 or
len(structure.rb_torsions) > 0):
warnings.warn('Unable to infer coulombic 1-4 '
'scaling factor. Setting to {0:.1f}'.format(coul14))
if lj14 is None:
if len(structure.adjusts) > 0:
type1_eps = structure.adjusts[0].atom1.epsilon
type2_eps = structure.adjusts[0].atom2.epsilon
scaled_eps = structure.adjusts[0].type.epsilon
if (structure.combining_rule == 'geometric' or
structure.combining_rule == 'lorentz'):
combined_eps = sqrt(type1_eps*type2_eps)
lj14 = scaled_eps/combined_eps
else:
lj14 = 0.0
                warnings.warn('Unable to infer LJ 1-4 scaling '
'factor. Setting to 0.0')
else:
lj14 = 0.0
if (len(structure.dihedrals) > 0 or
len(structure.rb_torsions) > 0):
                warnings.warn('Unable to infer LJ 1-4 scaling '
'factor. Setting to 0.0')
if coul14 < 0.0 or coul14 > 1.0:
raise ValueError("Unreasonable value {} for "
"coul14 scaling.".format(coul14))
if lj14 < 0.0 or lj14 > 1.0:
raise ValueError("Unreasonable value {} for "
"lj14 scaling.".format(lj14))
# Now we write the MCF file
with open(filename, 'w') as mcf_file:
header = ( '!***************************************'
'****************************************\n'
'!Molecular connectivity file\n'
'!***************************************'
'****************************************\n'
'!'+filename+' - created by mBuild\n\n'
)
mcf_file.write(header)
_write_atom_information(mcf_file, structure, in_ring,
IG_CONSTANT_KCAL)
_write_bond_information(mcf_file, structure)
_write_angle_information(mcf_file, structure, angle_style,
IG_CONSTANT_KCAL)
_write_dihedral_information(mcf_file, structure, dihedral_style,
KCAL_TO_KJ)
_write_improper_information(mcf_file, structure, KCAL_TO_KJ)
_write_fragment_information(mcf_file, structure, frag_list, frag_conn)
_write_intrascaling_information(mcf_file, lj14, coul14)
# That's all, folks!
mcf_file.write('\n\nEND\n')
|
35,570 |
def pad(img: Tensor, padding: List[int], fill: int = 0, padding_mode: str = "constant") -> Tensor:
r"""Pad the given image on all sides with the given "pad" value.
The image can be a PIL Image or a torch Tensor, in which case it is expected
to have [..., H, W] shape, where ... means an arbitrary number of leading dimensions
Args:
img (PIL Image or Tensor): Image to be padded.
padding (int or tuple or list): Padding on each border. If a single int is provided this
is used to pad all borders. If tuple of length 2 is provided this is the padding
on left/right and top/bottom respectively. If a tuple of length 4 is provided
this is the padding for the left, top, right and bottom borders
respectively. Only list and tuple types are supported for Tensors.
fill (int or str or tuple): Pixel fill value for constant fill. Default is 0. If a tuple of
length 3, it is used to fill R, G, B channels respectively.
This value is only used when the padding_mode is constant. Only int value is supported for Tensors.
padding_mode: Type of padding. Should be: constant, edge, reflect or symmetric. Default is constant.
Only constant supported for Tensors.
- constant: pads with a constant value, this value is specified with fill
- edge: pads with the last value on the edge of the image
- reflect: pads with reflection of image (without repeating the last value on the edge)
padding [1, 2, 3, 4] with 2 elements on both sides in reflect mode
will result in [3, 2, 1, 2, 3, 4, 3, 2]
- symmetric: pads with reflection of image (repeating the last value on the edge)
padding [1, 2, 3, 4] with 2 elements on both sides in symmetric mode
will result in [2, 1, 1, 2, 3, 4, 4, 3]
Returns:
PIL Image or Tensor: Padded image.
"""
if not isinstance(img, torch.Tensor):
return F_pil.pad(img, padding=padding, fill=fill, padding_mode=padding_mode)
return F_t.pad(img, padding=padding, fill=fill, padding_mode=padding_mode)
|
def pad(img: Tensor, padding: List[int], fill: int = 0, padding_mode: str = "constant") -> Tensor:
r"""Pad the given image on all sides with the given "pad" value.
The image can be a PIL Image or a torch Tensor, in which case it is expected
to have [..., H, W] shape, where ... means an arbitrary number of leading dimensions
Args:
img (PIL Image or Tensor): Image to be padded.
padding (int or tuple or list): Padding on each border. If a single int is provided this
is used to pad all borders. If tuple of length 2 is provided this is the padding
on left/right and top/bottom respectively. If a tuple of length 4 is provided
this is the padding for the left, top, right and bottom borders
respectively. Only list and tuple types are supported for Tensors.
fill (int or str or tuple): Pixel fill value for constant fill. Default is 0. If a tuple of
length 3, it is used to fill R, G, B channels respectively.
This value is only used when the padding_mode is constant. Only int value is supported for Tensors.
padding_mode: Type of padding. Should be: constant, edge, reflect or symmetric. Default is constant.
Only constant supported for Tensors for now.
- constant: pads with a constant value, this value is specified with fill
- edge: pads with the last value on the edge of the image
- reflect: pads with reflection of image (without repeating the last value on the edge)
padding [1, 2, 3, 4] with 2 elements on both sides in reflect mode
will result in [3, 2, 1, 2, 3, 4, 3, 2]
- symmetric: pads with reflection of image (repeating the last value on the edge)
padding [1, 2, 3, 4] with 2 elements on both sides in symmetric mode
will result in [2, 1, 1, 2, 3, 4, 4, 3]
Returns:
PIL Image or Tensor: Padded image.
"""
if not isinstance(img, torch.Tensor):
return F_pil.pad(img, padding=padding, fill=fill, padding_mode=padding_mode)
return F_t.pad(img, padding=padding, fill=fill, padding_mode=padding_mode)
|
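The reflect/symmetric behaviour described in the docstring can be reproduced on a plain Python list; this is an illustrative helper, not the torchvision implementation.

def pad1d(seq, n, mode):
    # pad both ends of a 1-D sequence by n elements
    if mode == "reflect":        # mirror without repeating the edge value
        left, right = seq[1:n + 1][::-1], seq[-n - 1:-1][::-1]
    elif mode == "symmetric":    # mirror including the edge value
        left, right = seq[:n][::-1], seq[-n:][::-1]
    else:
        raise ValueError(mode)
    return left + seq + right

print(pad1d([1, 2, 3, 4], 2, "reflect"))    # [3, 2, 1, 2, 3, 4, 3, 2]
print(pad1d([1, 2, 3, 4], 2, "symmetric"))  # [2, 1, 1, 2, 3, 4, 4, 3]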
9,208 |
def getPSF(SCA, bandpass,
SCA_pos=None, pupil_bin=4, wcs=None,
n_waves=None, extra_aberrations=None,
wavelength=None, gsparams=None,
logger=None, high_accuracy=None, approximate_struts=None):
"""Get a single PSF for Roman ST observations.
The user must provide the SCA and bandpass; the latter is used when setting up the pupil
plane configuration and when interpolating chromatic information, if requested.
This routine carries out linear interpolation of the aberrations within a given SCA, based on
the Roman (then WFIRST) Cycle 7 specification of the aberrations as a function of focal plane
position, more specifically from ``Roman_Phase-A_SRR_WFC_Zernike_and_Field_Data_170727.xlsm``
downloaded from https://roman.gsfc.nasa.gov/science/Roman_Reference_Information.html. Phase
B updates that became available in mid-2019 have not yet been incorporated into this module.
(Note: the files at that url still use the old WFIRST name. We have renamed them to use the
new name of the telescope, Roman, after downloading.)
The mask images for the Roman pupil plane are available at the Roman Reference Information
page: https://roman.gsfc.nasa.gov/science/Roman_Reference_Information.html.
There are separate files for each SCA, since the view of the spider pattern varies somewhat
across the field of view of the wide field camera. Furthermore, the effect of the obscuration
is somewhat different at longer wavelengths, so F184 has a different set of files than the
    other filters. cf. the ``galsim.roman.longwave_bands`` and ``galsim.roman.shortwave_bands``
attributes, which define which bands use which pupil plane images. Users usually don't need
to worry about any of this, as GalSim will select the correct pupil image automatically based
on the SCA and bandpass provided.
The full pupil plane images are 4096 x 4096, which use a lot of memory and are somewhat slow
to use, so we normally bin them by a factor of 4 (resulting in 1024 x 1024 images). This
provides enough detail for most purposes and is much faster to render than using the full pupil
plane images. This bin factor is a settable parameter, called ``pupil_bin``. If you want the
more accurate, slower calculation using the full images, you can set it to 1. In the other
direction, using pupil_bin=8 (resulting in a 512 x 512 image) still provides fairly reasonable
results and is even faster to render. It is not generally recommended to use higher binning
than that, as the diffraction spikes will become noticeably degraded.
.. note::
This function will cache the aperture calculation, so repeated calls with the same
SCA and bandpass should be much faster after the first call, as the pupil plane will
already be loaded. If you need to clear the cache for memory reasons, you may call::
galsim.roman.roman_psfs._make_aperture.clear()
to recover any memory currently being used for this cache. Of course, subsequent calls to
`getPSF` will need to rebuild the aperture at that point.
The PSF that is returned by default will be oriented with respect to the SCA coordinates,
not world coordinates as is typical in GalSim. The pupil plane has a fixed orientation
with respect to the focal plane, so the PSF rotates with the telescope. To obtain a
PSF in world coordinates, which can be convolved with galaxies (who are normally described
in world coordinates), you may pass in a ``wcs`` parameter to this function. This will
project the PSF into world coordinates according to that WCS before returning it. Otherwise,
    the return value is equivalent to using ``wcs=galsim.PixelScale(galsim.roman.pixel_scale)``.
The calculation takes advantage of the fact that the diffraction limit and aberrations have a
simple, understood wavelength-dependence. (The Roman project webpage for Cycle 7 does in fact
provide aberrations as a function of wavelength, but the deviation from the expected chromatic
dependence is sub-percent so we neglect it here.) For reference, the script used to parse the
Zernikes given on the webpage and create the files in the GalSim repository can be found in
``devel/external/parse_roman_zernikes_1217.py``. The resulting chromatic object can be used to
draw into any of the Roman bandpasses, though the pupil plane configuration will only be
correct for those bands in the same range (i.e., long- or short-wavelength bands).
For applications that require very high accuracy in the modeling of the PSF, with very limited
aliasing, you may want to lower the folding_threshold in the gsparams. Otherwise very bright
stars will show some reflections in the spider pattern and possibly some boxiness at the
outskirts of the PSF. Using ``gsparams = GSParams(folding_threshold=2.e-3)`` generally
provides good results even for very bright (e.g. mag=10) stars. In these cases, you probably
also want to reduce ``pupil_bin`` somewhat from the default value of 4.
By default, no additional aberrations are included above the basic design. However, users can
provide an optional keyword ``extra_aberrations`` that will be included on top of those that are
part of the design. This should be in the same format as for the ChromaticOpticalPSF class,
with units of waves at the fiducial wavelength, 1293 nm. Currently, only aberrations up to order
22 (Noll convention) are simulated. For Roman, the tolerance for additional
aberrations was a total of 90 nanometers RMS as of mid-2015, distributed largely among coma,
astigmatism, trefoil, and spherical aberrations (NOT defocus). This information might serve as
a guide for reasonable ``extra_aberrations`` inputs. The reference for that number is
an earlier Cycle 5 document:
http://roman.gsfc.nasa.gov/science/sdt_public/wps/references/instrument/README_AFTA_C5_WFC_Zernike_and_Field_Data.pdf
However, the default (non-extra) aberrations are from Cycle 7 material linked earlier in this
docstring.
Jitter and charge diffusion are, by default, not included. Users who wish to include these can
find some guidelines for typical length scales of the Gaussians that can represent these
effects, and convolve the ChromaticOpticalPSF with appropriate achromatic Gaussians.
The PSFs are always defined assuming the user will specify length scales in arcsec.
Users may find they do not have to call `getPSF` for all objects in their simulations; for a
given SCA and position within the SCA, and a given pupil plane configuration and wavelength
information, it should be possible to reuse the PSFs.
Parameters:
SCA: Single value specifying the SCA for which the PSF should be
loaded.
bandpass: Single string specifying the bandpass to use when defining the
pupil plane configuration and/or interpolation of chromatic PSFs.
You may also pass a string 'long' or 'short' for this argument, in
which case, the correct pupil plane configuration will be used for
long- or short-wavelength bands (F184 is long, all else is short).
In this case, no interpolation can be used, since it is defined
using the extent of the chosen bandpass. If ``wavelength`` is given,
then bandpass may be None, which will use the short-wavelength pupil
plane image.
SCA_pos: Single galsim.PositionD indicating the position within the SCA
for which the PSF should be created. If None, the exact center of
the SCA is chosen. [default: None]
pupil_bin: The binning to apply to the pupil plane image. (See discussion above.)
[default: 4]
wcs: The WCS to use to project the PSF into world coordinates.
[default: galsim.PixelScale(galsim.roman.pixel_scale)]
n_waves: Number of wavelengths to use for setting up interpolation of the
chromatic PSF objects, which can lead to much faster image
rendering. If None, then no interpolation is used. Note that
users who want to interpolate can always set up the interpolation
later on even if they do not do so when calling `getPSF`.
[default: None]
extra_aberrations: Array of extra aberrations to include in the PSF model, on top of
those that are part of the Roman design. These should be
provided in units of waves at the fiducial wavelength of 1293 nm,
as an array of length 23 with entries 4 through 22 corresponding
to defocus through the 22nd Zernike in the Noll convention.
[default: None]
wavelength: An option to get an achromatic PSF for a single wavelength, for
users who do not care about chromaticity of the PSF. If None,
then the fully chromatic PSF is returned. Alternatively the user
should supply either (a) a wavelength in nanometers, and they
will get achromatic OpticalPSF objects for that wavelength, or
(b) a bandpass object, in which case they will get achromatic
OpticalPSF objects defined at the effective wavelength of that
bandpass. [default: False]
gsparams: An optional GSParams argument. See the docstring for GSParams
for details. [default: None]
Returns:
A single PSF object (either a ChromaticOpticalPSF or an OpticalPSF depending on the
inputs).
"""
from ..position import PositionD
from ..errors import GalSimValueError, GalSimRangeError
from ..bandpass import Bandpass
from ..wcs import PixelScale
from . import n_pix, n_sca, longwave_bands, shortwave_bands, pixel_scale
# Deprecated options
if high_accuracy:
if approximate_struts:
from ..deprecated import depr
from ..gsparams import GSParams
depr('high_accuracy=True,approximate_struts=True', 2.3,
'pupil_bin=4, gsparams=galsim.GSParams(folding_threshold=2.e-3)',
'Note: this is not actually equivalent to the old behavior, but it should '
'be both faster and more accurate than the corresponding PSF in v2.2.')
# Set folding_threshold 2.5x smaller than default.
gsparams = GSParams.check(gsparams, folding_threshold=2.e-3)
pupil_bin = 4
else:
from ..deprecated import depr
from ..gsparams import GSParams
depr('high_accuracy=True', 2.3,
'pupil_bin=1, gsparams=galsim.GSParams(folding_threshold=2.e-3)',
'Note: this is not actually equivalent to the old behavior, but it should '
'be both faster and more accurate than the corresponding PSF in v2.2.')
# Set folding_threshold 2.5x smaller than default.
gsparams = GSParams.check(gsparams, folding_threshold=2.e-3)
pupil_bin = 1
elif approximate_struts:
from ..deprecated import depr
from ..gsparams import GSParams
depr('approximate_struts=True', 2.3, 'pupil_bin=8',
'Note: this is not actually equivalent to the old behavior, but it should '
'be both faster and more accurate than the corresponding PSF in v2.2.')
pupil_bin = 8
elif approximate_struts is False or high_accuracy is False:
# If they are explicitly given, rather than default (None), then trigger this.
from ..deprecated import depr
from ..gsparams import GSParams
depr('approximate_struts=False, high_accuracy=False', 2.3, 'pupil_bin=4',
'Note: this is not actually equivalent to the old behavior, but it should '
'be both faster and more accurate than the corresponding PSF in v2.2.')
pupil_bin = 4
if SCA <= 0 or SCA > n_sca:
raise GalSimRangeError("Invalid SCA.", SCA, 1, n_sca)
# SCA_pos: if None, then all should just be center of the SCA.
if SCA_pos is None:
SCA_pos = PositionD(n_pix/2, n_pix/2)
# Parse the bandpasses to see which pupil plane image is needed
pupil_plane_type = None
if bandpass in longwave_bands or bandpass=='long':
pupil_plane_type = 'long'
elif bandpass in shortwave_bands or bandpass=='short':
pupil_plane_type = 'short'
elif bandpass is None and n_waves is None:
pupil_plane_type = 'short'
else:
raise GalSimValueError("Bandpass not a valid Roman bandpass or 'short'/'long'.",
bandpass, default_bandpass_list)
# If bandpass is 'short'/'long', then make sure that interpolation is not called for, since that
# requires an actual bandpass.
if bandpass in ['short','long'] and n_waves is not None:
raise GalSimValueError("Cannot use bandpass='short'/'long' with interpolation.", bandpass)
if not isinstance(wavelength, (Bandpass, float, type(None))):
raise TypeError("wavelength should either be a Bandpass, float, or None.")
# Now call _get_single_PSF().
psf = _get_single_PSF(SCA, bandpass, SCA_pos, pupil_bin,
n_waves, extra_aberrations, wavelength,
pupil_plane_type, gsparams)
# Apply WCS.
# The current version is in arcsec units, but oriented parallel to the image coordinates.
# So to apply the right WCS, project to pixels using the Roman mean pixel_scale, then
# project back to world coordinates with the provided wcs.
if wcs is not None:
scale = PixelScale(pixel_scale)
psf = wcs.toWorld(scale.toImage(psf), image_pos=SCA_pos)
return psf
|
def getPSF(SCA, bandpass,
SCA_pos=None, pupil_bin=4, wcs=None,
n_waves=None, extra_aberrations=None,
wavelength=None, gsparams=None,
logger=None, high_accuracy=None, approximate_struts=None):
"""Get a single PSF for Roman ST observations.
The user must provide the SCA and bandpass; the latter is used when setting up the pupil
plane configuration and when interpolating chromatic information, if requested.
This routine carries out linear interpolation of the aberrations within a given SCA, based on
the Roman (then WFIRST) Cycle 7 specification of the aberrations as a function of focal plane
position, more specifically from ``Roman_Phase-A_SRR_WFC_Zernike_and_Field_Data_170727.xlsm``
downloaded from https://roman.gsfc.nasa.gov/science/Roman_Reference_Information.html. Phase
B updates that became available in mid-2019 have not yet been incorporated into this module.
(Note: the files at that url still use the old WFIRST name. We have renamed them to use the
new name of the telescope, Roman, after downloading.)
The mask images for the Roman pupil plane are available at the Roman Reference Information
page: https://roman.gsfc.nasa.gov/science/Roman_Reference_Information.html.
There are separate files for each SCA, since the view of the spider pattern varies somewhat
across the field of view of the wide field camera. Furthermore, the effect of the obscuration
is somewhat different at longer wavelengths, so F184 has a different set of files than the
    other filters. cf. the ``galsim.roman.longwave_bands`` and ``galsim.roman.shortwave_bands``
attributes, which define which bands use which pupil plane images. Users usually don't need
to worry about any of this, as GalSim will select the correct pupil image automatically based
on the SCA and bandpass provided.
The full pupil plane images are 4096 x 4096, which use a lot of memory and are somewhat slow
to use, so we normally bin them by a factor of 4 (resulting in 1024 x 1024 images). This
provides enough detail for most purposes and is much faster to render than using the full pupil
plane images. This bin factor is a settable parameter, called ``pupil_bin``. If you want the
more accurate, slower calculation using the full images, you can set it to 1. In the other
direction, using pupil_bin=8 (resulting in a 512 x 512 image) still provides fairly reasonable
results and is even faster to render. It is not generally recommended to use higher binning
than that, as the diffraction spikes will become noticeably degraded.
.. note::
This function will cache the aperture calculation, so repeated calls with the same
SCA and bandpass should be much faster after the first call, as the pupil plane will
already be loaded. If you need to clear the cache for memory reasons, you may call::
galsim.roman.roman_psfs._make_aperture.clear()
to recover any memory currently being used for this cache. Of course, subsequent calls to
`getPSF` will need to rebuild the aperture at that point.
The PSF that is returned by default will be oriented with respect to the SCA coordinates,
not world coordinates as is typical in GalSim. The pupil plane has a fixed orientation
with respect to the focal plane, so the PSF rotates with the telescope. To obtain a
PSF in world coordinates, which can be convolved with galaxies (that are normally described
in world coordinates), you may pass in a ``wcs`` parameter to this function. This will
project the PSF into world coordinates according to that WCS before returning it. Otherwise,
    the return value is equivalent to using ``wcs=galsim.PixelScale(galsim.roman.pixel_scale)``.
The calculation takes advantage of the fact that the diffraction limit and aberrations have a
simple, understood wavelength-dependence. (The Roman project webpage for Cycle 7 does in fact
provide aberrations as a function of wavelength, but the deviation from the expected chromatic
dependence is sub-percent so we neglect it here.) For reference, the script used to parse the
Zernikes given on the webpage and create the files in the GalSim repository can be found in
``devel/external/parse_roman_zernikes_1217.py``. The resulting chromatic object can be used to
draw into any of the Roman bandpasses, though the pupil plane configuration will only be
correct for those bands in the same range (i.e., long- or short-wavelength bands).
For applications that require very high accuracy in the modeling of the PSF, with very limited
aliasing, you may want to lower the folding_threshold in the gsparams. Otherwise very bright
stars will show some reflections in the spider pattern and possibly some boxiness at the
outskirts of the PSF. Using ``gsparams = GSParams(folding_threshold=2.e-3)`` generally
provides good results even for very bright (e.g. mag=10) stars. In these cases, you probably
also want to reduce ``pupil_bin`` somewhat from the default value of 4.
By default, no additional aberrations are included above the basic design. However, users can
provide an optional keyword ``extra_aberrations`` that will be included on top of those that are
part of the design. This should be in the same format as for the ChromaticOpticalPSF class,
with units of waves at the fiducial wavelength, 1293 nm. Currently, only aberrations up to order
22 (Noll convention) are simulated. For Roman, the tolerance for additional
aberrations was a total of 90 nanometers RMS as of mid-2015, distributed largely among coma,
astigmatism, trefoil, and spherical aberrations (NOT defocus). This information might serve as
a guide for reasonable ``extra_aberrations`` inputs. The reference for that number is
an earlier Cycle 5 document:
http://roman.gsfc.nasa.gov/science/sdt_public/wps/references/instrument/README_AFTA_C5_WFC_Zernike_and_Field_Data.pdf
However, the default (non-extra) aberrations are from Cycle 7 material linked earlier in this
docstring.
Jitter and charge diffusion are, by default, not included. Users who wish to include these can
find some guidelines for typical length scales of the Gaussians that can represent these
effects, and convolve the ChromaticOpticalPSF with appropriate achromatic Gaussians.
The PSFs are always defined assuming the user will specify length scales in arcsec.
Users may find they do not have to call `getPSF` for all objects in their simulations; for a
given SCA and position within the SCA, and a given pupil plane configuration and wavelength
information, it should be possible to reuse the PSFs.
Parameters:
SCA: Single value specifying the SCA for which the PSF should be
loaded.
bandpass: Single string specifying the bandpass to use when defining the
pupil plane configuration and/or interpolation of chromatic PSFs.
You may also pass a string 'long' or 'short' for this argument, in
which case, the correct pupil plane configuration will be used for
long- or short-wavelength bands (F184 is long, all else is short).
In this case, no interpolation can be used, since it is defined
using the extent of the chosen bandpass. If ``wavelength`` is given,
then bandpass may be None, which will use the short-wavelength pupil
plane image.
SCA_pos: Single galsim.PositionD indicating the position within the SCA
for which the PSF should be created. If None, the exact center of
the SCA is chosen. [default: None]
pupil_bin: The binning to apply to the pupil plane image. (See discussion above.)
[default: 4]
wcs: The WCS to use to project the PSF into world coordinates.
[default: galsim.PixelScale(galsim.roman.pixel_scale)]
n_waves: Number of wavelengths to use for setting up interpolation of the
chromatic PSF objects, which can lead to much faster image
rendering. If None, then no interpolation is used. Note that
users who want to interpolate can always set up the interpolation
later on even if they do not do so when calling `getPSF`.
[default: None]
extra_aberrations: Array of extra aberrations to include in the PSF model, on top of
those that are part of the Roman design. These should be
provided in units of waves at the fiducial wavelength of 1293 nm,
as an array of length 23 with entries 4 through 22 corresponding
to defocus through the 22nd Zernike in the Noll convention.
[default: None]
wavelength: An option to get an achromatic PSF for a single wavelength, for
users who do not care about chromaticity of the PSF. If None,
then the fully chromatic PSF is returned. Alternatively the user
should supply either (a) a wavelength in nanometers, and they
will get achromatic OpticalPSF objects for that wavelength, or
(b) a bandpass object, in which case they will get achromatic
OpticalPSF objects defined at the effective wavelength of that
bandpass. [default: False]
gsparams: An optional GSParams argument. See the docstring for GSParams
for details. [default: None]
Returns:
A single PSF object (either a ChromaticOpticalPSF or an OpticalPSF depending on the
inputs).
"""
from ..position import PositionD
from ..errors import GalSimValueError, GalSimRangeError
from ..bandpass import Bandpass
from ..wcs import PixelScale
from . import n_pix, n_sca, longwave_bands, shortwave_bands, pixel_scale
# Deprecated options
if high_accuracy:
if approximate_struts:
from ..deprecated import depr
from ..gsparams import GSParams
depr('high_accuracy=True,approximate_struts=True', 2.3,
'pupil_bin=4, gsparams=galsim.GSParams(folding_threshold=2.e-3)',
'Note: this is not actually equivalent to the old behavior, but it should '
'be both faster and more accurate than the corresponding PSF in v2.2.')
# Set folding_threshold 2.5x smaller than default.
gsparams = GSParams.check(gsparams, folding_threshold=2.e-3)
pupil_bin = 4
else:
from ..deprecated import depr
from ..gsparams import GSParams
depr('high_accuracy=True', 2.3,
'pupil_bin=1, gsparams=galsim.GSParams(folding_threshold=2.e-3)',
'Note: this is not actually equivalent to the old behavior, but it should '
'be both faster and more accurate than the corresponding PSF in v2.2.')
# Set folding_threshold 2.5x smaller than default.
gsparams = GSParams.check(gsparams, folding_threshold=2.e-3)
pupil_bin = 1
elif approximate_struts:
from ..deprecated import depr
from ..gsparams import GSParams
depr('approximate_struts=True', 2.3, 'pupil_bin=8',
'Note: this is not actually equivalent to the old behavior, but it should '
'be both faster and more accurate than the corresponding PSF in v2.2.')
pupil_bin = 8
elif approximate_struts is False or high_accuracy is False:
# If they are explicitly given, rather than default (None), then trigger this.
from ..deprecated import depr
from ..gsparams import GSParams
depr('approximate_struts=False, high_accuracy=False', 2.3, 'pupil_bin=4',
'Note: this is not actually equivalent to the old behavior, but it should '
'be both faster and more accurate than the corresponding PSF in v2.2.')
pupil_bin = 4
if SCA <= 0 or SCA > n_sca:
raise GalSimRangeError("Invalid SCA.", SCA, 1, n_sca)
# SCA_pos: if None, then all should just be center of the SCA.
if SCA_pos is None:
SCA_pos = PositionD(n_pix/2, n_pix/2)
# Parse the bandpasses to see which pupil plane image is needed
pupil_plane_type = None
if bandpass in longwave_bands or bandpass=='long':
pupil_plane_type = 'long'
elif bandpass in shortwave_bands or bandpass=='short':
pupil_plane_type = 'short'
elif bandpass is None and n_waves is None:
pupil_plane_type = 'short'
else:
raise GalSimValueError("Bandpass not a valid Roman bandpass or 'short'/'long'.",
bandpass, default_bandpass_list)
# If bandpass is 'short'/'long', then make sure that interpolation is not called for, since that
# requires an actual bandpass.
if bandpass in ['short','long'] and n_waves is not None:
raise GalSimValueError("Cannot use bandpass='short'/'long' with interpolation.", bandpass)
if not isinstance(wavelength, (Bandpass, float, type(None))):
raise TypeError("wavelength should either be a Bandpass, float, or None.")
# Now call _get_single_PSF().
psf = _get_single_PSF(SCA, bandpass, SCA_pos, pupil_bin,
n_waves, extra_aberrations, wavelength,
pupil_plane_type, gsparams)
# Apply WCS.
# The current version is in arcsec units, but oriented parallel to the image coordinates.
# So to apply the right WCS, project to pixels using the Roman mean pixel_scale, then
# project back to world coordinates with the provided wcs.
if wcs is not None:
scale = PixelScale(pixel_scale)
psf = wcs.toWorld(scale.toImage(psf), image_pos=SCA_pos)
return psf
|
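A hedged usage sketch based on the docstring above; the SCA number, position, and band are arbitrary choices, and passing a single wavelength yields an achromatic PSF. It assumes ``galsim.roman.getBandpasses()`` and the 'H158' band name are available as in recent GalSim releases.

import galsim
from galsim import roman

bandpasses = roman.getBandpasses()              # Roman bandpass objects keyed by name, e.g. 'H158'
psf = roman.getPSF(SCA=7, bandpass='H158',
                   SCA_pos=galsim.PositionD(2048, 2048),
                   pupil_bin=8,                 # coarser pupil image for faster rendering
                   wavelength=bandpasses['H158'].effective_wavelength)
image = psf.drawImage(scale=roman.pixel_scale)  # PSF image sampled at the Roman pixel scale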
1,701 |
def plot_confusion_matrix(estimator, X, y_true, labels=None,
sample_weight=None, normalize=None,
display_labels=None, include_values=True,
xticks_rotation='horizontal',
values_format=None,
cmap='viridis', ax=None):
"""Plot Confusion Matrix.
Read more in the :ref:`User Guide <confusion_matrix>`.
Parameters
----------
estimator : estimator instance
Trained classifier.
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Input values.
    y_true : array-like of shape (n_samples,)
Target values.
labels : array-like of shape (n_classes,), default=None
List of labels to index the matrix. This may be used to reorder or
select a subset of labels. If `None` is given, those that appear at
least once in `y_true` or `y_pred` are used in sorted order.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights.
normalize : {'true', 'pred', 'all'}, default=None
Normalizes confusion matrix over the true (rows), predicted (columns)
conditions or all the population. If None, confusion matrix will not be
normalized.
display_labels : array-like of shape (n_classes,), default=None
Target names used for plotting. By default, `labels` will be used if
it is defined, otherwise the unique labels of `y_true` and `y_pred`
will be used.
include_values : bool, default=True
Includes values in confusion matrix.
xticks_rotation : {'vertical', 'horizontal'} or float, \
default='horizontal'
Rotation of xtick labels.
values_format : str, default=None
Format specification for values in confusion matrix. If `None`,
the format specification is '.2g'.
cmap : str or matplotlib Colormap, default='viridis'
Colormap recognized by matplotlib.
ax : matplotlib Axes, default=None
Axes object to plot on. If `None`, a new figure and axes is
created.
Returns
-------
display : :class:`~sklearn.metrics.ConfusionMatrixDisplay`
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from sklearn.datasets import make_classification
>>> from sklearn.metrics import plot_confusion_matrix
>>> from sklearn.model_selection import train_test_split
>>> from sklearn.svm import SVC
>>> X, y = make_classification(random_state=0)
>>> X_train, X_test, y_train, y_test = train_test_split(
... X, y, random_state=0)
>>> clf = SVC(random_state=0)
>>> clf.fit(X_train, y_train)
>>> plot_confusion_matrix(clf, X_test, y_test)
>>> plt.show()
"""
check_matplotlib_support("plot_confusion_matrix")
if not is_classifier(estimator):
raise ValueError("plot_confusion_matrix only supports classifiers")
y_pred = estimator.predict(X)
cm = confusion_matrix(y_true, y_pred, sample_weight=sample_weight,
labels=labels, normalize=normalize)
if display_labels is None:
if labels is None:
display_labels = estimator.classes_
else:
display_labels = labels
disp = ConfusionMatrixDisplay(confusion_matrix=cm,
display_labels=display_labels)
return disp.plot(include_values=include_values,
cmap=cmap, ax=ax, xticks_rotation=xticks_rotation,
values_format=values_format)
|
def plot_confusion_matrix(estimator, X, y_true, labels=None,
sample_weight=None, normalize=None,
display_labels=None, include_values=True,
xticks_rotation='horizontal',
values_format=None,
cmap='viridis', ax=None):
"""Plot Confusion Matrix.
Read more in the :ref:`User Guide <confusion_matrix>`.
Parameters
----------
estimator : estimator instance
Trained classifier.
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Input values.
    y_true : array-like of shape (n_samples,)
Target values.
labels : array-like of shape (n_classes,), default=None
List of labels to index the matrix. This may be used to reorder or
select a subset of labels. If `None` is given, those that appear at
least once in `y_true` or `y_pred` are used in sorted order.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights.
normalize : {'true', 'pred', 'all'}, default=None
Normalizes confusion matrix over the true (rows), predicted (columns)
conditions or all the population. If None, confusion matrix will not be
normalized.
display_labels : array-like of shape (n_classes,), default=None
Target names used for plotting. By default, `labels` will be used if
it is defined, otherwise the unique labels of `y_true` and `y_pred`
will be used.
include_values : bool, default=True
Includes values in confusion matrix.
xticks_rotation : {'vertical', 'horizontal'} or float, \
default='horizontal'
Rotation of xtick labels.
values_format : str, default=None
Format specification for values in confusion matrix. If `None`,
the format specification is '.2g'.
cmap : str or matplotlib Colormap, default='viridis'
Colormap recognized by matplotlib.
ax : matplotlib Axes, default=None
Axes object to plot on. If `None`, a new figure and axes is
created.
Returns
-------
display : :class:`~sklearn.metrics.ConfusionMatrixDisplay`
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from sklearn.datasets import make_classification
>>> from sklearn.metrics import plot_confusion_matrix
>>> from sklearn.model_selection import train_test_split
>>> from sklearn.svm import SVC
>>> X, y = make_classification(random_state=0)
>>> X_train, X_test, y_train, y_test = train_test_split(
... X, y, random_state=0)
>>> clf = SVC(random_state=0)
>>> clf.fit(X_train, y_train)
>>> plot_confusion_matrix(clf, X_test, y_test) # doctest: +SKIP
>>> plt.show()
"""
check_matplotlib_support("plot_confusion_matrix")
if not is_classifier(estimator):
raise ValueError("plot_confusion_matrix only supports classifiers")
y_pred = estimator.predict(X)
cm = confusion_matrix(y_true, y_pred, sample_weight=sample_weight,
labels=labels, normalize=normalize)
if display_labels is None:
if labels is None:
display_labels = estimator.classes_
else:
display_labels = labels
disp = ConfusionMatrixDisplay(confusion_matrix=cm,
display_labels=display_labels)
return disp.plot(include_values=include_values,
cmap=cmap, ax=ax, xticks_rotation=xticks_rotation,
values_format=values_format)
|
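The ``normalize`` options map directly onto ``sklearn.metrics.confusion_matrix``; an illustrative comparison on a toy label set.

from sklearn.metrics import confusion_matrix

y_true = [0, 0, 1, 1, 1]
y_pred = [0, 1, 1, 1, 0]
print(confusion_matrix(y_true, y_pred))                    # raw counts
print(confusion_matrix(y_true, y_pred, normalize='true'))  # rows (true classes) sum to 1
print(confusion_matrix(y_true, y_pred, normalize='pred'))  # columns (predicted classes) sum to 1
print(confusion_matrix(y_true, y_pred, normalize='all'))   # whole matrix sums to 1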
1,175 |
def ICC_rep_anova(Y, nocache=False):
"""
the data Y are entered as a 'table' ie subjects are in rows and repeated
measures in columns
One Sample Repeated measure ANOVA
    Y = XB + E with X = [Factor / Subjects]
This is a hacked up (but fully compatible) version of ICC_rep_anova
from nipype that caches some very expensive operations that depend
only on the input array shape - if you're going to run the routine
multiple times (like, on every voxel of an image), this gives you a
HUGE speed boost for large input arrays. If you change the dimensions
    of Y, it will reinitialize automatically. Set nocache to True to get
the original, much slower behavior. No, actually, don't do that.
"""
global icc_inited
global current_Y_shape
global dfc, dfe, dfr
global nb_subjects, nb_conditions
global x, x0, X
global centerbit
try:
current_Y_shape
if nocache or (current_Y_shape != Y.shape):
icc_inited = False
except NameError:
icc_inited = False
if not icc_inited:
[nb_subjects, nb_conditions] = Y.shape
current_Y_shape = Y.shape
dfc = nb_conditions - 1
dfe = (nb_subjects - 1) * dfc
dfr = nb_subjects - 1
# Compute the repeated measure effect
# ------------------------------------
# Sum Square Total
mean_Y = mean(Y)
SST = ((Y - mean_Y) ** 2).sum()
# create the design matrix for the different levels
if not icc_inited:
x = kron(eye(nb_conditions), ones((nb_subjects, 1))) # sessions
x0 = tile(eye(nb_subjects), (nb_conditions, 1)) # subjects
X = hstack([x, x0])
centerbit = dot(dot(X, pinv(dot(X.T, X))), X.T)
# Sum Square Error
predicted_Y = dot(centerbit, Y.flatten("F"))
residuals = Y.flatten("F") - predicted_Y
SSE = (residuals ** 2).sum()
residuals.shape = Y.shape
MSE = SSE / dfe
# Sum square session effect - between columns/sessions
SSC = ((mean(Y, 0) - mean_Y) ** 2).sum() * nb_subjects
MSC = SSC / dfc / nb_subjects
session_effect_F = MSC / MSE
# Sum Square subject effect - between rows/subjects
SSR = SST - SSC - SSE
MSR = SSR / dfr
    # ICC(3,1) = (mean square subject - mean square error) /
    # (mean square subject + (k-1)*mean square error)
ICC = nan_to_num((MSR - MSE) / (MSR + dfc * MSE))
e_var = MSE # variance of error
r_var = (MSR - MSE) / nb_conditions # variance between subjects
icc_inited = True
return ICC, r_var, e_var, session_effect_F, dfc, dfe
|
def ICC_rep_anova(Y, nocache=False):
"""
the data Y are entered as a 'table' ie subjects are in rows and repeated
measures in columns
One Sample Repeated measure ANOVA
    Y = XB + E with X = [Factor / Subjects]
This is a hacked up (but fully compatible) version of ICC_rep_anova
from nipype that caches some very expensive operations that depend
only on the input array shape - if you're going to run the routine
multiple times (like, on every voxel of an image), this gives you a
HUGE speed boost for large input arrays. If you change the dimensions
    of Y, it will reinitialize automatically. Set nocache to True to get
the original, much slower behavior. No, actually, don't do that.
"""
global icc_inited
global current_Y_shape
global dfc, dfe, dfr
global nb_subjects, nb_conditions
global x, x0, X
global centerbit
try:
current_Y_shape
if nocache or (current_Y_shape != Y.shape):
icc_inited = False
except NameError:
icc_inited = False
if not icc_inited:
[nb_subjects, nb_conditions] = Y.shape
current_Y_shape = Y.shape
dfc = nb_conditions - 1
dfe = (nb_subjects - 1) * dfc
dfr = nb_subjects - 1
# Compute the repeated measure effect
# ------------------------------------
# Sum Square Total
mean_Y = mean(Y)
SST = ((Y - mean_Y) ** 2).sum()
# create the design matrix for the different levels
if not icc_inited:
x = kron(eye(nb_conditions), ones((nb_subjects, 1))) # sessions
x0 = tile(eye(nb_subjects), (nb_conditions, 1)) # subjects
X = hstack([x, x0])
centerbit = dot(dot(X, pinv(dot(X.T, X))), X.T)
# Sum Square Error
predicted_Y = centerbit @ Y.flatten("F")
residuals = Y.flatten("F") - predicted_Y
SSE = (residuals ** 2).sum()
residuals.shape = Y.shape
MSE = SSE / dfe
# Sum square session effect - between columns/sessions
SSC = ((mean(Y, 0) - mean_Y) ** 2).sum() * nb_subjects
MSC = SSC / dfc / nb_subjects
session_effect_F = MSC / MSE
# Sum Square subject effect - between rows/subjects
SSR = SST - SSC - SSE
MSR = SSR / dfr
    # ICC(3,1) = (mean square subject - mean square error) /
    # (mean square subject + (k-1)*mean square error)
ICC = nan_to_num((MSR - MSE) / (MSR + dfc * MSE))
e_var = MSE # variance of error
r_var = (MSR - MSE) / nb_conditions # variance between subjects
icc_inited = True
return ICC, r_var, e_var, session_effect_F, dfc, dfe
|
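The quantities behind ICC(3,1) can be written out directly with numpy for a small subjects-by-measures table; an illustrative calculation with made-up numbers, not the cached code path above.

import numpy as np

Y = np.array([[9., 10., 11.],        # 4 subjects (rows) x 3 repeated measures (columns)
              [6.,  7.,  8.],
              [8.,  8.,  9.],
              [7.,  9.,  9.]])
n, k = Y.shape
grand = Y.mean()
SST = ((Y - grand) ** 2).sum()
SSC = n * ((Y.mean(axis=0) - grand) ** 2).sum()   # between-sessions (columns)
SSR = k * ((Y.mean(axis=1) - grand) ** 2).sum()   # between-subjects (rows)
SSE = SST - SSC - SSR                             # residual
MSR = SSR / (n - 1)
MSE = SSE / ((n - 1) * (k - 1))
icc31 = (MSR - MSE) / (MSR + (k - 1) * MSE)       # ICC(3,1)
print(icc31)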
59,275 |
def __getattr__(name, canonical=_resources_abc):
"""
For backwards compatibility, continue to make names
from canonical available through this module.
"""
if name in canonical.__all__:
obj = getattr(canonical, name)
import warnings
warnings.warn(
f"Using or importing the ABCs from {__name__!r} instead "
f"of from {canonical.__name__!r} is deprecated since "
"Python 3.11, and in 3.13 it will stop working",
DeprecationWarning,
stacklevel=2,
)
globals()[name] = obj
return obj
raise AttributeError(f'module {__name__!r} has no attribute {name!r}')
|
def __getattr__(name, canonical=_resources_abc):
"""
For backwards compatibility, continue to make names
from canonical available through this module.
"""
if name in canonical.__all__:
obj = getattr(canonical, name)
import warnings
warnings.warn(
f"Using or importing the ABCs from {__name__!r} instead "
f"of from {canonical.__name__!r} is now deprecated, "
"scheduled for removal in Python 3.13",
DeprecationWarning,
stacklevel=2,
)
globals()[name] = obj
return obj
raise AttributeError(f'module {__name__!r} has no attribute {name!r}')
|
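The same module-level __getattr__ deprecation pattern (PEP 562) in a self-contained sketch; the module and class names here are hypothetical stand-ins.

import types
import warnings

_canonical = types.ModuleType("newhome")             # stand-in for the canonical module
_canonical.Widget = type("Widget", (), {})
_canonical.__all__ = ["Widget"]

oldhome = types.ModuleType("oldhome")                # stand-in for the legacy module

def _getattr(name):
    if name in _canonical.__all__:
        warnings.warn(f"import {name} from {_canonical.__name__} instead",
                      DeprecationWarning, stacklevel=2)
        obj = getattr(_canonical, name)
        oldhome.__dict__[name] = obj                 # cache so the warning fires only once
        return obj
    raise AttributeError(f"module {oldhome.__name__!r} has no attribute {name!r}")

oldhome.__getattr__ = _getattr                       # PEP 562 hook on the module object
print(oldhome.Widget)                                # first access warns, then is cached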
996 |
def exception_colors():
"""Return a color table with fields for exception reporting.
The table is an instance of ColorSchemeTable with schemes added for
'Neutral', 'Linux', 'LightBG' and 'NoColor' and fields for exception handling filled
in.
Examples:
>>> ec = exception_colors()
>>> ec.active_scheme_name
''
>>> print(ec.active_colors)
None
Now we activate a color scheme:
>>> ec.set_active_scheme('NoColor')
>>> ec.active_scheme_name
'NoColor'
>>> sorted(ec.active_colors.keys())
['Normal', 'caret', 'em', 'excName', 'filename', 'filenameEm', 'line',
'lineno', 'linenoEm', 'name', 'nameEm', 'normalEm', 'topline', 'vName',
'val', 'valEm']
"""
ex_colors = ColorSchemeTable()
# Populate it with color schemes
C = TermColors # shorthand and local lookup
ex_colors.add_scheme(ColorScheme(
'NoColor',
# The color to be used for the top line
topline = C.NoColor,
# The colors to be used in the traceback
filename = C.NoColor,
lineno = C.NoColor,
name = C.NoColor,
vName = C.NoColor,
val = C.NoColor,
em = C.NoColor,
# Emphasized colors for the last frame of the traceback
normalEm = C.NoColor,
filenameEm = C.NoColor,
linenoEm = C.NoColor,
nameEm = C.NoColor,
valEm = C.NoColor,
# Colors for printing the exception
excName = C.NoColor,
line = C.NoColor,
caret = C.NoColor,
Normal = C.NoColor
))
# make some schemes as instances so we can copy them for modification easily
ex_colors.add_scheme(ColorScheme(
'Linux',
# The color to be used for the top line
topline = C.LightRed,
# The colors to be used in the traceback
filename = C.Green,
lineno = C.Green,
name = C.Purple,
vName = C.Cyan,
val = C.Green,
em = C.LightCyan,
# Emphasized colors for the last frame of the traceback
normalEm = C.LightCyan,
filenameEm = C.LightGreen,
linenoEm = C.LightGreen,
nameEm = C.LightPurple,
valEm = C.LightBlue,
# Colors for printing the exception
excName = C.LightRed,
line = C.Yellow,
caret = C.White,
Normal = C.Normal,
ExecutingNode = 'bg:#00005f'
))
# For light backgrounds, swap dark/light colors
ex_colors.add_scheme(ColorScheme(
'LightBG',
# The color to be used for the top line
topline = C.Red,
# The colors to be used in the traceback
filename = C.LightGreen,
lineno = C.LightGreen,
name = C.LightPurple,
vName = C.Cyan,
val = C.LightGreen,
em = C.Cyan,
# Emphasized colors for the last frame of the traceback
normalEm = C.Cyan,
filenameEm = C.Green,
linenoEm = C.Green,
nameEm = C.Purple,
valEm = C.Blue,
# Colors for printing the exception
excName = C.Red,
#line = C.Brown, # brown often is displayed as yellow
line = C.Red,
caret = C.Normal,
Normal = C.Normal,
ExecutingNode = 'bg:#005f00'
))
ex_colors.add_scheme(ColorScheme(
'Neutral',
# The color to be used for the top line
topline = C.Red,
# The colors to be used in the traceback
filename = C.LightGreen,
lineno = C.LightGreen,
name = C.LightPurple,
vName = C.Cyan,
val = C.LightGreen,
em = C.Cyan,
# Emphasized colors for the last frame of the traceback
normalEm = C.Cyan,
filenameEm = C.Green,
linenoEm = C.Green,
nameEm = C.Purple,
valEm = C.Blue,
# Colors for printing the exception
excName = C.Red,
#line = C.Brown, # brown often is displayed as yellow
line = C.Red,
caret = C.Normal,
Normal = C.Normal,
ExecutingNode = 'bg:#dddddd'
))
# Hack: the 'neutral' colours are not very visible on a dark background on
# Windows. Since Windows command prompts have a dark background by default, and
# relatively few users are likely to alter that, we will use the 'Linux' colours,
# designed for a dark background, as the default on Windows.
if os.name == "nt":
ex_colors.add_scheme(ex_colors['Linux'].copy('Neutral'))
return ex_colors
|
def exception_colors():
"""Return a color table with fields for exception reporting.
The table is an instance of ColorSchemeTable with schemes added for
'Neutral', 'Linux', 'LightBG' and 'NoColor' and fields for exception handling filled
in.
Examples:
>>> ec = exception_colors()
>>> ec.active_scheme_name
''
>>> print(ec.active_colors)
None
Now we activate a color scheme:
>>> ec.set_active_scheme('NoColor')
>>> ec.active_scheme_name
'NoColor'
>>> sorted(ec.active_colors.keys())
['Normal', 'caret', 'em', 'excName', 'filename', 'filenameEm', 'line',
'lineno', 'linenoEm', 'name', 'nameEm', 'normalEm', 'topline', 'vName',
'val', 'valEm']
"""
ex_colors = ColorSchemeTable()
# Populate it with color schemes
C = TermColors # shorthand and local lookup
ex_colors.add_scheme(ColorScheme(
'NoColor',
# The color to be used for the top line
topline = C.NoColor,
# The colors to be used in the traceback
filename = C.NoColor,
lineno = C.NoColor,
name = C.NoColor,
vName = C.NoColor,
val = C.NoColor,
em = C.NoColor,
# Emphasized colors for the last frame of the traceback
normalEm = C.NoColor,
filenameEm = C.NoColor,
linenoEm = C.NoColor,
nameEm = C.NoColor,
valEm = C.NoColor,
# Colors for printing the exception
excName = C.NoColor,
line = C.NoColor,
caret = C.NoColor,
Normal = C.NoColor
))
# make some schemes as instances so we can copy them for modification easily
ex_colors.add_scheme(ColorScheme(
'Linux',
# The color to be used for the top line
topline = C.LightRed,
# The colors to be used in the traceback
filename = C.Green,
lineno = C.Green,
name = C.Purple,
vName = C.Cyan,
val = C.Green,
em = C.LightCyan,
# Emphasized colors for the last frame of the traceback
normalEm = C.LightCyan,
filenameEm = C.LightGreen,
linenoEm = C.LightGreen,
nameEm = C.LightPurple,
valEm = C.LightBlue,
# Colors for printing the exception
excName = C.LightRed,
line = C.Yellow,
caret = C.White,
Normal = C.Normal,
ExecutingNode = 'bg:#00005f'
))
# For light backgrounds, swap dark/light colors
ex_colors.add_scheme(ColorScheme(
'LightBG',
# The color to be used for the top line
topline = C.Red,
# The colors to be used in the traceback
filename = C.LightGreen,
lineno = C.LightGreen,
name = C.LightPurple,
vName = C.Cyan,
val = C.LightGreen,
em = C.Cyan,
# Emphasized colors for the last frame of the traceback
normalEm = C.Cyan,
filenameEm = C.Green,
linenoEm = C.Green,
nameEm = C.Purple,
valEm = C.Blue,
# Colors for printing the exception
excName = C.Red,
#line = C.Brown, # brown often is displayed as yellow
line = C.Red,
caret = C.Normal,
Normal = C.Normal,
ExecutingNode = 'bg:#005f00',
))
ex_colors.add_scheme(ColorScheme(
'Neutral',
# The color to be used for the top line
topline = C.Red,
# The colors to be used in the traceback
filename = C.LightGreen,
lineno = C.LightGreen,
name = C.LightPurple,
vName = C.Cyan,
val = C.LightGreen,
em = C.Cyan,
# Emphasized colors for the last frame of the traceback
normalEm = C.Cyan,
filenameEm = C.Green,
linenoEm = C.Green,
nameEm = C.Purple,
valEm = C.Blue,
# Colors for printing the exception
excName = C.Red,
#line = C.Brown, # brown often is displayed as yellow
line = C.Red,
caret = C.Normal,
Normal = C.Normal,
ExecutingNode = 'bg:#dddddd'
))
# Hack: the 'neutral' colours are not very visible on a dark background on
# Windows. Since Windows command prompts have a dark background by default, and
# relatively few users are likely to alter that, we will use the 'Linux' colours,
# designed for a dark background, as the default on Windows.
if os.name == "nt":
ex_colors.add_scheme(ex_colors['Linux'].copy('Neutral'))
return ex_colors
|
47,268 |
def convert_to_localized_md(model_list, localized_model_list, format_str):
"""Convert `model_list` to each localized README."""
def _rep(match):
title, model_link, paper_affiliations, paper_title_link, paper_authors, supplements = match.groups()
return format_str.format(
title=title,
model_link=model_link,
paper_affiliations=paper_affiliations,
paper_title_link=paper_title_link,
paper_authors=paper_authors,
supplements=" " + supplements.strip() if len(supplements) != 0 else "",
)
# This regex captures metadata from an English model description, including model title, model link,
# affiliations of the paper, title of the paper, authors of the paper, and supplemental data (see DistilBERT for example).
_re_capture_meta = re.compile(
r"\*\*\[([^\]]*)\]\(([^\)]*)\)\*\* \(from ([^)]*)\)[^\[]*([^\)]*\)).*?by (.*?[A-Za-z\*]{2,}?)\. (.*)$"
)
# This regex is used to synchronize link.
_re_capture_title_link = re.compile(r"\*\*\[([^\]]*)\]\(([^\)]*)\)\*\*")
num_models_equal = True
if len(localized_model_list) == 0:
localized_model_index = {}
else:
try:
localized_model_index = {
re.search(r"\*\*\[([^\]]*)", line).groups()[0]: line
for line in localized_model_list.strip().split("\n")
}
except AttributeError:
raise AttributeError("A model name in localized READMEs cannot be recognized.")
for model in model_list.strip().split("\n"):
title, model_link = re.search(_re_capture_title_link, model).groups()
if title not in localized_model_index:
num_models_equal = False
# Add an anchor white space behind a model description string for regex.
# If metadata cannot be captured, the English version will be directly copied.
localized_model_index[title] = re.sub(_re_capture_meta, _rep, model + " ")
else:
# Synchronize link
localized_model_index[title] = re.sub(
_re_capture_title_link, f"**[{title}]({model_link})**", localized_model_index[title], count=1
)
sorted_index = sorted(localized_model_index.items(), key=lambda x: x[0].lower())
return num_models_equal, "\n".join(map(lambda x: x[1], sorted_index)) + "\n"
|
def convert_to_localized_md(model_list, localized_model_list, format_str):
"""Convert `model_list` to each localized README."""
def _rep(match):
title, model_link, paper_affiliations, paper_title_link, paper_authors, supplements = match.groups()
return format_str.format(
title=title,
model_link=model_link,
paper_affiliations=paper_affiliations,
paper_title_link=paper_title_link,
paper_authors=paper_authors,
supplements=" " + supplements.strip() if len(supplements) != 0 else "",
)
# This regex captures metadata from an English model description, including model title, model link,
# affiliations of the paper, title of the paper, authors of the paper, and supplemental data (see DistilBERT for example).
_re_capture_meta = re.compile(
r"\*\*\[([^\]]*)\]\(([^\)]*)\)\*\* \(from ([^)]*)\)[^\[]*([^\)]*\)).*?by (.*?[A-Za-z\*]{2,}?)\. (.*)$"
)
# This regex is used to synchronize link.
_re_capture_title_link = re.compile(r"\*\*\[([^\]]*)\]\(([^\)]*)\)\*\*")
num_models_equal = True
if len(localized_model_list) == 0:
localized_model_index = {}
else:
try:
localized_model_index = {
re.search(r"\*\*\[([^\]]*)", line).groups()[0]: line
for line in localized_model_list.strip().split("\n")
}
except AttributeError:
raise AttributeError("A model name in localized READMEs cannot be recognized.")
for model in model_list.strip().split("\n"):
        title, model_link = _re_capture_title_link.search(model).groups()
if title not in localized_model_index:
num_models_equal = False
# Add an anchor white space behind a model description string for regex.
# If metadata cannot be captured, the English version will be directly copied.
localized_model_index[title] = re.sub(_re_capture_meta, _rep, model + " ")
else:
# Synchronize link
localized_model_index[title] = re.sub(
_re_capture_title_link, f"**[{title}]({model_link})**", localized_model_index[title], count=1
)
sorted_index = sorted(localized_model_index.items(), key=lambda x: x[0].lower())
return num_models_equal, "\n".join(map(lambda x: x[1], sorted_index)) + "\n"
|
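What the title/link regex above captures, shown on a hypothetical README entry; the model name and URL are illustrative only.

import re

_re_capture_title_link = re.compile(r"\*\*\[([^\]]*)\]\(([^\)]*)\)\*\*")
line = "1. **[SomeModel](https://huggingface.co/docs/transformers/model_doc/somemodel)** (from Example Lab) ..."
title, model_link = _re_capture_title_link.search(line).groups()
print(title)       # SomeModel
print(model_link)  # https://huggingface.co/docs/transformers/model_doc/somemodel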
17,768 |
def _roles_custom_formatter(string: str) -> str:
if current_app.config.get("AUTH_ROLES_SYNC_AT_LOGIN", False):
string += (
". <b>Warning: AUTH_ROLES_SYNC_AT_LOGIN is enabled. "
"Changes to this field will not persiste."
)
return string
|
def _roles_custom_formatter(string: str) -> str:
if current_app.config.get("AUTH_ROLES_SYNC_AT_LOGIN", False):
string += (
". <b>Warning: AUTH_ROLES_SYNC_AT_LOGIN is enabled. "
"Changes to this field will not persist."
)
return string
|
6,071 |
def matchQueue(jobJDL, queueDict, fullMatch=False):
"""
Match the job description to the queue definition
:param str job: JDL job description
:param bool fullMatch: test matching on all the criteria
:param dict queueDict: queue parameters dictionary
:return: S_OK/S_ERROR, Value - result of matching, S_OK if matched or
S_ERROR with the reason for no match
"""
# Check the job description validity
job = ClassAd(jobJDL)
if not job.isOK():
return S_ERROR('Invalid job description')
noMatchReasons = []
# Check job requirements to resource
# 1. CPUTime
cpuTime = job.getAttributeInt('CPUTime')
if not cpuTime:
cpuTime = 84600
if cpuTime and cpuTime > queueDict.get('CPUTime', 0.):
noMatchReasons.append('Job CPUTime requirement not satisfied')
if not fullMatch:
return S_OK({'Match': False, 'Reason': noMatchReasons[0]})
# 2. Multi-value match requirements
for parameter in ['Site', 'GridCE', 'Platform', 'GridMiddleware',
'PilotType', 'SubmitPool', 'JobType']:
if parameter in queueDict:
valueSet = set(job.getListFromExpression(parameter))
if not valueSet:
valueSet = set(job.getListFromExpression('%ss' % parameter))
queueSet = set(fromChar(queueDict[parameter]))
if valueSet and queueSet and not valueSet.intersection(queueSet):
valueToPrint = ','.join(valueSet)
if len(valueToPrint) > 20:
valueToPrint = "%s..." % valueToPrint[:20]
noMatchReasons.append('Job %s %s requirement not satisfied' % (parameter, valueToPrint))
if not fullMatch:
return S_OK({'Match': False, 'Reason': noMatchReasons[0]})
# 3. Banned multi-value match requirements
for par in ['Site', 'GridCE', 'Platform', 'GridMiddleware',
'PilotType', 'SubmitPool', 'JobType']:
parameter = "Banned%s" % par
if par in queueDict:
valueSet = set(job.getListFromExpression(parameter))
if not valueSet:
valueSet = set(job.getListFromExpression('%ss' % parameter))
queueSet = set(fromChar(queueDict[par]))
if valueSet and queueSet and valueSet.issubset(queueSet):
valueToPrint = ','.join(valueSet)
if len(valueToPrint) > 20:
valueToPrint = "%s..." % valueToPrint[:20]
noMatchReasons.append('Job %s %s requirement not satisfied' % (parameter, valueToPrint))
if not fullMatch:
return S_OK({'Match': False, 'Reason': noMatchReasons[0]})
# 4. Tags
tags = set(job.getListFromExpression('Tag'))
nProc = job.getAttributeInt('NumberOfProcessors')
if nProc and nProc > 1:
tags.add('MultiProcessor')
wholeNode = job.getAttributeString('WholeNode')
if wholeNode:
tags.add('WholeNode')
queueTags = set(queueDict.get('Tags', []))
if not tags.issubset(queueTags):
noMatchReasons.append('Job Tag %s not satisfied' % ','.join(tags))
if not fullMatch:
return S_OK({'Match': False, 'Reason': noMatchReasons[0]})
  # 5. MultiProcessor requirements
if nProc and nProc > int(queueDict.get('NumberOfProcessors', 1)):
noMatchReasons.append('Job NumberOfProcessors %d requirement not satisfied' % nProc)
if not fullMatch:
return S_OK({'Match': False, 'Reason': noMatchReasons[0]})
  # 6. RAM
ram = job.getAttributeInt('RAM')
if ram and ram > int(queueDict['MaxRAM']):
noMatchReasons.append('Job RAM %d requirement not satisfied' % ram)
if not fullMatch:
return S_OK({'Match': False, 'Reason': noMatchReasons[0]})
# Check resource requirements to job
# 1. OwnerGroup - rare case but still
if "OwnerGroup" in queueDict:
result = getProxyInfo(disableVOMS=True)
if not result['OK']:
return S_ERROR('No valid proxy available')
ownerGroup = result['Value']['group']
if ownerGroup != queueDict['OwnerGroup']:
noMatchReasons.append('Resource OwnerGroup %s requirement not satisfied' % queueDict['OwnerGroup'])
if not fullMatch:
return S_OK({'Match': False, 'Reason': noMatchReasons[0]})
# 2. Required tags
requiredTags = set(queueDict.get('RequiredTags', []))
if not requiredTags.issubset(tags):
noMatchReasons.append('Resource RequiredTags %s not satisfied' % ','.join(requiredTags))
if not fullMatch:
return S_OK({'Match': False, 'Reason': noMatchReasons[0]})
# 3. RunningLimit
site = queueDict['Site']
opsHelper = Operations()
result = opsHelper.getSections('JobScheduling/RunningLimit')
if result['OK'] and site in result['Value']:
result = opsHelper.getSections('JobScheduling/RunningLimit/%s' % site)
if result['OK']:
for parameter in result['Value']:
value = job.getAttributeString(parameter)
if value and opsHelper.getValue('JobScheduling/RunningLimit/%s/%s/%s' % (site, parameter, value), 1) == 0:
noMatchReasons.append('Resource operational %s requirement not satisfied' % parameter)
if not fullMatch:
return S_OK({'Match': False, 'Reason': noMatchReasons[0]})
if noMatchReasons:
return S_OK({'Match': False, 'Reason': noMatchReasons})
return S_OK({'Match': True, 'Reason': noMatchReasons})
|
def matchQueue(jobJDL, queueDict, fullMatch=False):
"""
Match the job description to the queue definition
:param str job: JDL job description
:param bool fullMatch: test matching on all the criteria
:param dict queueDict: queue parameters dictionary
:return: S_OK/S_ERROR, Value - result of matching, S_OK if matched or
S_ERROR with the reason for no match
"""
# Check the job description validity
job = ClassAd(jobJDL)
if not job.isOK():
return S_ERROR('Invalid job description')
noMatchReasons = []
# Check job requirements to resource
# 1. CPUTime
cpuTime = job.getAttributeInt('CPUTime')
if not cpuTime:
cpuTime = 84600
if cpuTime > queueDict.get('CPUTime', 0):
noMatchReasons.append('Job CPUTime requirement not satisfied')
if not fullMatch:
return S_OK({'Match': False, 'Reason': noMatchReasons[0]})
# 2. Multi-value match requirements
for parameter in ['Site', 'GridCE', 'Platform', 'GridMiddleware',
'PilotType', 'SubmitPool', 'JobType']:
if parameter in queueDict:
valueSet = set(job.getListFromExpression(parameter))
if not valueSet:
valueSet = set(job.getListFromExpression('%ss' % parameter))
queueSet = set(fromChar(queueDict[parameter]))
if valueSet and queueSet and not valueSet.intersection(queueSet):
valueToPrint = ','.join(valueSet)
if len(valueToPrint) > 20:
valueToPrint = "%s..." % valueToPrint[:20]
noMatchReasons.append('Job %s %s requirement not satisfied' % (parameter, valueToPrint))
if not fullMatch:
return S_OK({'Match': False, 'Reason': noMatchReasons[0]})
# 3. Banned multi-value match requirements
for par in ['Site', 'GridCE', 'Platform', 'GridMiddleware',
'PilotType', 'SubmitPool', 'JobType']:
parameter = "Banned%s" % par
if par in queueDict:
valueSet = set(job.getListFromExpression(parameter))
if not valueSet:
valueSet = set(job.getListFromExpression('%ss' % parameter))
queueSet = set(fromChar(queueDict[par]))
if valueSet and queueSet and valueSet.issubset(queueSet):
valueToPrint = ','.join(valueSet)
if len(valueToPrint) > 20:
valueToPrint = "%s..." % valueToPrint[:20]
noMatchReasons.append('Job %s %s requirement not satisfied' % (parameter, valueToPrint))
if not fullMatch:
return S_OK({'Match': False, 'Reason': noMatchReasons[0]})
# 4. Tags
tags = set(job.getListFromExpression('Tag'))
nProc = job.getAttributeInt('NumberOfProcessors')
if nProc and nProc > 1:
tags.add('MultiProcessor')
wholeNode = job.getAttributeString('WholeNode')
if wholeNode:
tags.add('WholeNode')
queueTags = set(queueDict.get('Tags', []))
if not tags.issubset(queueTags):
noMatchReasons.append('Job Tag %s not satisfied' % ','.join(tags))
if not fullMatch:
return S_OK({'Match': False, 'Reason': noMatchReasons[0]})
  # 5. MultiProcessor requirements
if nProc and nProc > int(queueDict.get('NumberOfProcessors', 1)):
noMatchReasons.append('Job NumberOfProcessors %d requirement not satisfied' % nProc)
if not fullMatch:
return S_OK({'Match': False, 'Reason': noMatchReasons[0]})
  # 6. RAM
ram = job.getAttributeInt('RAM')
if ram and ram > int(queueDict['MaxRAM']):
noMatchReasons.append('Job RAM %d requirement not satisfied' % ram)
if not fullMatch:
return S_OK({'Match': False, 'Reason': noMatchReasons[0]})
# Check resource requirements to job
# 1. OwnerGroup - rare case but still
if "OwnerGroup" in queueDict:
result = getProxyInfo(disableVOMS=True)
if not result['OK']:
return S_ERROR('No valid proxy available')
ownerGroup = result['Value']['group']
if ownerGroup != queueDict['OwnerGroup']:
noMatchReasons.append('Resource OwnerGroup %s requirement not satisfied' % queueDict['OwnerGroup'])
if not fullMatch:
return S_OK({'Match': False, 'Reason': noMatchReasons[0]})
# 2. Required tags
requiredTags = set(queueDict.get('RequiredTags', []))
if not requiredTags.issubset(tags):
noMatchReasons.append('Resource RequiredTags %s not satisfied' % ','.join(requiredTags))
if not fullMatch:
return S_OK({'Match': False, 'Reason': noMatchReasons[0]})
# 3. RunningLimit
site = queueDict['Site']
opsHelper = Operations()
result = opsHelper.getSections('JobScheduling/RunningLimit')
if result['OK'] and site in result['Value']:
result = opsHelper.getSections('JobScheduling/RunningLimit/%s' % site)
if result['OK']:
for parameter in result['Value']:
value = job.getAttributeString(parameter)
if value and opsHelper.getValue('JobScheduling/RunningLimit/%s/%s/%s' % (site, parameter, value), 1) == 0:
noMatchReasons.append('Resource operational %s requirement not satisfied' % parameter)
if not fullMatch:
return S_OK({'Match': False, 'Reason': noMatchReasons[0]})
if noMatchReasons:
return S_OK({'Match': False, 'Reason': noMatchReasons})
return S_OK({'Match': True, 'Reason': noMatchReasons})
|
42,987 |
def apply_twomode_gate(mat, state, pure, modes, n, trunc, gate="BSgate"):
"""Applies a two-mode gate to a state
Applies the two-mode gate to the state using custom tensor contractions and
the numba compiler for faster application.
Args:
mat (ndarray): The BS operator to be applied to the state
state (ndarray): The state that the BS is applied to
pure (bool): If the state is pure or mixed
modes (list[int]): A list of modes to which the BS is applied
n (int): The total number of modes
trunc (int): The Hilbert space truncation/cutoff
gate (str): the gate which should be called (BSgate, S2gate)
Returns:
ndarray: State where the two-mode operation has been applied
"""
if pure:
t1 = modes[0]
t2 = modes[1]
# put the ket-values in front to be operated on in the apply function
switch_list_1 = np.arange(n)
switch_list_2 = np.arange(n)
switch_list_1[[0, t1]] = switch_list_1[[t1, 0]]
switch_list_2[[1, t2]] = switch_list_2[[t2, 1]]
state = state.transpose(switch_list_1)
state = state.transpose(switch_list_2)
if gate == "BSgate":
state = _apply_BS(mat, state, trunc)
elif gate == "S2gate":
state = _apply_S2(mat, state, trunc)
else:
raise NotImplementedError
state = state.transpose(switch_list_2)
ret = state.transpose(switch_list_1)
else:
t1 = 2 * modes[0]
t2 = 2 * modes[1]
# put the ket-values in front to be operated on in the apply function
switch_list_1 = np.arange(2 * n)
switch_list_2 = np.arange(2 * n)
switch_list_1[[0, 1, t1, t1+1]] = switch_list_1[[t1, t1+1, 0, 1]]
switch_list_2[[0, 1, t2, t2+1]] = switch_list_2[[t2, t2+1, 0, 1]]
# put bra-values to the left, and ket-values to the right (ignoring values not operated on)
transpose_list = np.arange(2 * n)
transpose_list[[t1+1, t2]] = transpose_list[[t2, t1+1]]
state = state.transpose(transpose_list)
state = state.transpose(switch_list_1)
if gate == "BSgate":
state = _apply_BS(mat, state, trunc)
state = state.transpose(switch_list_1)
state = state.transpose(switch_list_2)
state = _apply_BS(mat.conj(), state, trunc)
elif gate == "S2gate":
state = _apply_S2(mat, state, trunc)
state = state.transpose(switch_list_1)
state = state.transpose(switch_list_2)
state = _apply_S2(mat.conj(), state, trunc)
else:
raise NotImplementedError
state = state.transpose(switch_list_2)
ret = state.transpose(transpose_list)
return ret
|
def apply_twomode_gate(mat, state, pure, modes, n, trunc, gate="BSgate"):
"""Applies a two-mode gate to a state
Applies the two-mode gate to the state using custom tensor contractions and
the numba compiler for faster application.
Args:
mat (array[complex]): The numeric operator to be applied to the state, of shape `[trunc]*(2*n)`
state (ndarray): The state that the BS is applied to
pure (bool): If the state is pure or mixed
modes (list[int]): A list of modes to which the BS is applied
n (int): The total number of modes
trunc (int): The Hilbert space truncation/cutoff
gate (str): the gate which should be called (BSgate, S2gate)
Returns:
ndarray: State where the two-mode operation has been applied
"""
if pure:
t1 = modes[0]
t2 = modes[1]
# put the ket-values in front to be operated on in the apply function
switch_list_1 = np.arange(n)
switch_list_2 = np.arange(n)
switch_list_1[[0, t1]] = switch_list_1[[t1, 0]]
switch_list_2[[1, t2]] = switch_list_2[[t2, 1]]
state = state.transpose(switch_list_1)
state = state.transpose(switch_list_2)
if gate == "BSgate":
state = _apply_BS(mat, state, trunc)
elif gate == "S2gate":
state = _apply_S2(mat, state, trunc)
else:
raise NotImplementedError
state = state.transpose(switch_list_2)
ret = state.transpose(switch_list_1)
else:
t1 = 2 * modes[0]
t2 = 2 * modes[1]
# put the ket-values in front to be operated on in the apply function
switch_list_1 = np.arange(2 * n)
switch_list_2 = np.arange(2 * n)
switch_list_1[[0, 1, t1, t1+1]] = switch_list_1[[t1, t1+1, 0, 1]]
switch_list_2[[0, 1, t2, t2+1]] = switch_list_2[[t2, t2+1, 0, 1]]
# put bra-values to the left, and ket-values to the right (ignoring values not operated on)
transpose_list = np.arange(2 * n)
transpose_list[[t1+1, t2]] = transpose_list[[t2, t1+1]]
state = state.transpose(transpose_list)
state = state.transpose(switch_list_1)
if gate == "BSgate":
state = _apply_BS(mat, state, trunc)
state = state.transpose(switch_list_1)
state = state.transpose(switch_list_2)
state = _apply_BS(mat.conj(), state, trunc)
elif gate == "S2gate":
state = _apply_S2(mat, state, trunc)
state = state.transpose(switch_list_1)
state = state.transpose(switch_list_2)
state = _apply_S2(mat.conj(), state, trunc)
else:
raise NotImplementedError
state = state.transpose(switch_list_2)
ret = state.transpose(transpose_list)
return ret
|
35,822 |
def resize_image_tensor(
image: torch.Tensor,
size: Union[List[int], int],
interpolation: InterpolationMode = InterpolationMode.BILINEAR,
max_size: Optional[int] = None,
antialias: bool = False,
) -> torch.Tensor:
if isinstance(size, int):
size = [size]
num_channels, old_height, old_width = get_dimensions_image_tensor(image)
new_height, new_width = _compute_resized_output_size((old_height, old_width), size=size, max_size=max_size)
extra_dims = image.shape[:-3]
if image.numel() > 0:
image = _FT.resize(
image.view(-1, num_channels, old_height, old_width),
size=[new_height, new_width],
interpolation=interpolation.value,
antialias=antialias,
)
return image.view(extra_dims + (num_channels, new_height, new_width))
|
def resize_image_tensor(
image: torch.Tensor,
size: Union[int, List[int]],
interpolation: InterpolationMode = InterpolationMode.BILINEAR,
max_size: Optional[int] = None,
antialias: bool = False,
) -> torch.Tensor:
if isinstance(size, int):
size = [size]
num_channels, old_height, old_width = get_dimensions_image_tensor(image)
new_height, new_width = _compute_resized_output_size((old_height, old_width), size=size, max_size=max_size)
extra_dims = image.shape[:-3]
if image.numel() > 0:
image = _FT.resize(
image.view(-1, num_channels, old_height, old_width),
size=[new_height, new_width],
interpolation=interpolation.value,
antialias=antialias,
)
return image.view(extra_dims + (num_channels, new_height, new_width))
|
16,164 |
def comment_requirement(req):
"""Comment out requirement. Some don't install on all systems."""
idx=req.find("==")
if idx >= 0:
return any(ign.lower() == req[0:idx].lower() for ign in COMMENT_REQUIREMENTS)
return any(ign.lower() == req.lower() for ign in COMMENT_REQUIREMENTS)
|
def comment_requirement(req):
"""Comment out requirement. Some don't install on all systems."""
return any(f"{ign.lower()}==" in req.lower() for ign in COMMENT_REQUIREMENTS)
|
5,337 |
def validate(config):
_config = {}
list(map(_config.update, config))
if isinstance(_config["salt_fun"], str):
        # a simple str is taken as a single function with no args / kwargs
fun = _config["salt_fun"]
if fun not in __salt__:
return False, "{} not in __salt__".format(fun)
else:
for entry in _config["salt_fun"]:
if isinstance(entry, dict):
# check dict is of correct form
fun, args_kwargs_dict = list(entry.items())[0]
for key in args_kwargs_dict.keys():
if key == "args":
if not isinstance(args_kwargs_dict[key], list):
return (
False,
"args key for fun {} must be list".format(fun),
)
elif key == "kwargs":
if not isinstance(args_kwargs_dict[key], list):
return (
False,
"kwargs key for fun {} must be list of key value pairs".format(
fun
),
)
for key_value in args_kwargs_dict[key]:
if not isinstance(key_value, dict):
return (
False,
"{} is not a key / value pair".format(key_value),
)
else:
return (
False,
"key {} not allowed under fun {}".format(key, fun),
)
else:
# entry must be function itself
fun = entry
if fun not in __salt__:
return False, "{} not in __salt__".format(fun)
return True, "valid config"
|
def validate(config):
_config = {}
list(map(_config.update, config))
if isinstance(_config["salt_fun"], str):
        # a simple str is taken as a single function with no args / kwargs
fun = _config["salt_fun"]
if fun not in __salt__:
return False, "{} not in __salt__".format(fun)
else:
for entry in _config["salt_fun"]:
if isinstance(entry, dict):
# check dict is of correct form
fun, args_kwargs_dict = next(iter(entry.items()))
for key in args_kwargs_dict.keys():
if key == "args":
if not isinstance(args_kwargs_dict[key], list):
return (
False,
"args key for fun {} must be list".format(fun),
)
elif key == "kwargs":
if not isinstance(args_kwargs_dict[key], list):
return (
False,
"kwargs key for fun {} must be list of key value pairs".format(
fun
),
)
for key_value in args_kwargs_dict[key]:
if not isinstance(key_value, dict):
return (
False,
"{} is not a key / value pair".format(key_value),
)
else:
return (
False,
"key {} not allowed under fun {}".format(key, fun),
)
else:
# entry must be function itself
fun = entry
if fun not in __salt__:
return False, "{} not in __salt__".format(fun)
return True, "valid config"
|
7,481 |
def _check_numpy():
"""
Check that Numpy is installed and it is of the minimum version we
require.
"""
# Note: We could have used distutils.version for this comparison,
# but it seems like overkill to import distutils at runtime.
requirement_met = False
import_fail = ''
try:
import numpy
except ImportError:
import_fail = 'Cannot import numpy at all.'
else:
from .utils import minversion
requirement_met = minversion(numpy, __minimum_numpy_version__)
if not requirement_met:
msg = ("Numpy version {0} or later must be installed to use "
"Astropy. {1}".format(__minimum_numpy_version__, import_fail))
raise ImportError(msg)
return numpy
|
def _check_numpy():
"""
Check that Numpy is installed and it is of the minimum version we
require.
"""
# Note: We could have used distutils.version for this comparison,
# but it seems like overkill to import distutils at runtime.
requirement_met = False
import_fail = ''
try:
import numpy
except ImportError:
import_fail = 'Numpy is not installed.'
else:
from .utils import minversion
requirement_met = minversion(numpy, __minimum_numpy_version__)
if not requirement_met:
msg = ("Numpy version {0} or later must be installed to use "
"Astropy. {1}".format(__minimum_numpy_version__, import_fail))
raise ImportError(msg)
return numpy
|
937 |
def simple_cythonize(src, destdir=None, cwd=None, **cy_kwargs):
""" Generates a C file from a Cython source file.
Parameters
==========
src: str
Path to Cython source.
destdir: str (optional)
Path to output directory (default: '.').
cwd: path string (optional)
Root of relative paths (default: '.').
**cy_kwargs:
Second argument passed to cy_compile. Generates a .cpp file if ``cplus=True`` in ``cy_kwargs``,
else a .c file.
"""
from Cython.Compiler.Main import (
default_options, CompilationOptions
)
from Cython.Compiler.Main import compile as cy_compile
assert src.lower().endswith('.pyx') or src.lower().endswith('.py')
cwd = cwd or '.'
destdir = destdir or '.'
ext = '.cpp' if cy_kwargs.get('cplus', False) else '.c'
c_name = os.path.splitext(os.path.basename(src))[0] + ext
dstfile = os.path.join(destdir, c_name)
if cwd:
ori_dir = os.getcwd()
else:
ori_dir = '.'
os.chdir(cwd)
try:
cy_options = CompilationOptions(default_options)
cy_options.__dict__.update(cy_kwargs)
# Set language_level if not set by cy_kwargs
# as not setting it is deprecated
if 'language_level' not in cy_kwargs:
cy_options['language_level'] = 3
cy_result = cy_compile([src], cy_options)
if cy_result.num_errors > 0:
raise ValueError("Cython compilation failed.")
if os.path.abspath(os.path.dirname(src)) != os.path.abspath(destdir):
if os.path.exists(dstfile):
os.unlink(dstfile)
shutil.move(os.path.join(os.path.dirname(src), c_name), destdir)
finally:
os.chdir(ori_dir)
return dstfile
|
def simple_cythonize(src, destdir=None, cwd=None, **cy_kwargs):
""" Generates a C file from a Cython source file.
Parameters
==========
src: str
Path to Cython source.
destdir: str (optional)
Path to output directory (default: '.').
cwd: path string (optional)
Root of relative paths (default: '.').
**cy_kwargs:
Second argument passed to cy_compile. Generates a .cpp file if ``cplus=True`` in ``cy_kwargs``,
else a .c file.
"""
from Cython.Compiler.Main import (
default_options, CompilationOptions
)
from Cython.Compiler.Main import compile as cy_compile
assert src.lower().endswith('.pyx') or src.lower().endswith('.py')
cwd = cwd or '.'
destdir = destdir or '.'
ext = '.cpp' if cy_kwargs.get('cplus', False) else '.c'
c_name = os.path.splitext(os.path.basename(src))[0] + ext
dstfile = os.path.join(destdir, c_name)
if cwd:
ori_dir = os.getcwd()
else:
ori_dir = '.'
os.chdir(cwd)
try:
cy_options = CompilationOptions(default_options)
cy_options.__dict__.update(cy_kwargs)
# Set language_level if not set by cy_kwargs
# as not setting it is deprecated
if 'language_level' not in cy_kwargs:
cy_options.__dict__['language_level'] = 3
cy_result = cy_compile([src], cy_options)
if cy_result.num_errors > 0:
raise ValueError("Cython compilation failed.")
if os.path.abspath(os.path.dirname(src)) != os.path.abspath(destdir):
if os.path.exists(dstfile):
os.unlink(dstfile)
shutil.move(os.path.join(os.path.dirname(src), c_name), destdir)
finally:
os.chdir(ori_dir)
return dstfile
|
54,670 |
def test_reverse_ens(rotkehlchen_api_server):
"""Test that we can reverse resolve ENS names"""
db = DBEns(rotkehlchen_api_server.rest_api.rotkehlchen.data.db)
db_conn = rotkehlchen_api_server.rest_api.rotkehlchen.data.db.conn
addrs_1 = [
to_checksum_address('0x9531c059098e3d194ff87febb587ab07b30b1306'),
to_checksum_address('0x2b888954421b424c5d3d9ce9bb67c9bd47537d12'),
]
response = requests.post(
api_url_for(
rotkehlchen_api_server,
'reverseensresource',
),
json={'ethereum_addresses': addrs_1},
)
result = assert_proper_response_with_result(response)
expected_resp_1 = {
addrs_1[0]: 'rotki.eth',
addrs_1[1]: 'lefteris.eth',
}
assert result == expected_resp_1
addrs_2 = [
to_checksum_address('0x9531c059098e3d194ff87febb587ab07b30b1306'),
to_checksum_address('0xa4b73b39f73f73655e9fdc5d167c21b3fa4a1ed6'),
to_checksum_address('0x71C7656EC7ab88b098defB751B7401B5f6d8976F'),
]
timestamps_before_request = _get_timestamps(db, addrs_1)
response = requests.post(
api_url_for(
rotkehlchen_api_server,
'reverseensresource',
),
json={'ethereum_addresses': addrs_2},
)
result = assert_proper_response_with_result(response)
all_addrs = list(set(addrs_1) | set(addrs_2))
expected_resp_2 = {
addrs_2[0]: 'rotki.eth',
addrs_2[1]: 'abc.eth',
}
assert result == expected_resp_2
timestamps_after_request = _get_timestamps(db, addrs_1)
assert timestamps_before_request == timestamps_after_request
# Going to check that after request with ignore_cache ens_mappings will be updated
db_changes_before = db_conn.total_changes
response = requests.post(
api_url_for(
rotkehlchen_api_server,
'reverseensresource',
),
json={'ethereum_addresses': ['0xqwerty']},
)
assert_error_response(
response=response,
contained_in_msg='Given value 0xqwerty is not an ethereum address',
status_code=HTTPStatus.BAD_REQUEST,
)
requests.post(
api_url_for(
rotkehlchen_api_server,
'reverseensresource',
),
json={'ethereum_addresses': all_addrs, 'ignore_cache': True},
)
db_changes_after = db_conn.total_changes
# Check that we have 5 updates because we have 5 rows in ens_mappings table
assert db_changes_after == 5 + db_changes_before
|
def test_reverse_ens(rotkehlchen_api_server):
"""Test that we can reverse resolve ENS names"""
db = DBEns(rotkehlchen_api_server.rest_api.rotkehlchen.data.db)
db_conn = rotkehlchen_api_server.rest_api.rotkehlchen.data.db.conn
addrs_1 = [
to_checksum_address('0x9531c059098e3d194ff87febb587ab07b30b1306'),
to_checksum_address('0x2b888954421b424c5d3d9ce9bb67c9bd47537d12'),
]
response = requests.post(
api_url_for(
rotkehlchen_api_server,
'reverseensresource',
),
json={'ethereum_addresses': addrs_1},
)
result = assert_proper_response_with_result(response)
expected_resp_1 = {
addrs_1[0]: 'rotki.eth',
addrs_1[1]: 'lefteris.eth',
}
assert result == expected_resp_1
addrs_2 = [
to_checksum_address('0x9531c059098e3d194ff87febb587ab07b30b1306'),
to_checksum_address('0xa4b73b39f73f73655e9fdc5d167c21b3fa4a1ed6'),
to_checksum_address('0x71C7656EC7ab88b098defB751B7401B5f6d8976F'),
]
timestamps_before_request = _get_timestamps(db, addrs_1)
response = requests.post(
api_url_for(
rotkehlchen_api_server,
'reverseensresource',
),
json={'ethereum_addresses': addrs_2},
)
result = assert_proper_response_with_result(response)
all_addrs = list(set(addrs_1) | set(addrs_2))
expected_resp_2 = {
addrs_2[0]: 'rotki.eth',
addrs_2[1]: 'abc.eth',
}
assert result == expected_resp_2
timestamps_after_request = _get_timestamps(db, addrs_1)
assert timestamps_before_request == timestamps_after_request
# Going to check that after request with ignore_cache ens_mappings will be updated
db_changes_before = db_conn.total_changes
response = requests.post(
api_url_for(
rotkehlchen_api_server,
'reverseensresource',
),
json={'ethereum_addresses': ['0xqwerty']},
)
assert_error_response(
response=response,
contained_in_msg='Given value 0xqwerty is not an ethereum address',
status_code=HTTPStatus.BAD_REQUEST,
)
requests.post(
api_url_for(
rotkehlchen_api_server,
'reverseensresource',
),
json={'ethereum_addresses': all_addrs, 'ignore_cache': True},
)
db_changes_after = db_conn.total_changes
# Check that we have 5 updates because we have 5 rows in ens_mappings table
assert db_changes_after == 5 + db_changes_before
|
31,772 |
def fetch_cases(client: Client, limit: int, fetch_time: str,
case_tags_filter: str, case_status_filter: str, case_priority_filter: str):
case_incidents = []
last_run = demisto.getLastRun()
case_last_run = last_run.get('CaseLastRun')
next_run = dateparser.parse(fetch_time).strftime("%Y-%m-%dT%H:%M:%SZ")
cases_list_args = {'count': limit}
if case_last_run:
cases_list_args['timestamp_filter_type'] = 'createdAfter' # type: ignore
cases_list_args['timestamp'] = case_last_run
elif next_run:
cases_list_args['timestamp_filter_type'] = 'createdAfter' # type: ignore
cases_list_args['timestamp'] = next_run
# filter cases
if case_tags_filter:
cases_list_args['tags'] = case_tags_filter # type: ignore
if case_status_filter:
cases_list_args['status'] = str(CASE_STATUS.get(case_status_filter)) # type: ignore
if case_priority_filter:
cases_list_args['priority'] = case_priority_filter # type: ignore
cases = client.cases_list_request(**cases_list_args)
for case in cases:
case['incidentType'] = 'Case'
incident = {
'name': f'Case #{str(case.get("number"))} {case.get("name")}',
'occurred': case.get('dateCreated'),
'rawJSON': json.dumps(case)
}
case_incidents.append(incident)
if cases:
last_run['CaseLastRun'] = cases[-1].get('dateCreated')
demisto.setLastRun(last_run)
return case_incidents
|
def fetch_cases(client: Client, limit: int, fetch_time: str,
case_tags_filter: str, case_status_filter: str, case_priority_filter: str):
case_incidents = []
last_run = demisto.getLastRun()
case_last_run = last_run.get('CaseLastRun')
next_run = dateparser.parse(fetch_time).strftime("%Y-%m-%dT%H:%M:%SZ")
cases_list_args = {'count': limit}
if case_last_run:
cases_list_args['timestamp_filter_type'] = 'createdAfter' # type: ignore
cases_list_args['timestamp'] = case_last_run
elif next_run:
cases_list_args['timestamp_filter_type'] = 'createdAfter' # type: ignore
cases_list_args['timestamp'] = next_run
# filter cases
if case_tags_filter:
cases_list_args['tags'] = case_tags_filter # type: ignore
if case_status_filter:
cases_list_args['status'] = str(CASE_STATUS.get(case_status_filter)) # type: ignore
if case_priority_filter:
cases_list_args['priority'] = case_priority_filter # type: ignore
cases = client.cases_list_request(**cases_list_args)
for case in cases:
case['incidentType'] = 'Case'
incident = {
'name': f'Case #{case.get("number")} {case.get("name")}',
'occurred': case.get('dateCreated'),
'rawJSON': json.dumps(case)
}
case_incidents.append(incident)
if cases:
last_run['CaseLastRun'] = cases[-1].get('dateCreated')
demisto.setLastRun(last_run)
return case_incidents
|
32,314 |
def get_public_ip_details_command(client: MsGraphClient, args: dict):
resource_group = args.get('resource_group')
address_name = args.get('address_name')
response = client.get_public_ip_details(resource_group, address_name)
# Retrieve relevant properties to return to context
properties = response.get('properties')
address_id = response.get('id')
config_id = properties.get('ipConfiguration', {}).get('id')
ip_address = properties.get('ipAddress', 'NA')
ip_address_version = properties.get('publicIPAddressVersion', 'NA')
ip_address_allocation_method = properties.get('publicIPAllocationMethod', 'NA')
address_domain_name = properties.get('dnsSettings', {}).get('domainNameLabel', 'NA')
address_fqdn = properties.get('dnsSettings', {}).get('fqdn', 'NA')
config_name = response.get('name')
location = response.get('location')
ip_config = {
'PublicIPAddressID': address_id,
'PublicConfigName': config_name,
'Location': location,
'PublicConfigID': config_id,
'ResourceGroup': args.get('resource_group'),
'PublicIPAddress': ip_address,
'PublicIPAddressVersion': ip_address_version,
'PublicIPAddressAllocationMethod': ip_address_allocation_method,
'PublicIPAddressDomainName': address_domain_name,
'PublicIPAddressFQDN': address_fqdn
}
title = 'Properties of Public Address "{}"'.format(address_name)
table_headers = ['Name', 'PublicIPAddressID', 'PublicIPAddress', 'PublicIPAddressFQDN', 'ConfigName']
human_readable = tableToMarkdown(title, ip_config, headers=table_headers, removeNull=True)
entry_context = {'Azure.NetworkInterfaces.IPConfigurations(val.PublicIPAddressID === '
'obj.PublicIPAddressID)': ip_config}
return human_readable, entry_context, response
|
def get_public_ip_details_command(client: MsGraphClient, args: dict):
resource_group = args.get('resource_group')
address_name = args.get('address_name')
response = client.get_public_ip_details(resource_group, address_name)
# Retrieve relevant properties to return to context
properties = response.get('properties')
address_id = response.get('id')
config_id = properties.get('ipConfiguration', {}).get('id')
ip_address = properties.get('ipAddress', 'NA')
ip_address_version = properties.get('publicIPAddressVersion', 'NA')
ip_address_allocation_method = properties.get('publicIPAllocationMethod', 'NA')
address_domain_name = properties.get('dnsSettings', {}).get('domainNameLabel', 'NA')
address_fqdn = properties.get('dnsSettings', {}).get('fqdn', 'NA')
config_name = response.get('name')
location = response.get('location')
ip_config = {
'PublicIPAddressID': address_id,
'PublicConfigName': config_name,
'Location': location,
'PublicConfigID': config_id,
'ResourceGroup': resource_group,
'PublicIPAddress': ip_address,
'PublicIPAddressVersion': ip_address_version,
'PublicIPAddressAllocationMethod': ip_address_allocation_method,
'PublicIPAddressDomainName': address_domain_name,
'PublicIPAddressFQDN': address_fqdn
}
title = 'Properties of Public Address "{}"'.format(address_name)
table_headers = ['Name', 'PublicIPAddressID', 'PublicIPAddress', 'PublicIPAddressFQDN', 'ConfigName']
human_readable = tableToMarkdown(title, ip_config, headers=table_headers, removeNull=True)
entry_context = {'Azure.NetworkInterfaces.IPConfigurations(val.PublicIPAddressID === '
'obj.PublicIPAddressID)': ip_config}
return human_readable, entry_context, response
|
32,763 |
def _client_channel_interceptor(wrapped, instance, args, kwargs):
channel = wrapped(*args, **kwargs)
(host, port) = _parse_target_from_arguments(args, kwargs)
# DEV: we clone the pin on the grpc module and configure it for the client
# interceptor
pin = Pin.get_from(grpc)
if not pin:
return channel
tags = {
'grpc.host': host,
'grpc.port': port,
}
if pin and pin.tags:
tags.update(pin.tags)
pin = pin.clone(tags=tags)
channel = grpc.intercept_channel(channel, create_client_interceptor(pin))
return channel
|
def _client_channel_interceptor(wrapped, instance, args, kwargs):
channel = wrapped(*args, **kwargs)
(host, port) = _parse_target_from_arguments(args, kwargs)
# DEV: we clone the pin on the grpc module and configure it for the client
# interceptor
pin = Pin.get_from(grpc)
if not pin:
return channel
tags = {
'grpc.host': host,
'grpc.port': port,
}
if pin and pin.tags:
tags.update(pin.tags)
pin = pin.clone(tags=tags)
    return grpc.intercept_channel(channel, create_client_interceptor(pin))
|
56,983 |
def main(args=None):
"""Starts up a development server running Oppia."""
parsed_args = _PARSER.parse_args(args=args)
# Runs cleanup function on exit.
atexit.register(cleanup)
# Check that there isn't a server already running.
if common.is_port_open(PORT_NUMBER_FOR_GAE_SERVER):
common.print_each_string_after_two_new_lines([
'WARNING',
'Could not start new server. There is already an existing server',
'running at port %s.'
% python_utils.UNICODE(PORT_NUMBER_FOR_GAE_SERVER)])
clear_datastore_arg = (
'' if parsed_args.save_datastore else '--clear_datastore=true')
enable_console_arg = (
'--enable_console=true' if parsed_args.enable_console else '')
disable_host_checking_arg = (
'--enable_host_checking=false'
if parsed_args.disable_host_checking else '')
no_auto_restart = (
'--automatic_restart=no' if parsed_args.no_auto_restart else '')
build_args = ['--prod_env'] if parsed_args.prod_env else []
if parsed_args.maintenance_mode:
build_args.append('--maintenance_mode')
if parsed_args.source_maps:
build_args.append('--source_maps')
build.main(args=build_args)
app_yaml_filepath = 'app.yaml' if parsed_args.prod_env else 'app_dev.yaml'
# Set up a local dev instance.
# TODO(sll): Do this in a new shell.
# To turn emailing on, add the option '--enable_sendmail=yes' and change the
# relevant settings in feconf.py. Be careful with this -- you do not want to
# spam people accidentally.
background_processes = []
if not parsed_args.prod_env:
# In prod mode webpack is launched through scripts/build.py
python_utils.PRINT('Compiling webpack...')
if parsed_args.source_maps:
background_processes.append(subprocess.Popen([
common.NODE_BIN_PATH,
os.path.join(
common.NODE_MODULES_PATH, 'webpack', 'bin', 'webpack.js'),
'--config', 'webpack.dev.sourcemap.config.ts', '--watch']))
else:
background_processes.append(subprocess.Popen([
common.NODE_BIN_PATH,
os.path.join(
common.NODE_MODULES_PATH, 'webpack', 'bin', 'webpack.js'),
'--config', 'webpack.dev.config.ts', '--watch']))
        # Give webpack a few seconds to do the initial compilation.
time.sleep(10)
python_utils.PRINT('Starting GAE development server')
background_processes.append(subprocess.Popen(
'python %s/dev_appserver.py %s %s %s --admin_host 0.0.0.0 --admin_port '
'8000 --host 0.0.0.0 --port %s %s --skip_sdk_update_check true %s' % (
common.GOOGLE_APP_ENGINE_HOME, clear_datastore_arg,
enable_console_arg, disable_host_checking_arg, no_auto_restart,
python_utils.UNICODE(PORT_NUMBER_FOR_GAE_SERVER),
app_yaml_filepath), shell=True))
# Wait for the servers to come up.
while not common.is_port_open(PORT_NUMBER_FOR_GAE_SERVER):
time.sleep(1)
# Launch a browser window.
if common.is_linux_os() and not parsed_args.no_browser:
detect_virtualbox_pattern = re.compile('.*VBOX.*')
if list(filter(
detect_virtualbox_pattern.match,
os.listdir('/dev/disk/by-id/'))):
common.print_each_string_after_two_new_lines([
'INFORMATION',
'Setting up a local development server. You can access this '
'server',
'by navigating to localhost:%s in a browser window.'
% python_utils.UNICODE(PORT_NUMBER_FOR_GAE_SERVER)])
else:
common.print_each_string_after_two_new_lines([
'INFORMATION',
'Setting up a local development server at localhost:%s. '
% python_utils.UNICODE(PORT_NUMBER_FOR_GAE_SERVER),
'Opening a default browser window pointing to this server'])
time.sleep(5)
background_processes.append(
subprocess.Popen([
'xdg-open', 'http://localhost:%s/'
% python_utils.UNICODE(PORT_NUMBER_FOR_GAE_SERVER)]))
elif common.is_mac_os() and not parsed_args.no_browser:
common.print_each_string_after_two_new_lines([
'INFORMATION',
'Setting up a local development server at localhost:%s. '
% python_utils.UNICODE(PORT_NUMBER_FOR_GAE_SERVER),
'Opening a default browser window pointing to this server.'])
time.sleep(5)
background_processes.append(
subprocess.Popen([
'open', 'http://localhost:%s/'
% python_utils.UNICODE(PORT_NUMBER_FOR_GAE_SERVER)]))
else:
common.print_each_string_after_two_new_lines([
'INFORMATION',
'Setting up a local development server. You can access this server',
'by navigating to localhost:%s in a browser window.'
% python_utils.UNICODE(PORT_NUMBER_FOR_GAE_SERVER)])
python_utils.PRINT('Done!')
for process in background_processes:
process.wait()
|
def main(args=None):
"""Starts up a development server running Oppia."""
parsed_args = _PARSER.parse_args(args=args)
# Runs cleanup function on exit.
atexit.register(cleanup)
# Check that there isn't a server already running.
if common.is_port_open(PORT_NUMBER_FOR_GAE_SERVER):
common.print_each_string_after_two_new_lines([
'WARNING',
'Could not start new server. There is already an existing server',
'running at port %s.'
% python_utils.UNICODE(PORT_NUMBER_FOR_GAE_SERVER)])
clear_datastore_arg = (
'' if parsed_args.save_datastore else '--clear_datastore=true')
enable_console_arg = (
'--enable_console=true' if parsed_args.enable_console else '')
disable_host_checking_arg = (
'--enable_host_checking=false'
if parsed_args.disable_host_checking else '')
no_auto_restart = (
'--automatic_restart=no' if parsed_args.no_auto_restart else '')
build_args = ['--prod_env'] if parsed_args.prod_env else []
if parsed_args.maintenance_mode:
build_args.append('--maintenance_mode')
if parsed_args.source_maps:
build_args.append('--source_maps')
build.main(args=build_args)
app_yaml_filepath = 'app.yaml' if parsed_args.prod_env else 'app_dev.yaml'
# Set up a local dev instance.
# TODO(sll): Do this in a new shell.
# To turn emailing on, add the option '--enable_sendmail=yes' and change the
# relevant settings in feconf.py. Be careful with this -- you do not want to
# spam people accidentally.
background_processes = []
if not parsed_args.prod_env:
# In prod mode webpack is launched through scripts/build.py
python_utils.PRINT('Compiling webpack...')
webpack_config_file = (
'webpack.dev.sourcemap.config.ts' if parsed_args.source_maps
else 'webpack.dev.config.ts')
background_processes.append(subprocess.Popen([
common.NODE_BIN_PATH,
os.path.join(
common.NODE_MODULES_PATH, 'webpack', 'bin', 'webpack.js'),
'--config', webpack_config_file, '--watch']))
        # Give webpack a few seconds to do the initial compilation.
time.sleep(10)
python_utils.PRINT('Starting GAE development server')
background_processes.append(subprocess.Popen(
'python %s/dev_appserver.py %s %s %s --admin_host 0.0.0.0 --admin_port '
'8000 --host 0.0.0.0 --port %s %s --skip_sdk_update_check true %s' % (
common.GOOGLE_APP_ENGINE_HOME, clear_datastore_arg,
enable_console_arg, disable_host_checking_arg, no_auto_restart,
python_utils.UNICODE(PORT_NUMBER_FOR_GAE_SERVER),
app_yaml_filepath), shell=True))
# Wait for the servers to come up.
while not common.is_port_open(PORT_NUMBER_FOR_GAE_SERVER):
time.sleep(1)
# Launch a browser window.
if common.is_linux_os() and not parsed_args.no_browser:
detect_virtualbox_pattern = re.compile('.*VBOX.*')
if list(filter(
detect_virtualbox_pattern.match,
os.listdir('/dev/disk/by-id/'))):
common.print_each_string_after_two_new_lines([
'INFORMATION',
'Setting up a local development server. You can access this '
'server',
'by navigating to localhost:%s in a browser window.'
% python_utils.UNICODE(PORT_NUMBER_FOR_GAE_SERVER)])
else:
common.print_each_string_after_two_new_lines([
'INFORMATION',
'Setting up a local development server at localhost:%s. '
% python_utils.UNICODE(PORT_NUMBER_FOR_GAE_SERVER),
'Opening a default browser window pointing to this server'])
time.sleep(5)
background_processes.append(
subprocess.Popen([
'xdg-open', 'http://localhost:%s/'
% python_utils.UNICODE(PORT_NUMBER_FOR_GAE_SERVER)]))
elif common.is_mac_os() and not parsed_args.no_browser:
common.print_each_string_after_two_new_lines([
'INFORMATION',
'Setting up a local development server at localhost:%s. '
% python_utils.UNICODE(PORT_NUMBER_FOR_GAE_SERVER),
'Opening a default browser window pointing to this server.'])
time.sleep(5)
background_processes.append(
subprocess.Popen([
'open', 'http://localhost:%s/'
% python_utils.UNICODE(PORT_NUMBER_FOR_GAE_SERVER)]))
else:
common.print_each_string_after_two_new_lines([
'INFORMATION',
'Setting up a local development server. You can access this server',
'by navigating to localhost:%s in a browser window.'
% python_utils.UNICODE(PORT_NUMBER_FOR_GAE_SERVER)])
python_utils.PRINT('Done!')
for process in background_processes:
process.wait()
|
36,683 |
def _is_unpacked_type(x: Any) -> bool:
return (
# E.g. *tuple[int]
(isinstance(x, GenericAlias) and x.__unpacked__)
or
# E.g. Unpack[tuple[int]]
(isinstance(x, _GenericAlias) and x.__origin__ is Unpack)
)
|
def _is_unpacked_type(x: object) -> bool:
return (
# E.g. *tuple[int]
(isinstance(x, GenericAlias) and x.__unpacked__)
or
# E.g. Unpack[tuple[int]]
(isinstance(x, _GenericAlias) and x.__origin__ is Unpack)
)
|
10,428 |
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(
dict(
name=dict(required=True, type='str'),
load_balancers=dict(type='list'),
target_group_arns=dict(type='list'),
availability_zones=dict(type='list'),
launch_config_name=dict(type='str'),
launch_template=dict(type='dict',
default=None,
options=dict(
version=dict(type='str'),
launch_template_name=dict(type='str'),
launch_template_id=dict(type='str'),
),
),
mixed_instances_policy=dict(type='dict',
default=None,
options=dict(
instance_types=dict(type='list'),
)),
min_size=dict(type='int'),
max_size=dict(type='int'),
placement_group=dict(type='str'),
desired_capacity=dict(type='int'),
vpc_zone_identifier=dict(type='list'),
replace_batch_size=dict(type='int', default=1),
replace_all_instances=dict(type='bool', default=False),
replace_instances=dict(type='list', default=[]),
lc_check=dict(type='bool', default=True),
lt_check=dict(type='bool', default=True),
wait_timeout=dict(type='int', default=300),
state=dict(default='present', choices=['present', 'absent']),
tags=dict(type='list', default=[]),
health_check_period=dict(type='int', default=300),
health_check_type=dict(default='EC2', choices=['EC2', 'ELB']),
default_cooldown=dict(type='int', default=300),
wait_for_instances=dict(type='bool', default=True),
termination_policies=dict(type='list', default='Default'),
notification_topic=dict(type='str', default=None),
notification_types=dict(type='list', default=[
'autoscaling:EC2_INSTANCE_LAUNCH',
'autoscaling:EC2_INSTANCE_LAUNCH_ERROR',
'autoscaling:EC2_INSTANCE_TERMINATE',
'autoscaling:EC2_INSTANCE_TERMINATE_ERROR'
]),
suspend_processes=dict(type='list', default=[]),
metrics_collection=dict(type='bool', default=False),
metrics_granularity=dict(type='str', default='1Minute'),
metrics_list=dict(type='list', default=[
'GroupMinSize',
'GroupMaxSize',
'GroupDesiredCapacity',
'GroupInServiceInstances',
'GroupPendingInstances',
'GroupStandbyInstances',
'GroupTerminatingInstances',
'GroupTotalInstances'
])
),
)
global module
module = AnsibleModule(
argument_spec=argument_spec,
mutually_exclusive=[
['replace_all_instances', 'replace_instances'],
['launch_config_name', 'launch_template']]
)
if not HAS_BOTO3:
module.fail_json(msg='boto3 required for this module')
state = module.params.get('state')
replace_instances = module.params.get('replace_instances')
replace_all_instances = module.params.get('replace_all_instances')
region, ec2_url, aws_connect_params = get_aws_connection_info(module, boto3=True)
connection = boto3_conn(module,
conn_type='client',
resource='autoscaling',
region=region,
endpoint=ec2_url,
**aws_connect_params)
changed = create_changed = replace_changed = False
exists = asg_exists(connection)
if state == 'present':
create_changed, asg_properties = create_autoscaling_group(connection)
elif state == 'absent':
changed = delete_autoscaling_group(connection)
module.exit_json(changed=changed)
# Only replace instances if asg existed at start of call
if exists and (replace_all_instances or replace_instances) and (module.params.get('launch_config_name') or module.params.get('launch_template')):
replace_changed, asg_properties = replace(connection)
if create_changed or replace_changed:
changed = True
module.exit_json(changed=changed, **asg_properties)
|
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(
dict(
name=dict(required=True, type='str'),
load_balancers=dict(type='list'),
target_group_arns=dict(type='list'),
availability_zones=dict(type='list'),
launch_config_name=dict(type='str'),
launch_template=dict(type='dict',
default=None,
options=dict(
version=dict(type='str'),
launch_template_name=dict(type='str'),
launch_template_id=dict(type='str'),
),
),
mixed_instances_policy=dict(type='dict',
default=None,
options=dict(
instance_types=dict(type='list', elements='str'),
)),
min_size=dict(type='int'),
max_size=dict(type='int'),
placement_group=dict(type='str'),
desired_capacity=dict(type='int'),
vpc_zone_identifier=dict(type='list'),
replace_batch_size=dict(type='int', default=1),
replace_all_instances=dict(type='bool', default=False),
replace_instances=dict(type='list', default=[]),
lc_check=dict(type='bool', default=True),
lt_check=dict(type='bool', default=True),
wait_timeout=dict(type='int', default=300),
state=dict(default='present', choices=['present', 'absent']),
tags=dict(type='list', default=[]),
health_check_period=dict(type='int', default=300),
health_check_type=dict(default='EC2', choices=['EC2', 'ELB']),
default_cooldown=dict(type='int', default=300),
wait_for_instances=dict(type='bool', default=True),
termination_policies=dict(type='list', default='Default'),
notification_topic=dict(type='str', default=None),
notification_types=dict(type='list', default=[
'autoscaling:EC2_INSTANCE_LAUNCH',
'autoscaling:EC2_INSTANCE_LAUNCH_ERROR',
'autoscaling:EC2_INSTANCE_TERMINATE',
'autoscaling:EC2_INSTANCE_TERMINATE_ERROR'
]),
suspend_processes=dict(type='list', default=[]),
metrics_collection=dict(type='bool', default=False),
metrics_granularity=dict(type='str', default='1Minute'),
metrics_list=dict(type='list', default=[
'GroupMinSize',
'GroupMaxSize',
'GroupDesiredCapacity',
'GroupInServiceInstances',
'GroupPendingInstances',
'GroupStandbyInstances',
'GroupTerminatingInstances',
'GroupTotalInstances'
])
),
)
global module
module = AnsibleModule(
argument_spec=argument_spec,
mutually_exclusive=[
['replace_all_instances', 'replace_instances'],
['launch_config_name', 'launch_template']]
)
if not HAS_BOTO3:
module.fail_json(msg='boto3 required for this module')
state = module.params.get('state')
replace_instances = module.params.get('replace_instances')
replace_all_instances = module.params.get('replace_all_instances')
region, ec2_url, aws_connect_params = get_aws_connection_info(module, boto3=True)
connection = boto3_conn(module,
conn_type='client',
resource='autoscaling',
region=region,
endpoint=ec2_url,
**aws_connect_params)
changed = create_changed = replace_changed = False
exists = asg_exists(connection)
if state == 'present':
create_changed, asg_properties = create_autoscaling_group(connection)
elif state == 'absent':
changed = delete_autoscaling_group(connection)
module.exit_json(changed=changed)
# Only replace instances if asg existed at start of call
if exists and (replace_all_instances or replace_instances) and (module.params.get('launch_config_name') or module.params.get('launch_template')):
replace_changed, asg_properties = replace(connection)
if create_changed or replace_changed:
changed = True
module.exit_json(changed=changed, **asg_properties)
|
21,173 |
def _get_examples_without_label(
data: Sequence[Example], label: str, pipeline: str = "ner"
) -> int:
count = 0
for eg in data:
if pipeline == "ner":
labels = [
label.split("-")[1]
for label in eg.get_aligned_ner()
if label not in ("O", "-", None)
]
if pipeline == "spancat":
labels = [
span.label_ for group in eg.reference.spans.values() for span in group
]
if label not in labels:
count += 1
return count
|
def _get_examples_without_label(
data: Sequence[Example], label: str, component: str = "ner"
) -> int:
count = 0
for eg in data:
if pipeline == "ner":
labels = [
label.split("-")[1]
for label in eg.get_aligned_ner()
if label not in ("O", "-", None)
]
if pipeline == "spancat":
labels = [
span.label_ for group in eg.reference.spans.values() for span in group
]
if label not in labels:
count += 1
return count
|
45,988 |
def lovasz_softmax_loss(input: torch.Tensor, target: torch.Tensor) -> torch.Tensor:
r"""Criterion that computes a surrogate multi-class intersection-over-union (IoU) loss.
According to [1], we compute the IoU as follows:
.. math::
\text{IoU}(x, class) = \frac{|X \cap Y|}{|X \cup Y|}
    [1] approximates this formula with a surrogate, which is fully differentiable.
    Where:
    - :math:`X` is expected to be the scores of each class.
    - :math:`Y` is expected to be the binary tensor with the class labels.
    the loss is finally computed as:
    .. math::
        \text{loss}(x, class) = 1 - \text{IoU}(x, class)
    Reference:
    [1] https://arxiv.org/pdf/1705.08790.pdf
    .. note::
        This loss function only supports multi-class (C > 1) labels. For binary
        labels please use the Lovasz-Hinge loss.
    Args:
        input: logits tensor with shape :math:`(N, C, H, W)` where C = number of classes > 1.
        target: labels tensor with shape :math:`(N, H, W)` where each value
is :math:`0 ≤ targets[i] ≤ C−1`.
Return:
a scalar with the computed loss.
Example:
>>> N = 5 # num_classes
>>> input = torch.randn(1, N, 3, 5, requires_grad=True)
>>> target = torch.empty(1, 3, 5, dtype=torch.long).random_(N)
>>> output = lovasz_softmax_loss(input, target)
>>> output.backward()
"""
if not isinstance(input, torch.Tensor):
raise TypeError(f"Input type is not a torch.Tensor. Got {type(input)}")
if not isinstance(target, torch.Tensor):
raise TypeError(f"Target type is not a torch.Tensor. Got {type(target)}")
if not len(input.shape) == 4:
raise ValueError(f"Invalid input shape, we expect BxNxHxW. Got: {input.shape}")
if not len(target.shape) == 3:
raise ValueError(f"Invalid target shape, we expect BxHxW. Got: {target.shape}")
if not input.shape[1] > 1:
raise ValueError(f"Invalid input shape, we expect BxNxHxW, with N > 1. Got: {input.shape}")
if not input.shape[-2:] == target.shape[-2:]:
raise ValueError(f"input and target shapes must be the same. Got: {input.shape} and {target.shape}")
if not input.device == target.device:
raise ValueError(f"input and target must be in the same device. Got: {input.device} and {target.device}")
# flatten input [B, C, -1] and target [B, -1] and to float
input_flatten: torch.Tensor = input.flatten(start_dim=2)
target_flatten: torch.Tensor = target.flatten(start_dim=1).float()
# get shapes
B, C, N = input_flatten.shape
# compute softmax over the classes axis
input_soft: torch.Tensor = F.softmax(input_flatten, dim=1)
# compute actual loss
losses: List[torch.Tensor] = []
batch_index: torch.Tensor = torch.arange(B, device=input.device).repeat_interleave(N, dim=0)
for c in range(C):
foreground: torch.Tensor = (target_flatten == c).float()
class_pred: torch.Tensor = input_soft[:, c]
errors = (foreground - class_pred).abs()
errors_sorted, permutation = torch.sort(errors, dim=1, descending=True)
target_sorted: torch.Tensor = target_flatten[batch_index, permutation.view(-1)]
target_sorted: torch.Tensor = target_sorted.view(B, N)
target_sorted_sum: torch.Tensor = target_sorted.sum(dim=1, keepdim=True)
intersection: torch.Tensor = target_sorted_sum - target_sorted.cumsum(dim=1)
union: torch.Tensor = target_sorted_sum + (1. - target_sorted).cumsum(dim=1)
gradient: torch.Tensor = 1. - intersection / union
if N > 1:
gradient[..., 1:] = gradient[..., 1:] - gradient[..., :-1]
loss: torch.Tensor = (F.relu(errors_sorted) * gradient).sum(dim=1).mean()
losses.append(loss)
final_loss: torch.Tensor = torch.stack(losses, dim=0).mean()
return final_loss
|
def lovasz_softmax_loss(input: torch.Tensor, target: torch.Tensor) -> torch.Tensor:
r"""Criterion that computes a surrogate multi-class intersection-over-union (IoU) loss.
According to [1], we compute the IoU as follows:
.. math::
\text{IoU}(x, class) = \frac{|X \cap Y|}{|X \cup Y|}
    [1] approximates this formula with a surrogate, which is fully differentiable.
    Where:
    - :math:`X` is expected to be the scores of each class.
    - :math:`Y` is expected to be the binary tensor with the class labels.
    the loss is finally computed as:
    .. math::
        \text{loss}(x, class) = 1 - \text{IoU}(x, class)
    Reference:
    [1] https://arxiv.org/pdf/1705.08790.pdf
    .. note::
        This loss function only supports multi-class (C > 1) labels. For binary
        labels please use the Lovasz-Hinge loss.
    Args:
        input: logits tensor with shape :math:`(N, C, H, W)` where C = number of classes > 1.
        target: labels tensor with shape :math:`(N, H, W)` where each value
is :math:`0 ≤ targets[i] ≤ C−1`.
Return:
a scalar with the computed loss.
Example:
>>> N = 5 # num_classes
>>> input = torch.randn(1, N, 3, 5, requires_grad=True)
>>> target = torch.empty(1, 3, 5, dtype=torch.long).random_(N)
>>> output = lovasz_softmax_loss(input, target)
>>> output.backward()
"""
if not isinstance(input, torch.Tensor):
raise TypeError(f"Input type is not a torch.Tensor. Got {type(input)}")
if not isinstance(target, torch.Tensor):
raise TypeError(f"Target type is not a torch.Tensor. Got {type(target)}")
if not len(input.shape) == 4:
raise ValueError(f"Invalid input shape, we expect BxNxHxW. Got: {input.shape}")
if not len(target.shape) == 3:
raise ValueError(f"Invalid target shape, we expect BxHxW. Got: {target.shape}")
if not input.shape[1] > 1:
raise ValueError(f"Invalid input shape, we expect BxNxHxW, with N > 1. Got: {input.shape}")
if not input.shape[-2:] == target.shape[-2:]:
raise ValueError(f"input and target shapes must be the same. Got: {input.shape} and {target.shape}")
if not input.device == target.device:
raise ValueError(f"input and target must be in the same device. Got: {input.device} and {target.device}")
# flatten input [B, C, -1] and target [B, -1] and to float
input_flatten: torch.Tensor = input.flatten(start_dim=2)
target_flatten: torch.Tensor = target.flatten(start_dim=1).float()
# get shapes
B, C, N = input_flatten.shape
# compute softmax over the classes axis
input_soft: torch.Tensor = F.softmax(input_flatten, dim=1)
# compute actual loss
losses: List[torch.Tensor] = []
batch_index: torch.Tensor = torch.arange(B, device=input.device).repeat_interleave(N, dim=0)
for c in range(C):
foreground: torch.Tensor = (target_flatten == c)
class_pred: torch.Tensor = input_soft[:, c]
errors = (foreground - class_pred).abs()
errors_sorted, permutation = torch.sort(errors, dim=1, descending=True)
target_sorted: torch.Tensor = target_flatten[batch_index, permutation.view(-1)]
target_sorted: torch.Tensor = target_sorted.view(B, N)
target_sorted_sum: torch.Tensor = target_sorted.sum(dim=1, keepdim=True)
intersection: torch.Tensor = target_sorted_sum - target_sorted.cumsum(dim=1)
union: torch.Tensor = target_sorted_sum + (1. - target_sorted).cumsum(dim=1)
gradient: torch.Tensor = 1. - intersection / union
if N > 1:
gradient[..., 1:] = gradient[..., 1:] - gradient[..., :-1]
loss: torch.Tensor = (F.relu(errors_sorted) * gradient).sum(dim=1).mean()
losses.append(loss)
final_loss: torch.Tensor = torch.stack(losses, dim=0).mean()
return final_loss
|
58,767 |
def partition_for_vitis_ai(mod, params=None, target=None, **opts):
"""Partition the Relay expression for offloading operators to Vitis AI DPU
Parameters
----------
mod : Module
The module to run passes on.
params : Optional[Dict[str, NDArray]]
Constant input parameters.
target : str
The DPU identifier (e.g. DPUCZDX8G-zcu104, DPUCADX8G)
Returns
-------
ret : annotated and partitioned module.
"""
if target is None:
raise ValueError("Please pass Vitis AI DPU target to partitioning function")
if params:
mod["main"] = bind_params_by_name(mod["main"], params)
seq = tvm.transform.Sequential(
[
transform.InferType(),
VitisAIAnnotationPass("vitis_ai", target, params),
transform.MergeCompilerRegions(),
transform.PartitionGraph(),
]
)
return seq(mod)
|
def partition_for_vitis_ai(mod, params=None, target=None, **opts):
"""Partition the Relay expression for offloading operators to Vitis AI DPU
Parameters
----------
mod : Module
The module to run passes on.
params : Optional[Dict[str, NDArray]]
Constant input parameters.
target : str
The DPU identifier (e.g. DPUCZDX8G-zcu104, DPUCADX8G)
Returns
-------
ret : Module
annotated and partitioned module.
"""
if target is None:
raise ValueError("Please pass Vitis AI DPU target to partitioning function")
if params:
mod["main"] = bind_params_by_name(mod["main"], params)
seq = tvm.transform.Sequential(
[
transform.InferType(),
VitisAIAnnotationPass("vitis_ai", target, params),
transform.MergeCompilerRegions(),
transform.PartitionGraph(),
]
)
return seq(mod)
|
37,832 |
def build_description_from_identifier(identifier: str) -> str:
python_identifier, _, platform_identifier = identifier.partition("-")
build_description = ""
python_interpreter = python_identifier[0:2]
python_version = python_identifier[2:]
if python_interpreter == "cp":
build_description += "CPython"
elif python_interpreter == "pp":
build_description += "PyPy"
else:
msg = "unknown python {python_interpreter!r}"
raise Exception(msg)
build_description += f" {python_version[0]}.{python_version[1:]} "
try:
build_description += PLATFORM_IDENTIFIER_DESCRIPTIONS[platform_identifier]
except KeyError as e:
msg = f"unknown platform {platform_identifier!r}"
raise Exception(msg) from e
return build_description
|
def build_description_from_identifier(identifier: str) -> str:
python_identifier, _, platform_identifier = identifier.partition("-")
build_description = ""
python_interpreter = python_identifier[0:2]
python_version = python_identifier[2:]
if python_interpreter == "cp":
build_description += "CPython"
elif python_interpreter == "pp":
build_description += "PyPy"
else:
msg = f"unknown python {python_interpreter!r}"
raise Exception(msg)
build_description += f" {python_version[0]}.{python_version[1:]} "
try:
build_description += PLATFORM_IDENTIFIER_DESCRIPTIONS[platform_identifier]
except KeyError as e:
msg = f"unknown platform {platform_identifier!r}"
raise Exception(msg) from e
return build_description
|
22,519 |
def post_fork(server, worker):
"""
Put worker_id and listeners into an env variable for further use within the app.
"""
os.environ["GUNICORN_WORKER_ID"] = str(worker._worker_id)
os.environ["GUNICORN_LISTENERS"] = ",".join([str(bind) for bind in server.LISTENERS])
if "--preload" in os.environ.get("GUNICORN_CMD_ARGS", "") or "--preload" in sys.argv:
from galaxy.web_stack import GunicornApplicationStack
GunicornApplicationStack.late_postfork_event.set()
|
def post_fork(server, worker):
"""
Put worker_id and listeners into an env variable for further use within the app.
"""
os.environ["GUNICORN_WORKER_ID"] = str(worker._worker_id)
os.environ["GUNICORN_LISTENERS"] = ",".join(str(bind) for bind in server.LISTENERS)
if "--preload" in os.environ.get("GUNICORN_CMD_ARGS", "") or "--preload" in sys.argv:
from galaxy.web_stack import GunicornApplicationStack
GunicornApplicationStack.late_postfork_event.set()
|
5,665 |
def _f_vratio(u, ineps, mp):
"""Function to solve in _solve_vratio"""
[s, c, *_] = special.ellipj(u, mp)
return c * ineps - s
|
def _f_vratio(u, ineps, mp):
"""Function to solve in _solve_vratio"""
s, c = special.ellipj(u, mp)[0:2]
return c * ineps - s
|
25,990 |
def load_arguments(self, _):
from argcomplete.completers import FilesCompleter
from argcomplete.completers import DirectoriesCompleter
from azure.mgmt.resource.locks.models import LockLevel
from azure.mgmt.resource.managedapplications.models import ApplicationLockLevel
from azure.mgmt.resource.policy.models import (ExemptionCategory, EnforcementMode)
from azure.cli.core.api import get_subscription_id_list
from azure.cli.core.commands.parameters import (
resource_group_name_type, get_location_type, tag_type, tags_type, get_resource_group_completion_list, no_wait_type, file_type,
get_enum_type, get_three_state_flag)
from azure.cli.core.profiles import ResourceType
from azure.cli.core.local_context import LocalContextAttribute, LocalContextAction, ALL
from knack.arguments import ignore_type, CLIArgumentType
from azure.cli.command_modules.resource._completers import (
get_policy_completion_list, get_policy_set_completion_list, get_policy_assignment_completion_list, get_policy_exemption_completion_list,
get_resource_types_completion_list, get_providers_completion_list)
from azure.cli.command_modules.resource._validators import (
validate_lock_parameters, validate_resource_lock, validate_group_lock, validate_subscription_lock, validate_metadata, RollbackAction,
validate_msi)
from azure.cli.command_modules.resource.parameters import TagUpdateOperation
DeploymentMode, WhatIfResultFormat, ChangeType = self.get_models('DeploymentMode', 'WhatIfResultFormat', 'ChangeType')
# BASIC PARAMETER CONFIGURATION
resource_name_type = CLIArgumentType(options_list=['--name', '-n'], help='The resource name. (Ex: myC)')
resource_type_type = CLIArgumentType(help="The resource type (Ex: 'resC'). Can also accept namespace/type format (Ex: 'Microsoft.Provider/resC')")
resource_namespace_type = CLIArgumentType(options_list='--namespace', completer=get_providers_completion_list, help="Provider namespace (Ex: 'Microsoft.Provider')")
resource_parent_type = CLIArgumentType(required=False, options_list=['--parent'], help="The parent path (Ex: 'resA/myA/resB/myB')")
existing_policy_definition_name_type = CLIArgumentType(options_list=['--name', '-n'], completer=get_policy_completion_list, help='The policy definition name.')
existing_policy_set_definition_name_type = CLIArgumentType(options_list=['--name', '-n'], completer=get_policy_set_completion_list, help='The policy set definition name.')
subscription_type = CLIArgumentType(options_list='--subscription', FilesCompleter=get_subscription_id_list, help='The subscription id of the policy [set] definition.')
management_group_name_type = CLIArgumentType(options_list='--management-group', help='The name of the management group of the policy [set] definition.')
identity_scope_type = CLIArgumentType(help="Scope that the system assigned identity can access")
identity_role_type = CLIArgumentType(options_list=['--role'], help="Role name or id that will be assigned to the managed identity")
extended_json_format_type = CLIArgumentType(options_list=['--handle-extended-json-format', '-j'], action='store_true',
help='Support to handle extended template content including multiline and comments in deployment')
deployment_name_type = CLIArgumentType(options_list=['--name', '-n'], required=True, help='The deployment name.')
deployment_create_name_type = CLIArgumentType(options_list=['--name', '-n'], required=False, help='The deployment name. Default to template file base name')
management_group_id_type = CLIArgumentType(options_list=['--management-group-id', '-m'], required=True, help='The management group id.')
deployment_template_file_type = CLIArgumentType(options_list=['--template-file', '-f'], completer=FilesCompleter(), type=file_type,
help="a path to a template file or Bicep file in the file system")
deployment_template_uri_type = CLIArgumentType(options_list=['--template-uri', '-u'], help='a uri to a remote template file')
deployment_template_spec_type = CLIArgumentType(options_list=['--template-spec', '-s'], min_api='2019-06-01', help="The template spec resource id.")
deployment_query_string_type = CLIArgumentType(options_list=['--query-string', '-q'], help="The query string (a SAS token) to be used with the template-uri in the case of linked templates.")
deployment_parameters_type = CLIArgumentType(options_list=['--parameters', '-p'], action='append', nargs='+', completer=FilesCompleter(), help='the deployment parameters')
filter_type = CLIArgumentType(options_list=['--filter'], is_preview=True,
help='Filter expression using OData notation. You can use --filter "provisioningState eq \'{state}\'" to filter provisioningState. '
'To get more information, please visit https://docs.microsoft.com/rest/api/resources/deployments/listatsubscriptionscope#uri-parameters')
no_prompt = CLIArgumentType(arg_type=get_three_state_flag(), help='The option to disable the prompt of missing parameters for ARM template. '
'When the value is true, the prompt requiring users to provide missing parameter will be ignored. The default value is false.')
deployment_what_if_type = CLIArgumentType(options_list=['--what-if', '-w'], action='store_true',
help='Instruct the command to run deployment What-If.',
min_api='2019-07-01')
deployment_what_if_proceed_if_no_change_type = CLIArgumentType(options_list=['--proceed-if-no-change'], action='store_true',
help='Instruct the command to execute the deployment if the What-If result contains no resource changes. Applicable when --confirm-with-what-if is set.',
min_api='2019-07-01')
deployment_what_if_result_format_type = CLIArgumentType(options_list=['--result-format', '-r'],
arg_type=get_enum_type(WhatIfResultFormat, "FullResourcePayloads"),
min_api='2019-07-01')
deployment_what_if_no_pretty_print_type = CLIArgumentType(options_list=['--no-pretty-print'], action='store_true',
help='Disable pretty-print for What-If results. When set, the output format type will be used.')
deployment_what_if_confirmation_type = CLIArgumentType(options_list=['--confirm-with-what-if', '-c'], action='store_true',
help='Instruct the command to run deployment What-If before executing the deployment. It then prompts you to acknowledge resource changes before it continues.',
min_api='2019-07-01')
deployment_what_if_exclude_change_types_type = CLIArgumentType(nargs="+", options_list=['--exclude-change-types', '-x'],
arg_type=get_enum_type(ChangeType),
help='Space-separated list of resource change types to be excluded from What-If results.',
min_api='2019-07-01')
tag_name_type = CLIArgumentType(options_list=['--name', '-n'], help='The tag name.')
tag_value_type = CLIArgumentType(options_list='--value', help='The tag value.')
tag_resource_id_type = CLIArgumentType(options_list='--resource-id',
help='The resource identifier for the tagged entity. A resource, a resource group or a subscription may be tagged.',
min_api='2019-10-01')
latest_include_preview_type = CLIArgumentType(options_list=['--latest-include-preview', '-v'], is_preview=True,
action='store_true', arg_group='Resource Id',
help='Indicate that the latest api-version will be used regardless of whether it is preview version (like 2020-01-01-preview) or not. '
'For example, if the supported api-version of resource provider is 2020-01-01-preview and 2019-01-01: '
'when passing in this parameter it will take the latest version 2020-01-01-preview, otherwise it will take the latest stable version 2019-01-01 without passing in this parameter')
ts_display_name_type = CLIArgumentType(options_list=['--display-name', '-d'], help='The display name of the template spec')
ts_description_type = CLIArgumentType(options_list=['--description'], help='The description of the parent template spec.')
ts_version_description_type = CLIArgumentType(options_list=['--version-description'], help='The description of the template spec version.')
ui_form_definition_file_type = CLIArgumentType(options_list=['--ui-form-definition'], completer=FilesCompleter(), type=file_type,
help="A path to a uiFormDefinition file in the file system")
_PROVIDER_HELP_TEXT = 'the resource namespace, aka \'provider\''
with self.argument_context('resource') as c:
c.argument('no_wait', no_wait_type)
c.argument('resource_group_name', resource_group_name_type, arg_group='Resource Id')
c.ignore('resource_id')
c.argument('resource_name', resource_name_type, arg_group='Resource Id')
c.argument('api_version', help='The api version of the resource (omit for the latest stable version)', required=False, arg_group='Resource Id')
c.argument('resource_provider_namespace', resource_namespace_type, arg_group='Resource Id')
c.argument('resource_type', arg_type=resource_type_type, completer=get_resource_types_completion_list, arg_group='Resource Id')
c.argument('parent_resource_path', resource_parent_type, arg_group='Resource Id')
c.argument('tag', tag_type)
c.argument('tags', tags_type)
c.argument('resource_ids', nargs='+', options_list=['--ids'], help='One or more resource IDs (space-delimited). If provided, no other "Resource Id" arguments should be specified.', arg_group='Resource Id')
c.argument('include_response_body', arg_type=get_three_state_flag(), help='Use if the default command output doesn\'t capture all of the property data.')
c.argument('latest_include_preview', latest_include_preview_type)
with self.argument_context('resource list') as c:
c.argument('name', resource_name_type)
with self.argument_context('resource move') as c:
c.argument('ids', nargs='+')
with self.argument_context('resource invoke-action') as c:
c.argument('action', help='The action that will be invoked on the specified resource')
c.argument('request_body', help='JSON encoded parameter arguments for the action that will be passed along in the post request body. Use @{file} to load from a file.')
with self.argument_context('resource create') as c:
c.argument('resource_id', options_list=['--id'], help='Resource ID.', action=None)
c.argument('properties', options_list=['--properties', '-p'], help='a JSON-formatted string containing resource properties')
c.argument('is_full_object', action='store_true', help='Indicate that the properties object includes other options such as location, tags, sku, and/or plan.')
with self.argument_context('resource link') as c:
c.argument('target_id', options_list=['--target', c.deprecate(target='--target-id', redirect='--target', hide=True)], help='Fully-qualified resource ID of the resource link target.')
c.argument('link_id', options_list=['--link', c.deprecate(target='--link-id', redirect='--link', hide=True)], help='Fully-qualified resource ID of the resource link.')
c.argument('notes', help='Notes for the link.')
c.argument('scope', help='Fully-qualified scope for retrieving links.')
c.argument('filter_string', options_list=['--filter', c.deprecate(target='--filter-string', redirect='--filter', hide=True)], help='Filter string for limiting results.')
with self.argument_context('resource tag') as c:
c.argument('is_incremental', action='store_true', options_list=['--is-incremental', '-i'],
help='The option to add tags incrementally without deleting the original tags. If the key of new tag and original tag are duplicated, the original value will be overwritten.')
with self.argument_context('resource wait') as c:
c.ignore('latest_include_preview')
with self.argument_context('provider') as c:
c.ignore('top')
c.argument('resource_provider_namespace', options_list=['--namespace', '-n'], completer=get_providers_completion_list, help=_PROVIDER_HELP_TEXT)
with self.argument_context('provider register') as c:
c.argument('mg', help="The management group id to register.", options_list=['--management-group-id', '-m'])
c.argument('accept_terms', action='store_true', is_preview=True, help="Accept market place terms and RP terms for RPaaS. Required when registering RPs from RPaaS, such as 'Microsoft.Confluent' and 'Microsoft.Datadog'.", deprecate_info=c.deprecate(hide=True))
c.argument('wait', action='store_true', help='wait for the registration to finish')
c.argument('consent_to_permissions', options_list=['--consent-to-permissions', '-c'], action='store_true', help='A value indicating whether authorization is consented or not.')
with self.argument_context('provider unregister') as c:
c.argument('wait', action='store_true', help='wait for unregistration to finish')
with self.argument_context('provider operation') as c:
c.argument('api_version', help="The api version of the 'Microsoft.Authorization/providerOperations' resource (omit for the latest stable version)")
with self.argument_context('feature') as c:
c.argument('resource_provider_namespace', options_list='--namespace', required=True, help=_PROVIDER_HELP_TEXT)
c.argument('feature_name', options_list=['--name', '-n'], help='the feature name')
with self.argument_context('feature list') as c:
c.argument('resource_provider_namespace', options_list='--namespace', required=False, help=_PROVIDER_HELP_TEXT)
with self.argument_context('feature registration') as c:
c.argument('resource_provider_namespace', options_list='--namespace', required=True, help=_PROVIDER_HELP_TEXT)
c.argument('feature_name', options_list=['--name', '-n'], help='the feature name')
with self.argument_context('feature registration list') as c:
c.argument('resource_provider_namespace', options_list='--namespace', required=False, help=_PROVIDER_HELP_TEXT)
with self.argument_context('policy') as c:
c.argument('resource_group_name', arg_type=resource_group_name_type, help='the resource group where the policy will be applied')
with self.argument_context('policy definition', resource_type=ResourceType.MGMT_RESOURCE_POLICY) as c:
c.argument('policy_definition_name', arg_type=existing_policy_definition_name_type)
c.argument('rules', help='JSON formatted string or a path to a file with such content', type=file_type, completer=FilesCompleter())
c.argument('display_name', help='Display name of policy definition.')
c.argument('description', help='Description of policy definition.')
c.argument('params', help='JSON formatted string or a path to a file or uri with parameter definitions.', type=file_type, completer=FilesCompleter(), min_api='2016-12-01')
c.argument('metadata', min_api='2017-06-01-preview', nargs='+', validator=validate_metadata, help='Metadata in space-separated key=value pairs.')
c.argument('management_group', arg_type=management_group_name_type)
c.argument('mode', options_list=['--mode', '-m'], help='Mode of the policy definition, e.g. All, Indexed. Please visit https://aka.ms/azure-policy-mode for more information.', min_api='2016-12-01')
c.argument('subscription', arg_type=subscription_type)
c.ignore('_subscription') # disable global subscription
with self.argument_context('policy definition create', resource_type=ResourceType.MGMT_RESOURCE_POLICY) as c:
c.argument('name', options_list=['--name', '-n'], help='Name of the new policy definition.')
with self.argument_context('policy assignment', resource_type=ResourceType.MGMT_RESOURCE_POLICY) as c:
c.ignore('_subscription')
c.argument('name', options_list=['--name', '-n'], completer=get_policy_assignment_completion_list, help='Name of the policy assignment.')
c.argument('scope', help='Scope to which this policy assignment applies.')
c.argument('disable_scope_strict_match', action='store_true', help='Include policy assignments either inherited from parent scope or at child scope.')
c.argument('display_name', help='Display name of the policy assignment.')
c.argument('description', help='Description of the policy assignment.', min_api='2016-12-01')
c.argument('policy', help='Name or id of the policy definition.', completer=get_policy_completion_list)
c.argument('params', options_list=['--params', '-p'], help='JSON formatted string or a path to a file or uri with parameter values of the policy rule.', type=file_type, completer=FilesCompleter(), min_api='2016-12-01')
with self.argument_context('policy assignment', resource_type=ResourceType.MGMT_RESOURCE_POLICY, min_api='2017-06-01-preview') as c:
c.argument('policy_set_definition', options_list=['--policy-set-definition', '-d'], help='Name or id of the policy set definition.')
c.argument('sku', options_list=['--sku', '-s'], help='policy sku.', arg_type=get_enum_type(['free', 'standard']), deprecate_info=c.deprecate(hide=True))
c.argument('notscopes', options_list='--not-scopes', nargs='+')
with self.argument_context('policy assignment', resource_type=ResourceType.MGMT_RESOURCE_POLICY, arg_group='Managed Identity', min_api='2018-05-01') as c:
c.argument('assign_identity', nargs='*', validator=validate_msi, help="Assigns a system assigned identity to the policy assignment.")
c.argument('identity_scope', arg_type=identity_scope_type)
c.argument('identity_role', arg_type=identity_role_type)
with self.argument_context('policy assignment', resource_type=ResourceType.MGMT_RESOURCE_POLICY, min_api='2019-06-01') as c:
c.argument('enforcement_mode', options_list=['--enforcement-mode', '-e'], help='Enforcement mode of the policy assignment, e.g. Default, DoNotEnforce. Please visit https://aka.ms/azure-policyAssignment-enforcement-mode for more information.', arg_type=get_enum_type(EnforcementMode))
with self.argument_context('policy assignment create', resource_type=ResourceType.MGMT_RESOURCE_POLICY) as c:
c.argument('name', options_list=['--name', '-n'], help='Name of the new policy assignment.')
with self.argument_context('policy assignment create', resource_type=ResourceType.MGMT_RESOURCE_POLICY, min_api='2018-05-01') as c:
c.argument('location', arg_type=get_location_type(self.cli_ctx), help='The location of the policy assignment. Only required when utilizing managed identity.')
with self.argument_context('policy assignment identity', resource_type=ResourceType.MGMT_RESOURCE_POLICY, min_api='2018-05-01') as c:
c.argument('identity_scope', arg_type=identity_scope_type)
c.argument('identity_role', arg_type=identity_role_type)
with self.argument_context('policy assignment non-compliance-message', resource_type=ResourceType.MGMT_RESOURCE_POLICY, min_api='2020-09-01') as c:
c.argument('message', options_list=['--message', '-m'], help='Message that will be shown when a resource is denied by policy or evaluation details are inspected.')
c.argument('policy_definition_reference_id', options_list=['--policy-definition-reference-id', '-r'], help='Policy definition reference ID within the assigned initiative (policy set) that the message applies to.')
with self.argument_context('policy set-definition', min_api='2017-06-01-preview', resource_type=ResourceType.MGMT_RESOURCE_POLICY) as c:
c.argument('policy_set_definition_name', arg_type=existing_policy_set_definition_name_type)
c.argument('display_name', help='Display name of policy set definition.')
c.argument('description', help='Description of policy set definition.')
c.argument('params', help='JSON formatted string or a path to a file or uri with parameter definitions.', type=file_type, completer=FilesCompleter())
c.argument('definitions', help='JSON formatted string or a path to a file or uri containing definitions.', type=file_type, completer=FilesCompleter())
c.argument('definition_groups', min_api='2019-09-01', help='JSON formatted string or a path to a file or uri containing policy definition groups. Groups are used to organize policy definitions within a policy set.', type=file_type, completer=FilesCompleter())
c.argument('metadata', nargs='+', validator=validate_metadata, help='Metadata in space-separated key=value pairs.')
c.argument('management_group', arg_type=management_group_name_type)
c.argument('subscription', arg_type=subscription_type)
c.ignore('_subscription') # disable global subscription
with self.argument_context('policy set-definition create', min_api='2017-06-01-preview', resource_type=ResourceType.MGMT_RESOURCE_POLICY) as c:
c.argument('name', options_list=['--name', '-n'], help='Name of the new policy set definition.')
with self.argument_context('policy exemption', min_api='2020-09-01', resource_type=ResourceType.MGMT_RESOURCE_POLICY) as c:
c.ignore('_subscription')
c.argument('name', options_list=['--name', '-n'], completer=get_policy_exemption_completion_list, help='Name of the policy exemption.')
c.argument('scope', help='Scope to which this policy exemption applies.')
c.argument('disable_scope_strict_match', options_list=['--disable-scope-strict-match', '-i'], action='store_true', help='Include policy exemptions either inherited from parent scope or at child scope.')
c.argument('display_name', help='Display name of the policy exemption.')
c.argument('description', help='Description of policy exemption.')
c.argument('exemption_category', options_list=['--exemption-category', '-e'], help='The policy exemption category of the policy exemption', arg_type=get_enum_type(ExemptionCategory))
c.argument('policy_definition_reference_ids', nargs='+', options_list=['--policy-definition-reference-ids', '-r'], help='The policy definition reference ids to exempt in the initiative (policy set).')
c.argument('expires_on', help='The expiration date and time (in UTC ISO 8601 format yyyy-MM-ddTHH:mm:ssZ) of the policy exemption.')
c.argument('metadata', nargs='+', validator=validate_metadata, help='Metadata in space-separated key=value pairs.')
with self.argument_context('policy exemption create', min_api='2020-09-01', resource_type=ResourceType.MGMT_RESOURCE_POLICY) as c:
c.argument('name', options_list=['--name', '-n'], help='Name of the new policy exemption.')
c.argument('policy_assignment', options_list=['--policy-assignment', '-a'], help='The referenced policy assignment Id for the policy exemption.')
with self.argument_context('group') as c:
c.argument('tag', tag_type)
c.argument('tags', tags_type)
c.argument('resource_group_name', resource_group_name_type, options_list=['--name', '-n', '--resource-group', '-g'])
with self.argument_context('group deployment') as c:
c.argument('resource_group_name', arg_type=resource_group_name_type, completer=get_resource_group_completion_list)
c.argument('deployment_name', arg_type=deployment_name_type)
c.argument('template_file', arg_type=deployment_template_file_type)
c.argument('template_uri', arg_type=deployment_template_uri_type)
c.argument('mode', arg_type=get_enum_type(DeploymentMode, default='incremental'),
help='Incremental (only add resources to resource group) or Complete (remove extra resources from resource group)')
c.argument('parameters', arg_type=deployment_parameters_type)
c.argument('rollback_on_error', nargs='?', action=RollbackAction,
help='The name of a deployment to roll back to on error, or use as a flag to roll back to the last successful deployment.')
with self.argument_context('group deployment create') as c:
c.argument('deployment_name', arg_type=deployment_create_name_type)
c.argument('handle_extended_json_format', arg_type=extended_json_format_type,
deprecate_info=c.deprecate(target='--handle-extended-json-format/-j'))
c.argument('aux_subscriptions', nargs='+', options_list=['--aux-subs'],
help='Auxiliary subscriptions which will be used during deployment across tenants.',
deprecate_info=c.deprecate(target='--aux-subs', redirect='--aux-tenants'))
c.argument('aux_tenants', nargs='+', options_list=['--aux-tenants'],
help='Auxiliary tenants which will be used during deployment across tenants.')
c.argument('no_prompt', arg_type=no_prompt)
with self.argument_context('group deployment validate') as c:
c.argument('handle_extended_json_format', arg_type=extended_json_format_type,
deprecate_info=c.deprecate(target='--handle-extended-json-format/-j'))
c.argument('no_prompt', arg_type=no_prompt)
with self.argument_context('group deployment list') as c:
c.argument('filter_string', arg_type=filter_type)
with self.argument_context('group deployment operation show') as c:
c.argument('operation_ids', nargs='+', help='A list of operation ids to show')
with self.argument_context('deployment') as c:
c.argument('deployment_name', arg_type=deployment_name_type)
c.argument('deployment_location', arg_type=get_location_type(self.cli_ctx), required=True)
c.argument('template_file', arg_type=deployment_template_file_type)
c.argument('template_uri', arg_type=deployment_template_uri_type)
c.argument('template_spec', arg_type=deployment_template_spec_type)
c.argument('query_string', arg_type=deployment_query_string_type)
c.argument('parameters', arg_type=deployment_parameters_type)
with self.argument_context('deployment create') as c:
c.argument('deployment_name', arg_type=deployment_create_name_type)
c.argument('handle_extended_json_format', arg_type=extended_json_format_type,
deprecate_info=c.deprecate(target='--handle-extended-json-format/-j'))
c.argument('no_prompt', arg_type=no_prompt)
c.argument('confirm_with_what_if', arg_type=deployment_what_if_confirmation_type)
c.argument('what_if_result_format', options_list=['--what-if-result-format', '-r'],
arg_type=deployment_what_if_result_format_type)
c.argument('what_if_exclude_change_types', options_list=['--what-if-exclude-change-types', '-x'],
arg_type=deployment_what_if_exclude_change_types_type,
help="Space-separated list of resource change types to be excluded from What-If results. Applicable when --confirm-with-what-if is set.")
c.argument('what_if', arg_type=deployment_what_if_type)
c.argument('proceed_if_no_change', arg_type=deployment_what_if_proceed_if_no_change_type)
with self.argument_context('deployment validate') as c:
c.argument('deployment_name', arg_type=deployment_create_name_type)
c.argument('handle_extended_json_format', arg_type=extended_json_format_type,
deprecate_info=c.deprecate(target='--handle-extended-json-format/-j'))
c.argument('no_prompt', arg_type=no_prompt)
with self.argument_context('deployment operation') as c:
c.argument('operation_ids', nargs='+', help='A list of operation ids to show')
with self.argument_context('deployment list') as c:
c.argument('filter_string', arg_type=filter_type)
with self.argument_context('deployment sub') as c:
c.argument('deployment_location', arg_type=get_location_type(self.cli_ctx), required=True)
with self.argument_context('deployment sub create') as c:
c.argument('deployment_name', arg_type=deployment_create_name_type)
c.argument('handle_extended_json_format', arg_type=extended_json_format_type,
deprecate_info=c.deprecate(target='--handle-extended-json-format/-j'))
c.argument('no_prompt', arg_type=no_prompt)
c.argument('confirm_with_what_if', arg_type=deployment_what_if_confirmation_type)
c.argument('what_if_result_format', options_list=['--what-if-result-format', '-r'],
arg_type=deployment_what_if_result_format_type)
c.argument('what_if_exclude_change_types', options_list=['--what-if-exclude-change-types', '-x'],
arg_type=deployment_what_if_exclude_change_types_type,
help="Space-separated list of resource change types to be excluded from What-If results. Applicable when --confirm-with-what-if is set.")
c.argument('what_if', arg_type=deployment_what_if_type)
c.argument('proceed_if_no_change', arg_type=deployment_what_if_proceed_if_no_change_type)
with self.argument_context('deployment sub what-if') as c:
c.argument('deployment_name', arg_type=deployment_create_name_type)
c.argument('no_prompt', arg_type=no_prompt)
c.argument('result_format', arg_type=deployment_what_if_result_format_type)
c.argument('no_pretty_print', arg_type=deployment_what_if_no_pretty_print_type)
c.argument('exclude_change_types', arg_type=deployment_what_if_exclude_change_types_type)
with self.argument_context('deployment sub validate') as c:
c.argument('deployment_name', arg_type=deployment_create_name_type)
c.argument('handle_extended_json_format', arg_type=extended_json_format_type,
deprecate_info=c.deprecate(target='--handle-extended-json-format/-j'))
c.argument('no_prompt', arg_type=no_prompt)
with self.argument_context('deployment sub list') as c:
c.argument('filter_string', arg_type=filter_type)
with self.argument_context('deployment group') as c:
c.argument('resource_group_name', arg_type=resource_group_name_type, completer=get_resource_group_completion_list, required=True)
c.argument('mode', arg_type=get_enum_type(DeploymentMode, default='incremental'), help='Incremental (only add resources to resource group) or Complete (remove extra resources from resource group)')
c.argument('rollback_on_error', nargs='?', action=RollbackAction,
help='The name of a deployment to roll back to on error, or use as a flag to roll back to the last successful deployment.')
with self.argument_context('deployment group create') as c:
c.argument('deployment_name', arg_type=deployment_create_name_type)
c.argument('handle_extended_json_format', arg_type=extended_json_format_type,
deprecate_info=c.deprecate(target='--handle-extended-json-format/-j'))
c.argument('aux_subscriptions', nargs='+', options_list=['--aux-subs'],
help='Auxiliary subscriptions which will be used during deployment across tenants.',
deprecate_info=c.deprecate(target='--aux-subs', redirect='--aux-tenants'))
c.argument('aux_tenants', nargs='+', options_list=['--aux-tenants'],
help='Auxiliary tenants which will be used during deployment across tenants.')
c.argument('no_prompt', arg_type=no_prompt)
c.argument('confirm_with_what_if', arg_type=deployment_what_if_confirmation_type)
c.argument('what_if_result_format', options_list=['--what-if-result-format', '-r'],
arg_type=deployment_what_if_result_format_type)
c.argument('what_if_exclude_change_types', options_list=['--what-if-exclude-change-types', '-x'],
arg_type=deployment_what_if_exclude_change_types_type,
help="Space-separated list of resource change types to be excluded from What-If results. Applicable when --confirm-with-what-if is set.")
c.argument('what_if', arg_type=deployment_what_if_type)
c.argument('proceed_if_no_change', arg_type=deployment_what_if_proceed_if_no_change_type)
with self.argument_context('deployment group what-if') as c:
c.argument('deployment_name', arg_type=deployment_create_name_type)
c.argument('aux_tenants', nargs='+', options_list=['--aux-tenants'],
help='Auxiliary tenants which will be used during deployment across tenants.')
c.argument('no_prompt', arg_type=no_prompt)
c.argument('result_format', arg_type=deployment_what_if_result_format_type)
c.argument('no_pretty_print', arg_type=deployment_what_if_no_pretty_print_type)
c.argument('exclude_change_types', arg_type=deployment_what_if_exclude_change_types_type)
c.ignore("rollback_on_error")
with self.argument_context('deployment group validate') as c:
c.argument('deployment_name', arg_type=deployment_create_name_type)
c.argument('handle_extended_json_format', arg_type=extended_json_format_type,
deprecate_info=c.deprecate(target='--handle-extended-json-format/-j'))
c.argument('no_prompt', arg_type=no_prompt)
with self.argument_context('deployment group list') as c:
c.argument('filter_string', arg_type=filter_type)
with self.argument_context('deployment mg') as c:
c.argument('management_group_id', arg_type=management_group_id_type)
c.argument('deployment_location', arg_type=get_location_type(self.cli_ctx), required=True)
with self.argument_context('deployment mg create') as c:
c.argument('deployment_name', arg_type=deployment_create_name_type)
c.argument('handle_extended_json_format', arg_type=extended_json_format_type,
deprecate_info=c.deprecate(target='--handle-extended-json-format/-j'))
c.argument('no_prompt', arg_type=no_prompt)
c.argument('confirm_with_what_if', arg_type=deployment_what_if_confirmation_type, min_api="2019-10-01")
c.argument('what_if_result_format', options_list=['--what-if-result-format', '-r'],
arg_type=deployment_what_if_result_format_type, min_api="2019-10-01")
c.argument('what_if_exclude_change_types', options_list=['--what-if-exclude-change-types', '-x'],
arg_type=deployment_what_if_exclude_change_types_type,
help="Space-separated list of resource change types to be excluded from What-If results. Applicable when --confirm-with-what-if is set.",
min_api="2019-10-01")
c.argument('what_if', arg_type=deployment_what_if_type)
c.argument('proceed_if_no_change', arg_type=deployment_what_if_proceed_if_no_change_type)
with self.argument_context('deployment mg what-if') as c:
c.argument('deployment_name', arg_type=deployment_create_name_type)
c.argument('no_prompt', arg_type=no_prompt)
c.argument('result_format', arg_type=deployment_what_if_result_format_type)
c.argument('no_pretty_print', arg_type=deployment_what_if_no_pretty_print_type)
c.argument('exclude_change_types', arg_type=deployment_what_if_exclude_change_types_type)
with self.argument_context('deployment mg validate') as c:
c.argument('deployment_name', arg_type=deployment_create_name_type)
c.argument('handle_extended_json_format', arg_type=extended_json_format_type,
deprecate_info=c.deprecate(target='--handle-extended-json-format/-j'))
c.argument('no_prompt', arg_type=no_prompt)
with self.argument_context('deployment mg list') as c:
c.argument('filter_string', arg_type=filter_type)
with self.argument_context('deployment operation mg') as c:
c.argument('management_group_id', arg_type=management_group_id_type)
with self.argument_context('deployment tenant') as c:
c.argument('deployment_location', arg_type=get_location_type(self.cli_ctx), required=True)
with self.argument_context('deployment tenant create') as c:
c.argument('deployment_name', arg_type=deployment_create_name_type)
c.argument('handle_extended_json_format', arg_type=extended_json_format_type,
deprecate_info=c.deprecate(target='--handle-extended-json-format/-j'))
c.argument('no_prompt', arg_type=no_prompt)
c.argument('confirm_with_what_if', arg_type=deployment_what_if_confirmation_type, min_api="2019-10-01")
c.argument('what_if_result_format', options_list=['--what-if-result-format', '-r'],
arg_type=deployment_what_if_result_format_type, min_api="2019-10-01")
c.argument('what_if_exclude_change_types', options_list=['--what-if-exclude-change-types', '-x'],
arg_type=deployment_what_if_exclude_change_types_type,
help="Space-separated list of resource change types to be excluded from What-If results. Applicable when --confirm-with-what-if is set.",
min_api="2019-10-01")
c.argument('what_if', arg_type=deployment_what_if_type)
c.argument('proceed_if_no_change', arg_type=deployment_what_if_proceed_if_no_change_type)
with self.argument_context('deployment tenant what-if') as c:
c.argument('deployment_name', arg_type=deployment_create_name_type)
c.argument('no_prompt', arg_type=no_prompt)
c.argument('result_format', arg_type=deployment_what_if_result_format_type)
c.argument('no_pretty_print', arg_type=deployment_what_if_no_pretty_print_type)
c.argument('exclude_change_types', arg_type=deployment_what_if_exclude_change_types_type)
with self.argument_context('deployment tenant validate') as c:
c.argument('deployment_name', arg_type=deployment_create_name_type)
c.argument('handle_extended_json_format', arg_type=extended_json_format_type,
deprecate_info=c.deprecate(target='--handle-extended-json-format/-j'))
c.argument('no_prompt', arg_type=no_prompt)
with self.argument_context('deployment tenant list') as c:
c.argument('filter_string', arg_type=filter_type)
with self.argument_context('group export') as c:
c.argument('include_comments', action='store_true')
c.argument('include_parameter_default_value', action='store_true')
c.argument('skip_resource_name_params', action='store_true')
c.argument('skip_all_params', action='store_true')
c.argument('resource_ids', nargs='+', options_list='--resource-ids')
with self.argument_context('group create') as c:
c.argument('rg_name', options_list=['--name', '--resource-group', '-n', '-g'],
help='name of the new resource group', completer=None,
local_context_attribute=LocalContextAttribute(
name='resource_group_name', actions=[LocalContextAction.SET], scopes=[ALL]))
c.argument('managed_by', min_api='2016-09-01', help='The ID of the resource that manages this resource group.')
with self.argument_context('group delete') as c:
c.argument('resource_group_name', resource_group_name_type,
options_list=['--name', '-n', '--resource-group', '-g'], local_context_attribute=None)
with self.argument_context('tag') as c:
c.argument('tag_name', tag_name_type)
c.argument('tag_value', tag_value_type)
c.argument('resource_id', tag_resource_id_type)
c.argument('tags', tags_type)
c.argument('operation', arg_type=get_enum_type([item.value for item in list(TagUpdateOperation)]),
help='The update operation: options include Merge, Replace and Delete.')
with self.argument_context('lock') as c:
c.argument('lock_name', options_list=['--name', '-n'], validator=validate_lock_parameters)
c.argument('level', arg_type=get_enum_type(LockLevel), options_list=['--lock-type', '-t'], help='The type of lock restriction.')
c.argument('parent_resource_path', resource_parent_type)
c.argument('resource_provider_namespace', resource_namespace_type)
c.argument('resource_type', arg_type=resource_type_type, completer=get_resource_types_completion_list)
c.argument('resource_name', options_list=['--resource', '--resource-name'], help='Name or ID of the resource being locked. If an ID is given, other resource arguments should not be given.')
c.argument('ids', nargs='+', options_list='--ids', help='One or more resource IDs (space-delimited). If provided, no other "Resource Id" arguments should be specified.')
c.argument('resource_group', resource_group_name_type, validator=validate_lock_parameters)
with self.argument_context('resource lock') as c:
c.argument('resource_group', resource_group_name_type)
c.argument('resource_name', options_list=['--resource', '--resource-name'], help='If an ID is given, other resource arguments should not be given.', validator=validate_resource_lock)
with self.argument_context('group lock') as c:
c.argument('resource_group', resource_group_name_type, validator=validate_group_lock, id_part=None)
with self.argument_context('group lock create') as c:
c.argument('resource_group', required=True)
with self.argument_context('account lock') as c:
c.argument('resource_group', ignore_type, validator=validate_subscription_lock)
for scope in ['account', 'group']:
with self.argument_context('{} lock'.format(scope)) as c:
c.ignore('resource_provider_namespace', 'parent_resource_path', 'resource_type', 'resource_name')
for scope in ['lock', 'account lock', 'group lock', 'resource lock']:
with self.argument_context(scope) as c:
c.argument('lock_name', options_list=['--name', '-n'], help='Name of the lock')
c.argument('level', options_list=['--lock-type', '-t'], arg_type=get_enum_type([LockLevel.can_not_delete, LockLevel.read_only]), help='The type of lock restriction.')
c.argument('ids', nargs='+', options_list='--ids', help='One or more resource IDs (space-delimited). If provided, no other "Resource Id" arguments should be specified.')
c.argument('notes', help='Notes about this lock.')
with self.argument_context('managedapp') as c:
c.argument('resource_group_name', arg_type=resource_group_name_type, help='the resource group of the managed application', id_part='resource_group')
c.argument('application_name', options_list=['--name', '-n'], id_part='name')
c.argument('tags', tags_type)
with self.argument_context('managedapp definition') as c:
c.argument('resource_group_name', arg_type=resource_group_name_type, help='the resource group of the managed application definition', id_part='resource_group')
c.argument('application_definition_name', options_list=['--name', '-n'], id_part='name')
with self.argument_context('managedapp create') as c:
c.argument('name', options_list=['--name', '-n'], help='name of the new managed application', completer=None)
c.argument('location', help='the managed application location')
c.argument('managedapp_definition_id', options_list=['--managedapp-definition-id', '-d'], help='the full qualified managed application definition id')
c.argument('managedby_resource_group_id', options_list=['--managed-rg-id', '-m'], help='the resource group managed by the managed application')
c.argument('parameters', help='JSON formatted string or a path to a file with such content', type=file_type)
for operation in ['create', 'update']:
with self.argument_context('managedapp definition {}'.format(operation)) as c:
c.argument('lock_level', arg_type=get_enum_type(ApplicationLockLevel), help='The type of lock restriction.')
c.argument('authorizations', options_list=['--authorizations', '-a'], nargs='+', help="space-separated authorization pairs in a format of `<principalId>:<roleDefinitionId>`")
c.argument('create_ui_definition', options_list=['--create-ui-definition', '-c'], help='JSON formatted string or a path to a file with such content', type=file_type)
c.argument('main_template', options_list=['--main-template', '-t'], help='JSON formatted string or a path to a file with such content', type=file_type)
with self.argument_context('account') as c:
c.argument('subscription', options_list=['--subscription', '-s'], help='Name or ID of subscription.', completer=get_subscription_id_list)
c.ignore('_subscription') # hide global subscription parameter
with self.argument_context('account management-group') as c:
c.argument('group_name', options_list=['--name', '-n'])
c.argument('no_register', arg_type=get_three_state_flag(), help='Do not register resource programs')
with self.argument_context('account management-group show') as c:
c.argument('expand', options_list=['--expand', '-e'], action='store_true')
c.argument('recurse', options_list=['--recurse', '-r'], action='store_true')
with self.argument_context('account management-group create') as c:
c.argument('display_name', options_list=['--display-name', '-d'])
c.argument('parent', options_list=['--parent', '-p'])
with self.argument_context('account management-group update') as c:
c.argument('display_name', options_list=['--display-name', '-d'])
c.argument('parent_id', options_list=['--parent', '-p'])
with self.argument_context('ts') as c:
c.argument('name', options_list=['--name', '-n'], help='The name of the template spec.')
c.argument('version', options_list=['--version', '-v'], help='The template spec version.')
with self.argument_context('ts create') as c:
c.argument('resource_group', arg_type=resource_group_name_type, help='The resource group to store the template spec.')
c.argument('template_file', arg_type=deployment_template_file_type)
c.argument('ui_form_definition_file', arg_type=ui_form_definition_file_type, help='The uiFormDefinition file path in the file system for the template spec version.')
c.argument('location', options_list=['--location', '-l'], help='The location to store the template-spec and template-spec version(s). Cannot be changed after creation.')
c.argument('display_name', arg_type=ts_display_name_type)
c.argument('description', arg_type=ts_description_type)
c.argument('version_description', arg_type=ts_version_description_type)
c.argument('tags', tags_type)
c.argument('no_prompt', options_list=['--yes', '-y'], action='store_true', help='Do not prompt for confirmation')
with self.argument_context('ts update') as c:
c.argument('resource_group', arg_type=resource_group_name_type, help='The resource group to store the template spec.')
c.argument('template_spec', arg_type=deployment_template_spec_type)
c.argument('ui_form_definition_file', arg_type=ui_form_definition_file_type, help='The uiFormDefinition file path in the file system for the template spec version.')
c.argument('template_file', arg_type=deployment_template_file_type)
c.argument('display_name', arg_type=ts_display_name_type)
c.argument('description', arg_type=ts_description_type)
c.argument('version_description', arg_type=ts_version_description_type)
c.argument('tags', tags_type)
with self.argument_context('ts show') as c:
c.argument('template_spec', arg_type=deployment_template_spec_type)
with self.argument_context('ts export') as c:
c.argument('output_folder', options_list=['--output-folder'], help='Existing folder to output export(s).')
c.argument('template_spec', arg_type=deployment_template_spec_type)
with self.argument_context('ts delete') as c:
c.argument('resource_group', arg_type=resource_group_name_type, help='The resource group where the template spec or template spec version is stored.')
c.argument('template_spec', arg_type=deployment_template_spec_type)
with self.argument_context('ts list') as c:
c.argument('resource_group', arg_type=resource_group_name_type)
with self.argument_context('bicep build') as c:
c.argument('file', arg_type=CLIArgumentType(options_list=['--file', '-f'], completer=FilesCompleter(),
type=file_type, help="The path to the Bicep file to build in the file system."))
c.argument('outdir', arg_type=CLIArgumentType(options_list=['--outdir'], completer=DirectoriesCompleter(),
help="When set, saves the output at the specified directory."))
c.argument('outfile', arg_type=CLIArgumentType(options_list=['--outfile'], completer=FilesCompleter(),
help="When set, saves the output as the specified file path."))
c.argument('stdout', arg_type=CLIArgumentType(options_list=['--stdout'], action='store_true',
help="When set, prints all output to stdout instead of corresponding files."))
with self.argument_context('bicep decompile') as c:
c.argument('file', arg_type=CLIArgumentType(options_list=['--file', '-f'], completer=FilesCompleter(),
type=file_type, help="The path to the ARM template to decompile in the file system."))
with self.argument_context('bicep publish') as c:
c.argument('file', arg_type=CLIArgumentType(options_list=['--file', '-f'], completer=FilesCompleter(),
type=file_type, help="The path to the Bicep module file to publish in the file system."))
c.argument('target', arg_type=CLIArgumentType(options_list=['--target', '-t'],
help="The target location where the Bicep module will be published."))
with self.argument_context('bicep install') as c:
c.argument('version', options_list=['--version', '-v'], help='The version of Bicep CLI to be installed. Default to the latest if not specified.')
|
def load_arguments(self, _):
from argcomplete.completers import FilesCompleter
from argcomplete.completers import DirectoriesCompleter
from azure.mgmt.resource.locks.models import LockLevel
from azure.mgmt.resource.managedapplications.models import ApplicationLockLevel
from azure.mgmt.resource.policy.models import (ExemptionCategory, EnforcementMode)
from azure.cli.core.api import get_subscription_id_list
from azure.cli.core.commands.parameters import (
resource_group_name_type, get_location_type, tag_type, tags_type, get_resource_group_completion_list, no_wait_type, file_type,
get_enum_type, get_three_state_flag)
from azure.cli.core.profiles import ResourceType
from azure.cli.core.local_context import LocalContextAttribute, LocalContextAction, ALL
from knack.arguments import ignore_type, CLIArgumentType
from azure.cli.command_modules.resource._completers import (
get_policy_completion_list, get_policy_set_completion_list, get_policy_assignment_completion_list, get_policy_exemption_completion_list,
get_resource_types_completion_list, get_providers_completion_list)
from azure.cli.command_modules.resource._validators import (
validate_lock_parameters, validate_resource_lock, validate_group_lock, validate_subscription_lock, validate_metadata, RollbackAction,
validate_msi)
from azure.cli.command_modules.resource.parameters import TagUpdateOperation
DeploymentMode, WhatIfResultFormat, ChangeType = self.get_models('DeploymentMode', 'WhatIfResultFormat', 'ChangeType')
# BASIC PARAMETER CONFIGURATION
resource_name_type = CLIArgumentType(options_list=['--name', '-n'], help='The resource name. (Ex: myC)')
resource_type_type = CLIArgumentType(help="The resource type (Ex: 'resC'). Can also accept namespace/type format (Ex: 'Microsoft.Provider/resC')")
resource_namespace_type = CLIArgumentType(options_list='--namespace', completer=get_providers_completion_list, help="Provider namespace (Ex: 'Microsoft.Provider')")
resource_parent_type = CLIArgumentType(required=False, options_list=['--parent'], help="The parent path (Ex: 'resA/myA/resB/myB')")
existing_policy_definition_name_type = CLIArgumentType(options_list=['--name', '-n'], completer=get_policy_completion_list, help='The policy definition name.')
existing_policy_set_definition_name_type = CLIArgumentType(options_list=['--name', '-n'], completer=get_policy_set_completion_list, help='The policy set definition name.')
subscription_type = CLIArgumentType(options_list='--subscription', FilesCompleter=get_subscription_id_list, help='The subscription id of the policy [set] definition.')
management_group_name_type = CLIArgumentType(options_list='--management-group', help='The name of the management group of the policy [set] definition.')
identity_scope_type = CLIArgumentType(help="Scope that the system assigned identity can access")
identity_role_type = CLIArgumentType(options_list=['--role'], help="Role name or id that will be assigned to the managed identity")
extended_json_format_type = CLIArgumentType(options_list=['--handle-extended-json-format', '-j'], action='store_true',
help='Support to handle extended template content including multiline and comments in deployment')
deployment_name_type = CLIArgumentType(options_list=['--name', '-n'], required=True, help='The deployment name.')
deployment_create_name_type = CLIArgumentType(options_list=['--name', '-n'], required=False, help='The deployment name. Default to template file base name')
management_group_id_type = CLIArgumentType(options_list=['--management-group-id', '-m'], required=True, help='The management group id.')
deployment_template_file_type = CLIArgumentType(options_list=['--template-file', '-f'], completer=FilesCompleter(), type=file_type,
help="a path to a template file or Bicep file in the file system")
deployment_template_uri_type = CLIArgumentType(options_list=['--template-uri', '-u'], help='a uri to a remote template file')
deployment_template_spec_type = CLIArgumentType(options_list=['--template-spec', '-s'], min_api='2019-06-01', help="The template spec resource id.")
deployment_query_string_type = CLIArgumentType(options_list=['--query-string', '-q'], help="The query string (a SAS token) to be used with the template-uri in the case of linked templates.")
deployment_parameters_type = CLIArgumentType(options_list=['--parameters', '-p'], action='append', nargs='+', completer=FilesCompleter(), help='the deployment parameters')
filter_type = CLIArgumentType(options_list=['--filter'], is_preview=True,
help='Filter expression using OData notation. You can use --filter "provisioningState eq \'{state}\'" to filter provisioningState. '
'To get more information, please visit https://docs.microsoft.com/rest/api/resources/deployments/listatsubscriptionscope#uri-parameters')
no_prompt = CLIArgumentType(arg_type=get_three_state_flag(), help='The option to disable the prompt of missing parameters for ARM template. '
'When the value is true, the prompt requiring users to provide missing parameter will be ignored. The default value is false.')
deployment_what_if_type = CLIArgumentType(options_list=['--what-if', '-w'], action='store_true',
help='Instruct the command to run deployment What-If.',
min_api='2019-07-01')
deployment_what_if_proceed_if_no_change_type = CLIArgumentType(options_list=['--proceed-if-no-change'], action='store_true',
help='Instruct the command to execute the deployment if the What-If result contains no resource changes. Applicable when --confirm-with-what-if is set.',
min_api='2019-07-01')
deployment_what_if_result_format_type = CLIArgumentType(options_list=['--result-format', '-r'],
arg_type=get_enum_type(WhatIfResultFormat, "FullResourcePayloads"),
min_api='2019-07-01')
deployment_what_if_no_pretty_print_type = CLIArgumentType(options_list=['--no-pretty-print'], action='store_true',
help='Disable pretty-print for What-If results. When set, the output format type will be used.')
deployment_what_if_confirmation_type = CLIArgumentType(options_list=['--confirm-with-what-if', '-c'], action='store_true',
help='Instruct the command to run deployment What-If before executing the deployment. It then prompts you to acknowledge resource changes before it continues.',
min_api='2019-07-01')
deployment_what_if_exclude_change_types_type = CLIArgumentType(nargs="+", options_list=['--exclude-change-types', '-x'],
arg_type=get_enum_type(ChangeType),
help='Space-separated list of resource change types to be excluded from What-If results.',
min_api='2019-07-01')
tag_name_type = CLIArgumentType(options_list=['--name', '-n'], help='The tag name.')
tag_value_type = CLIArgumentType(options_list='--value', help='The tag value.')
tag_resource_id_type = CLIArgumentType(options_list='--resource-id',
help='The resource identifier for the tagged entity. A resource, a resource group or a subscription may be tagged.',
min_api='2019-10-01')
latest_include_preview_type = CLIArgumentType(options_list=['--latest-include-preview', '-v'], is_preview=True,
action='store_true', arg_group='Resource Id',
help='Indicate that the latest api-version will be used regardless of whether it is preview version (like 2020-01-01-preview) or not. '
'For example, if the supported api-version of resource provider is 2020-01-01-preview and 2019-01-01: '
'when passing in this parameter it will take the latest version 2020-01-01-preview, otherwise it will take the latest stable version 2019-01-01 without passing in this parameter')
ts_display_name_type = CLIArgumentType(options_list=['--display-name', '-d'], help='The display name of the template spec')
ts_description_type = CLIArgumentType(options_list=['--description'], help='The description of the parent template spec.')
ts_version_description_type = CLIArgumentType(options_list=['--version-description'], help='The description of the template spec version.')
ui_form_definition_file_type = CLIArgumentType(options_list=['--ui-form-definition'], completer=FilesCompleter(), type=file_type,
help="A path to a uiFormDefinition file in the file system")
_PROVIDER_HELP_TEXT = 'the resource namespace, aka \'provider\''
with self.argument_context('resource') as c:
c.argument('no_wait', no_wait_type)
c.argument('resource_group_name', resource_group_name_type, arg_group='Resource Id')
c.ignore('resource_id')
c.argument('resource_name', resource_name_type, arg_group='Resource Id')
c.argument('api_version', help='The api version of the resource (omit for the latest stable version)', required=False, arg_group='Resource Id')
c.argument('resource_provider_namespace', resource_namespace_type, arg_group='Resource Id')
c.argument('resource_type', arg_type=resource_type_type, completer=get_resource_types_completion_list, arg_group='Resource Id')
c.argument('parent_resource_path', resource_parent_type, arg_group='Resource Id')
c.argument('tag', tag_type)
c.argument('tags', tags_type)
c.argument('resource_ids', nargs='+', options_list=['--ids'], help='One or more resource IDs (space-delimited). If provided, no other "Resource Id" arguments should be specified.', arg_group='Resource Id')
c.argument('include_response_body', arg_type=get_three_state_flag(), help='Use if the default command output doesn\'t capture all of the property data.')
c.argument('latest_include_preview', latest_include_preview_type)
with self.argument_context('resource list') as c:
c.argument('name', resource_name_type)
with self.argument_context('resource move') as c:
c.argument('ids', nargs='+')
with self.argument_context('resource invoke-action') as c:
c.argument('action', help='The action that will be invoked on the specified resource')
c.argument('request_body', help='JSON encoded parameter arguments for the action that will be passed along in the post request body. Use @{file} to load from a file.')
with self.argument_context('resource create') as c:
c.argument('resource_id', options_list=['--id'], help='Resource ID.', action=None)
c.argument('properties', options_list=['--properties', '-p'], help='a JSON-formatted string containing resource properties')
c.argument('is_full_object', action='store_true', help='Indicate that the properties object includes other options such as location, tags, sku, and/or plan.')
with self.argument_context('resource link') as c:
c.argument('target_id', options_list=['--target', c.deprecate(target='--target-id', redirect='--target', hide=True)], help='Fully-qualified resource ID of the resource link target.')
c.argument('link_id', options_list=['--link', c.deprecate(target='--link-id', redirect='--link', hide=True)], help='Fully-qualified resource ID of the resource link.')
c.argument('notes', help='Notes for the link.')
c.argument('scope', help='Fully-qualified scope for retrieving links.')
c.argument('filter_string', options_list=['--filter', c.deprecate(target='--filter-string', redirect='--filter', hide=True)], help='Filter string for limiting results.')
with self.argument_context('resource tag') as c:
c.argument('is_incremental', action='store_true', options_list=['--is-incremental', '-i'],
help='The option to add tags incrementally without deleting the original tags. If the key of new tag and original tag are duplicated, the original value will be overwritten.')
with self.argument_context('resource wait') as c:
c.ignore('latest_include_preview')
with self.argument_context('provider') as c:
c.ignore('top')
c.argument('resource_provider_namespace', options_list=['--namespace', '-n'], completer=get_providers_completion_list, help=_PROVIDER_HELP_TEXT)
with self.argument_context('provider register') as c:
c.argument('mg', help="The management group id to register.", options_list=['--management-group-id', '-m'])
c.argument('accept_terms', action='store_true', is_preview=True, help="Accept market place terms and RP terms for RPaaS. Required when registering RPs from RPaaS, such as 'Microsoft.Confluent' and 'Microsoft.Datadog'.", deprecate_info=c.deprecate(hide=True))
c.argument('wait', action='store_true', help='wait for the registration to finish')
c.argument('consent_to_permissions', options_list=['--consent-to-permissions', '-c'], action='store_true', help='A value indicating whether authorization is consented or not.')
with self.argument_context('provider unregister') as c:
c.argument('wait', action='store_true', help='wait for unregistration to finish')
with self.argument_context('provider operation') as c:
c.argument('api_version', help="The api version of the 'Microsoft.Authorization/providerOperations' resource (omit for the latest stable version)")
with self.argument_context('feature') as c:
c.argument('resource_provider_namespace', options_list='--namespace', required=True, help=_PROVIDER_HELP_TEXT)
c.argument('feature_name', options_list=['--name', '-n'], help='the feature name')
with self.argument_context('feature list') as c:
c.argument('resource_provider_namespace', options_list='--namespace', required=False, help=_PROVIDER_HELP_TEXT)
with self.argument_context('feature registration') as c:
c.argument('resource_provider_namespace', options_list='--namespace', required=True, help=_PROVIDER_HELP_TEXT)
c.argument('feature_name', options_list=['--name', '-n'], help='the feature name')
with self.argument_context('feature registration list') as c:
c.argument('resource_provider_namespace', options_list='--namespace', required=False, help=_PROVIDER_HELP_TEXT)
with self.argument_context('policy') as c:
c.argument('resource_group_name', arg_type=resource_group_name_type, help='the resource group where the policy will be applied')
with self.argument_context('policy definition', resource_type=ResourceType.MGMT_RESOURCE_POLICY) as c:
c.argument('policy_definition_name', arg_type=existing_policy_definition_name_type)
c.argument('rules', help='JSON formatted string or a path to a file with such content', type=file_type, completer=FilesCompleter())
c.argument('display_name', help='Display name of policy definition.')
c.argument('description', help='Description of policy definition.')
c.argument('params', help='JSON formatted string or a path to a file or uri with parameter definitions.', type=file_type, completer=FilesCompleter(), min_api='2016-12-01')
c.argument('metadata', min_api='2017-06-01-preview', nargs='+', validator=validate_metadata, help='Metadata in space-separated key=value pairs.')
c.argument('management_group', arg_type=management_group_name_type)
c.argument('mode', options_list=['--mode', '-m'], help='Mode of the policy definition, e.g. All, Indexed. Please visit https://aka.ms/azure-policy-mode for more information.', min_api='2016-12-01')
c.argument('subscription', arg_type=subscription_type)
c.ignore('_subscription') # disable global subscription
with self.argument_context('policy definition create', resource_type=ResourceType.MGMT_RESOURCE_POLICY) as c:
c.argument('name', options_list=['--name', '-n'], help='Name of the new policy definition.')
with self.argument_context('policy assignment', resource_type=ResourceType.MGMT_RESOURCE_POLICY) as c:
c.ignore('_subscription')
c.argument('name', options_list=['--name', '-n'], completer=get_policy_assignment_completion_list, help='Name of the policy assignment.')
c.argument('scope', help='Scope to which this policy assignment applies.')
c.argument('disable_scope_strict_match', action='store_true', help='Include policy assignments either inherited from parent scope or at child scope.')
c.argument('display_name', help='Display name of the policy assignment.')
c.argument('description', help='Description of the policy assignment.', min_api='2016-12-01')
c.argument('policy', help='Name or id of the policy definition.', completer=get_policy_completion_list)
c.argument('params', options_list=['--params', '-p'], help='JSON formatted string or a path to a file or uri with parameter values of the policy rule.', type=file_type, completer=FilesCompleter(), min_api='2016-12-01')
with self.argument_context('policy assignment', resource_type=ResourceType.MGMT_RESOURCE_POLICY, min_api='2017-06-01-preview') as c:
c.argument('policy_set_definition', options_list=['--policy-set-definition', '-d'], help='Name or id of the policy set definition.')
c.argument('sku', options_list=['--sku', '-s'], help='policy sku.', arg_type=get_enum_type(['free', 'standard']), deprecate_info=c.deprecate(hide=True))
c.argument('notscopes', options_list='--not-scopes', nargs='+')
with self.argument_context('policy assignment', resource_type=ResourceType.MGMT_RESOURCE_POLICY, arg_group='Managed Identity', min_api='2018-05-01') as c:
c.argument('assign_identity', nargs='*', validator=validate_msi, help="Assigns a system assigned identity to the policy assignment.")
c.argument('identity_scope', arg_type=identity_scope_type)
c.argument('identity_role', arg_type=identity_role_type)
with self.argument_context('policy assignment', resource_type=ResourceType.MGMT_RESOURCE_POLICY, min_api='2019-06-01') as c:
c.argument('enforcement_mode', options_list=['--enforcement-mode', '-e'], help='Enforcement mode of the policy assignment, e.g. Default, DoNotEnforce. Please visit https://aka.ms/azure-policyAssignment-enforcement-mode for more information.', arg_type=get_enum_type(EnforcementMode))
with self.argument_context('policy assignment create', resource_type=ResourceType.MGMT_RESOURCE_POLICY) as c:
c.argument('name', options_list=['--name', '-n'], help='Name of the new policy assignment.')
with self.argument_context('policy assignment create', resource_type=ResourceType.MGMT_RESOURCE_POLICY, min_api='2018-05-01') as c:
c.argument('location', arg_type=get_location_type(self.cli_ctx), help='The location of the policy assignment. Only required when utilizing managed identity.')
with self.argument_context('policy assignment identity', resource_type=ResourceType.MGMT_RESOURCE_POLICY, min_api='2018-05-01') as c:
c.argument('identity_scope', arg_type=identity_scope_type)
c.argument('identity_role', arg_type=identity_role_type)
with self.argument_context('policy assignment non-compliance-message', resource_type=ResourceType.MGMT_RESOURCE_POLICY, min_api='2020-09-01') as c:
c.argument('message', options_list=['--message', '-m'], help='Message that will be shown when a resource is denied by policy or evaluation details are inspected.')
c.argument('policy_definition_reference_id', options_list=['--policy-definition-reference-id', '-r'], help='Policy definition reference ID within the assigned initiative (policy set) that the message applies to.')
with self.argument_context('policy set-definition', min_api='2017-06-01-preview', resource_type=ResourceType.MGMT_RESOURCE_POLICY) as c:
c.argument('policy_set_definition_name', arg_type=existing_policy_set_definition_name_type)
c.argument('display_name', help='Display name of policy set definition.')
c.argument('description', help='Description of policy set definition.')
c.argument('params', help='JSON formatted string or a path to a file or uri with parameter definitions.', type=file_type, completer=FilesCompleter())
c.argument('definitions', help='JSON formatted string or a path to a file or uri containing definitions.', type=file_type, completer=FilesCompleter())
c.argument('definition_groups', min_api='2019-09-01', help='JSON formatted string or a path to a file or uri containing policy definition groups. Groups are used to organize policy definitions within a policy set.', type=file_type, completer=FilesCompleter())
c.argument('metadata', nargs='+', validator=validate_metadata, help='Metadata in space-separated key=value pairs.')
c.argument('management_group', arg_type=management_group_name_type)
c.argument('subscription', arg_type=subscription_type)
c.ignore('_subscription') # disable global subscription
with self.argument_context('policy set-definition create', min_api='2017-06-01-preview', resource_type=ResourceType.MGMT_RESOURCE_POLICY) as c:
c.argument('name', options_list=['--name', '-n'], help='Name of the new policy set definition.')
with self.argument_context('policy exemption', min_api='2020-09-01', resource_type=ResourceType.MGMT_RESOURCE_POLICY) as c:
c.ignore('_subscription')
c.argument('name', options_list=['--name', '-n'], completer=get_policy_exemption_completion_list, help='Name of the policy exemption.')
c.argument('scope', help='Scope to which this policy exemption applies.')
c.argument('disable_scope_strict_match', options_list=['--disable-scope-strict-match', '-i'], action='store_true', help='Include policy exemptions either inherited from parent scope or at child scope.')
c.argument('display_name', help='Display name of the policy exemption.')
c.argument('description', help='Description of policy exemption.')
c.argument('exemption_category', options_list=['--exemption-category', '-e'], help='The policy exemption category of the policy exemption', arg_type=get_enum_type(ExemptionCategory))
c.argument('policy_definition_reference_ids', nargs='+', options_list=['--policy-definition-reference-ids', '-r'], help='The policy definition reference ids to exempt in the initiative (policy set).')
c.argument('expires_on', help='The expiration date and time (in UTC ISO 8601 format yyyy-MM-ddTHH:mm:ssZ) of the policy exemption.')
c.argument('metadata', nargs='+', validator=validate_metadata, help='Metadata in space-separated key=value pairs.')
with self.argument_context('policy exemption create', min_api='2020-09-01', resource_type=ResourceType.MGMT_RESOURCE_POLICY) as c:
c.argument('name', options_list=['--name', '-n'], help='Name of the new policy exemption.')
c.argument('policy_assignment', options_list=['--policy-assignment', '-a'], help='The referenced policy assignment Id for the policy exemption.')
with self.argument_context('group') as c:
c.argument('tag', tag_type)
c.argument('tags', tags_type)
c.argument('resource_group_name', resource_group_name_type, options_list=['--name', '-n', '--resource-group', '-g'])
with self.argument_context('group deployment') as c:
c.argument('resource_group_name', arg_type=resource_group_name_type, completer=get_resource_group_completion_list)
c.argument('deployment_name', arg_type=deployment_name_type)
c.argument('template_file', arg_type=deployment_template_file_type)
c.argument('template_uri', arg_type=deployment_template_uri_type)
c.argument('mode', arg_type=get_enum_type(DeploymentMode, default='incremental'),
help='Incremental (only add resources to resource group) or Complete (remove extra resources from resource group)')
c.argument('parameters', arg_type=deployment_parameters_type)
c.argument('rollback_on_error', nargs='?', action=RollbackAction,
help='The name of a deployment to roll back to on error, or use as a flag to roll back to the last successful deployment.')
with self.argument_context('group deployment create') as c:
c.argument('deployment_name', arg_type=deployment_create_name_type)
c.argument('handle_extended_json_format', arg_type=extended_json_format_type,
deprecate_info=c.deprecate(target='--handle-extended-json-format/-j'))
c.argument('aux_subscriptions', nargs='+', options_list=['--aux-subs'],
help='Auxiliary subscriptions which will be used during deployment across tenants.',
deprecate_info=c.deprecate(target='--aux-subs', redirect='--aux-tenants'))
c.argument('aux_tenants', nargs='+', options_list=['--aux-tenants'],
help='Auxiliary tenants which will be used during deployment across tenants.')
c.argument('no_prompt', arg_type=no_prompt)
with self.argument_context('group deployment validate') as c:
c.argument('handle_extended_json_format', arg_type=extended_json_format_type,
deprecate_info=c.deprecate(target='--handle-extended-json-format/-j'))
c.argument('no_prompt', arg_type=no_prompt)
with self.argument_context('group deployment list') as c:
c.argument('filter_string', arg_type=filter_type)
with self.argument_context('group deployment operation show') as c:
c.argument('operation_ids', nargs='+', help='A list of operation ids to show')
with self.argument_context('deployment') as c:
c.argument('deployment_name', arg_type=deployment_name_type)
c.argument('deployment_location', arg_type=get_location_type(self.cli_ctx), required=True)
c.argument('template_file', arg_type=deployment_template_file_type)
c.argument('template_uri', arg_type=deployment_template_uri_type)
c.argument('template_spec', arg_type=deployment_template_spec_type)
c.argument('query_string', arg_type=deployment_query_string_type)
c.argument('parameters', arg_type=deployment_parameters_type)
with self.argument_context('deployment create') as c:
c.argument('deployment_name', arg_type=deployment_create_name_type)
c.argument('handle_extended_json_format', arg_type=extended_json_format_type,
deprecate_info=c.deprecate(target='--handle-extended-json-format/-j'))
c.argument('no_prompt', arg_type=no_prompt)
c.argument('confirm_with_what_if', arg_type=deployment_what_if_confirmation_type)
c.argument('what_if_result_format', options_list=['--what-if-result-format', '-r'],
arg_type=deployment_what_if_result_format_type)
c.argument('what_if_exclude_change_types', options_list=['--what-if-exclude-change-types', '-x'],
arg_type=deployment_what_if_exclude_change_types_type,
help="Space-separated list of resource change types to be excluded from What-If results. Applicable when --confirm-with-what-if is set.")
c.argument('what_if', arg_type=deployment_what_if_type)
c.argument('proceed_if_no_change', arg_type=deployment_what_if_proceed_if_no_change_type)
with self.argument_context('deployment validate') as c:
c.argument('deployment_name', arg_type=deployment_create_name_type)
c.argument('handle_extended_json_format', arg_type=extended_json_format_type,
deprecate_info=c.deprecate(target='--handle-extended-json-format/-j'))
c.argument('no_prompt', arg_type=no_prompt)
with self.argument_context('deployment operation') as c:
c.argument('operation_ids', nargs='+', help='A list of operation ids to show')
with self.argument_context('deployment list') as c:
c.argument('filter_string', arg_type=filter_type)
with self.argument_context('deployment sub') as c:
c.argument('deployment_location', arg_type=get_location_type(self.cli_ctx), required=True)
with self.argument_context('deployment sub create') as c:
c.argument('deployment_name', arg_type=deployment_create_name_type)
c.argument('handle_extended_json_format', arg_type=extended_json_format_type,
deprecate_info=c.deprecate(target='--handle-extended-json-format/-j'))
c.argument('no_prompt', arg_type=no_prompt)
c.argument('confirm_with_what_if', arg_type=deployment_what_if_confirmation_type)
c.argument('what_if_result_format', options_list=['--what-if-result-format', '-r'],
arg_type=deployment_what_if_result_format_type)
c.argument('what_if_exclude_change_types', options_list=['--what-if-exclude-change-types', '-x'],
arg_type=deployment_what_if_exclude_change_types_type,
help="Space-separated list of resource change types to be excluded from What-If results. Applicable when --confirm-with-what-if is set.")
c.argument('what_if', arg_type=deployment_what_if_type)
c.argument('proceed_if_no_change', arg_type=deployment_what_if_proceed_if_no_change_type)
with self.argument_context('deployment sub what-if') as c:
c.argument('deployment_name', arg_type=deployment_create_name_type)
c.argument('no_prompt', arg_type=no_prompt)
c.argument('result_format', arg_type=deployment_what_if_result_format_type)
c.argument('no_pretty_print', arg_type=deployment_what_if_no_pretty_print_type)
c.argument('exclude_change_types', arg_type=deployment_what_if_exclude_change_types_type)
with self.argument_context('deployment sub validate') as c:
c.argument('deployment_name', arg_type=deployment_create_name_type)
c.argument('handle_extended_json_format', arg_type=extended_json_format_type,
deprecate_info=c.deprecate(target='--handle-extended-json-format/-j'))
c.argument('no_prompt', arg_type=no_prompt)
with self.argument_context('deployment sub list') as c:
c.argument('filter_string', arg_type=filter_type)
with self.argument_context('deployment group') as c:
c.argument('resource_group_name', arg_type=resource_group_name_type, completer=get_resource_group_completion_list, required=True)
c.argument('mode', arg_type=get_enum_type(DeploymentMode, default='incremental'), help='Incremental (only add resources to resource group) or Complete (remove extra resources from resource group)')
c.argument('rollback_on_error', nargs='?', action=RollbackAction,
help='The name of a deployment to roll back to on error, or use as a flag to roll back to the last successful deployment.')
with self.argument_context('deployment group create') as c:
c.argument('deployment_name', arg_type=deployment_create_name_type)
c.argument('handle_extended_json_format', arg_type=extended_json_format_type,
deprecate_info=c.deprecate(target='--handle-extended-json-format/-j'))
c.argument('aux_subscriptions', nargs='+', options_list=['--aux-subs'],
help='Auxiliary subscriptions which will be used during deployment across tenants.',
deprecate_info=c.deprecate(target='--aux-subs', redirect='--aux-tenants'))
c.argument('aux_tenants', nargs='+', options_list=['--aux-tenants'],
help='Auxiliary tenants which will be used during deployment across tenants.')
c.argument('no_prompt', arg_type=no_prompt)
c.argument('confirm_with_what_if', arg_type=deployment_what_if_confirmation_type)
c.argument('what_if_result_format', options_list=['--what-if-result-format', '-r'],
arg_type=deployment_what_if_result_format_type)
c.argument('what_if_exclude_change_types', options_list=['--what-if-exclude-change-types', '-x'],
arg_type=deployment_what_if_exclude_change_types_type,
help="Space-separated list of resource change types to be excluded from What-If results. Applicable when --confirm-with-what-if is set.")
c.argument('what_if', arg_type=deployment_what_if_type)
c.argument('proceed_if_no_change', arg_type=deployment_what_if_proceed_if_no_change_type)
with self.argument_context('deployment group what-if') as c:
c.argument('deployment_name', arg_type=deployment_create_name_type)
c.argument('aux_tenants', nargs='+', options_list=['--aux-tenants'],
help='Auxiliary tenants which will be used during deployment across tenants.')
c.argument('no_prompt', arg_type=no_prompt)
c.argument('result_format', arg_type=deployment_what_if_result_format_type)
c.argument('no_pretty_print', arg_type=deployment_what_if_no_pretty_print_type)
c.argument('exclude_change_types', arg_type=deployment_what_if_exclude_change_types_type)
c.ignore("rollback_on_error")
with self.argument_context('deployment group validate') as c:
c.argument('deployment_name', arg_type=deployment_create_name_type)
c.argument('handle_extended_json_format', arg_type=extended_json_format_type,
deprecate_info=c.deprecate(target='--handle-extended-json-format/-j'))
c.argument('no_prompt', arg_type=no_prompt)
with self.argument_context('deployment group list') as c:
c.argument('filter_string', arg_type=filter_type)
with self.argument_context('deployment mg') as c:
c.argument('management_group_id', arg_type=management_group_id_type)
c.argument('deployment_location', arg_type=get_location_type(self.cli_ctx), required=True)
with self.argument_context('deployment mg create') as c:
c.argument('deployment_name', arg_type=deployment_create_name_type)
c.argument('handle_extended_json_format', arg_type=extended_json_format_type,
deprecate_info=c.deprecate(target='--handle-extended-json-format/-j'))
c.argument('no_prompt', arg_type=no_prompt)
c.argument('confirm_with_what_if', arg_type=deployment_what_if_confirmation_type, min_api="2019-10-01")
c.argument('what_if_result_format', options_list=['--what-if-result-format', '-r'],
arg_type=deployment_what_if_result_format_type, min_api="2019-10-01")
c.argument('what_if_exclude_change_types', options_list=['--what-if-exclude-change-types', '-x'],
arg_type=deployment_what_if_exclude_change_types_type,
help="Space-separated list of resource change types to be excluded from What-If results. Applicable when --confirm-with-what-if is set.",
min_api="2019-10-01")
c.argument('what_if', arg_type=deployment_what_if_type)
c.argument('proceed_if_no_change', arg_type=deployment_what_if_proceed_if_no_change_type)
with self.argument_context('deployment mg what-if') as c:
c.argument('deployment_name', arg_type=deployment_create_name_type)
c.argument('no_prompt', arg_type=no_prompt)
c.argument('result_format', arg_type=deployment_what_if_result_format_type)
c.argument('no_pretty_print', arg_type=deployment_what_if_no_pretty_print_type)
c.argument('exclude_change_types', arg_type=deployment_what_if_exclude_change_types_type)
with self.argument_context('deployment mg validate') as c:
c.argument('deployment_name', arg_type=deployment_create_name_type)
c.argument('handle_extended_json_format', arg_type=extended_json_format_type,
deprecate_info=c.deprecate(target='--handle-extended-json-format/-j'))
c.argument('no_prompt', arg_type=no_prompt)
with self.argument_context('deployment mg list') as c:
c.argument('filter_string', arg_type=filter_type)
with self.argument_context('deployment operation mg') as c:
c.argument('management_group_id', arg_type=management_group_id_type)
with self.argument_context('deployment tenant') as c:
c.argument('deployment_location', arg_type=get_location_type(self.cli_ctx), required=True)
with self.argument_context('deployment tenant create') as c:
c.argument('deployment_name', arg_type=deployment_create_name_type)
c.argument('handle_extended_json_format', arg_type=extended_json_format_type,
deprecate_info=c.deprecate(target='--handle-extended-json-format/-j'))
c.argument('no_prompt', arg_type=no_prompt)
c.argument('confirm_with_what_if', arg_type=deployment_what_if_confirmation_type, min_api="2019-10-01")
c.argument('what_if_result_format', options_list=['--what-if-result-format', '-r'],
arg_type=deployment_what_if_result_format_type, min_api="2019-10-01")
c.argument('what_if_exclude_change_types', options_list=['--what-if-exclude-change-types', '-x'],
arg_type=deployment_what_if_exclude_change_types_type,
help="Space-separated list of resource change types to be excluded from What-If results. Applicable when --confirm-with-what-if is set.",
min_api="2019-10-01")
c.argument('what_if', arg_type=deployment_what_if_type)
c.argument('proceed_if_no_change', arg_type=deployment_what_if_proceed_if_no_change_type)
with self.argument_context('deployment tenant what-if') as c:
c.argument('deployment_name', arg_type=deployment_create_name_type)
c.argument('no_prompt', arg_type=no_prompt)
c.argument('result_format', arg_type=deployment_what_if_result_format_type)
c.argument('no_pretty_print', arg_type=deployment_what_if_no_pretty_print_type)
c.argument('exclude_change_types', arg_type=deployment_what_if_exclude_change_types_type)
with self.argument_context('deployment tenant validate') as c:
c.argument('deployment_name', arg_type=deployment_create_name_type)
c.argument('handle_extended_json_format', arg_type=extended_json_format_type,
deprecate_info=c.deprecate(target='--handle-extended-json-format/-j'))
c.argument('no_prompt', arg_type=no_prompt)
with self.argument_context('deployment tenant list') as c:
c.argument('filter_string', arg_type=filter_type)
with self.argument_context('group export') as c:
c.argument('include_comments', action='store_true')
c.argument('include_parameter_default_value', action='store_true')
c.argument('skip_resource_name_params', action='store_true')
c.argument('skip_all_params', action='store_true')
c.argument('resource_ids', nargs='+', options_list='--resource-ids')
with self.argument_context('group create') as c:
c.argument('rg_name', options_list=['--name', '--resource-group', '-n', '-g'],
help='name of the new resource group', completer=None,
local_context_attribute=LocalContextAttribute(
name='resource_group_name', actions=[LocalContextAction.SET], scopes=[ALL]))
c.argument('managed_by', min_api='2016-09-01', help='The ID of the resource that manages this resource group.')
with self.argument_context('group delete') as c:
c.argument('resource_group_name', resource_group_name_type,
options_list=['--name', '-n', '--resource-group', '-g'], local_context_attribute=None)
with self.argument_context('tag') as c:
c.argument('tag_name', tag_name_type)
c.argument('tag_value', tag_value_type)
c.argument('resource_id', tag_resource_id_type)
c.argument('tags', tags_type)
c.argument('operation', arg_type=get_enum_type([item.value for item in list(TagUpdateOperation)]),
help='The update operation: options include Merge, Replace and Delete.')
with self.argument_context('lock') as c:
c.argument('lock_name', options_list=['--name', '-n'], validator=validate_lock_parameters)
c.argument('level', arg_type=get_enum_type(LockLevel), options_list=['--lock-type', '-t'], help='The type of lock restriction.')
c.argument('parent_resource_path', resource_parent_type)
c.argument('resource_provider_namespace', resource_namespace_type)
c.argument('resource_type', arg_type=resource_type_type, completer=get_resource_types_completion_list)
c.argument('resource_name', options_list=['--resource', '--resource-name'], help='Name or ID of the resource being locked. If an ID is given, other resource arguments should not be given.')
c.argument('ids', nargs='+', options_list='--ids', help='One or more resource IDs (space-delimited). If provided, no other "Resource Id" arguments should be specified.')
c.argument('resource_group', resource_group_name_type, validator=validate_lock_parameters)
with self.argument_context('resource lock') as c:
c.argument('resource_group', resource_group_name_type)
c.argument('resource_name', options_list=['--resource', '--resource-name'], help='If an ID is given, other resource arguments should not be given.', validator=validate_resource_lock)
with self.argument_context('group lock') as c:
c.argument('resource_group', resource_group_name_type, validator=validate_group_lock, id_part=None)
with self.argument_context('group lock create') as c:
c.argument('resource_group', required=True)
with self.argument_context('account lock') as c:
c.argument('resource_group', ignore_type, validator=validate_subscription_lock)
for scope in ['account', 'group']:
with self.argument_context('{} lock'.format(scope)) as c:
c.ignore('resource_provider_namespace', 'parent_resource_path', 'resource_type', 'resource_name')
for scope in ['lock', 'account lock', 'group lock', 'resource lock']:
with self.argument_context(scope) as c:
c.argument('lock_name', options_list=['--name', '-n'], help='Name of the lock')
c.argument('level', options_list=['--lock-type', '-t'], arg_type=get_enum_type([LockLevel.can_not_delete, LockLevel.read_only]), help='The type of lock restriction.')
c.argument('ids', nargs='+', options_list='--ids', help='One or more resource IDs (space-delimited). If provided, no other "Resource Id" arguments should be specified.')
c.argument('notes', help='Notes about this lock.')
with self.argument_context('managedapp') as c:
c.argument('resource_group_name', arg_type=resource_group_name_type, help='the resource group of the managed application', id_part='resource_group')
c.argument('application_name', options_list=['--name', '-n'], id_part='name')
c.argument('tags', tags_type)
with self.argument_context('managedapp definition') as c:
c.argument('resource_group_name', arg_type=resource_group_name_type, help='the resource group of the managed application definition', id_part='resource_group')
c.argument('application_definition_name', options_list=['--name', '-n'], id_part='name')
with self.argument_context('managedapp create') as c:
c.argument('name', options_list=['--name', '-n'], help='name of the new managed application', completer=None)
c.argument('location', help='the managed application location')
c.argument('managedapp_definition_id', options_list=['--managedapp-definition-id', '-d'], help='the full qualified managed application definition id')
c.argument('managedby_resource_group_id', options_list=['--managed-rg-id', '-m'], help='the resource group managed by the managed application')
c.argument('parameters', help='JSON formatted string or a path to a file with such content', type=file_type)
for operation in ['create', 'update']:
with self.argument_context('managedapp definition {}'.format(operation)) as c:
c.argument('lock_level', arg_type=get_enum_type(ApplicationLockLevel), help='The type of lock restriction.')
c.argument('authorizations', options_list=['--authorizations', '-a'], nargs='+', help="space-separated authorization pairs in a format of `<principalId>:<roleDefinitionId>`")
c.argument('create_ui_definition', options_list=['--create-ui-definition', '-c'], help='JSON formatted string or a path to a file with such content', type=file_type)
c.argument('main_template', options_list=['--main-template', '-t'], help='JSON formatted string or a path to a file with such content', type=file_type)
with self.argument_context('account') as c:
c.argument('subscription', options_list=['--subscription', '-s'], help='Name or ID of subscription.', completer=get_subscription_id_list)
c.ignore('_subscription') # hide global subscription parameter
with self.argument_context('account management-group') as c:
c.argument('group_name', options_list=['--name', '-n'])
c.argument('no_register', arg_type=get_three_state_flag(), help='Skip registration for resource provider Microsoft.Management ')
with self.argument_context('account management-group show') as c:
c.argument('expand', options_list=['--expand', '-e'], action='store_true')
c.argument('recurse', options_list=['--recurse', '-r'], action='store_true')
with self.argument_context('account management-group create') as c:
c.argument('display_name', options_list=['--display-name', '-d'])
c.argument('parent', options_list=['--parent', '-p'])
with self.argument_context('account management-group update') as c:
c.argument('display_name', options_list=['--display-name', '-d'])
c.argument('parent_id', options_list=['--parent', '-p'])
with self.argument_context('ts') as c:
c.argument('name', options_list=['--name', '-n'], help='The name of the template spec.')
c.argument('version', options_list=['--version', '-v'], help='The template spec version.')
with self.argument_context('ts create') as c:
c.argument('resource_group', arg_type=resource_group_name_type, help='The resource group to store the template spec.')
c.argument('template_file', arg_type=deployment_template_file_type)
c.argument('ui_form_definition_file', arg_type=ui_form_definition_file_type, help='The uiFormDefinition file path in the file system for the template spec version.')
c.argument('location', options_list=['--location', '-l'], help='The location to store the template-spec and template-spec version(s). Cannot be changed after creation.')
c.argument('display_name', arg_type=ts_display_name_type)
c.argument('description', arg_type=ts_description_type)
c.argument('version_description', arg_type=ts_version_description_type)
c.argument('tags', tags_type)
c.argument('no_prompt', options_list=['--yes', '-y'], action='store_true', help='Do not prompt for confirmation')
with self.argument_context('ts update') as c:
c.argument('resource_group', arg_type=resource_group_name_type, help='The resource group to store the template spec.')
c.argument('template_spec', arg_type=deployment_template_spec_type)
c.argument('ui_form_definition_file', arg_type=ui_form_definition_file_type, help='The uiFormDefinition file path in the file system for the template spec version.')
c.argument('template_file', arg_type=deployment_template_file_type)
c.argument('display_name', arg_type=ts_display_name_type)
c.argument('description', arg_type=ts_description_type)
c.argument('version_description', arg_type=ts_version_description_type)
c.argument('tags', tags_type)
with self.argument_context('ts show') as c:
c.argument('template_spec', arg_type=deployment_template_spec_type)
with self.argument_context('ts export') as c:
c.argument('output_folder', options_list=['--output-folder'], help='Existing folder to output export(s).')
c.argument('template_spec', arg_type=deployment_template_spec_type)
with self.argument_context('ts delete') as c:
c.argument('resource_group', arg_type=resource_group_name_type, help='The resource group where the template spec or template spec version is stored.')
c.argument('template_spec', arg_type=deployment_template_spec_type)
with self.argument_context('ts list') as c:
c.argument('resource_group', arg_type=resource_group_name_type)
with self.argument_context('bicep build') as c:
c.argument('file', arg_type=CLIArgumentType(options_list=['--file', '-f'], completer=FilesCompleter(),
type=file_type, help="The path to the Bicep file to build in the file system."))
c.argument('outdir', arg_type=CLIArgumentType(options_list=['--outdir'], completer=DirectoriesCompleter(),
help="When set, saves the output at the specified directory."))
c.argument('outfile', arg_type=CLIArgumentType(options_list=['--outfile'], completer=FilesCompleter(),
help="When set, saves the output as the specified file path."))
c.argument('stdout', arg_type=CLIArgumentType(options_list=['--stdout'], action='store_true',
help="When set, prints all output to stdout instead of corresponding files."))
with self.argument_context('bicep decompile') as c:
c.argument('file', arg_type=CLIArgumentType(options_list=['--file', '-f'], completer=FilesCompleter(),
type=file_type, help="The path to the ARM template to decompile in the file system."))
with self.argument_context('bicep publish') as c:
c.argument('file', arg_type=CLIArgumentType(options_list=['--file', '-f'], completer=FilesCompleter(),
type=file_type, help="The path to the Bicep module file to publish in the file system."))
c.argument('target', arg_type=CLIArgumentType(options_list=['--target', '-t'],
help="The target location where the Bicep module will be published."))
with self.argument_context('bicep install') as c:
c.argument('version', options_list=['--version', '-v'], help='The version of Bicep CLI to be installed. Default to the latest if not specified.')
|
27,400 |
def get_db_entry(path):
"""
Downloads entry from accuraterip.com.
`path' is in the format of the output of table.accuraterip_path().
"""
raw_entry = _download_entry(path)
if not raw_entry:
logger.warning('entry not found in AccurateRip database')
raise EntryNotFound
return _split_responses(raw_entry)
|
def get_db_entry(path):
"""
Download entry from accuraterip.com.
`path' is in the format of the output of table.accuraterip_path().
"""
raw_entry = _download_entry(path)
if not raw_entry:
logger.warning('entry not found in AccurateRip database')
raise EntryNotFound
return _split_responses(raw_entry)
|
47,903 |
def main():
args = build_argparser()
logging.basicConfig(format="[ %(levelname)s ] %(message)s", level=logging.INFO, stream=sys.stdout)
log = logging.getLogger()
log.info("Creating Inference Engine")
ie = IECore()
if args.device == "CPU" and args.cpu_extension:
ie.add_extension(args.cpu_extension, 'CPU')
log.info("Loading model {}".format(args.model))
model_path = args.model[:-4]
net = ie.read_network(model_path + ".xml", model_path + ".bin")
if args.device == "CPU":
supported_layers = ie.query_network(net, args.device)
not_supported_layers = [l for l in net.layers.keys() if l not in supported_layers]
if len(not_supported_layers) > 0:
raise RuntimeError("Following layers are not supported by the {} plugin:\n {}"
.format(args.device, ', '.join(not_supported_layers)))
if len(net.inputs) != 1:
log.error("Demo supports only models with 1 input layer")
sys.exit(1)
input_blob = next(iter(net.inputs))
input_shape = net.inputs[input_blob].shape
if len(net.outputs) != 1:
log.error("Demo supports only models with 1 output layer")
sys.exit(1)
output_blob = next(iter(net.outputs))
log.info("Loading model to the plugin")
exec_net = ie.load_network(network=net, device_name=args.device)
log.info("Preparing input")
labels = []
if args.labels:
with open(args.labels, "r") as file:
labels = [l.rstrip() for l in file.readlines()]
batch_size, channels, _, length = input_shape
audio = AudioSource(args.input, channels=channels, framerate=args.framerate)
audio.load()
hop = length - args.overlap if isinstance(args.overlap, int) else int(length * (1.0 - args.overlap))
if hop < 0:
log.error("Wrong value for '-ol/--overlap' argument - overlapping more than clip length")
sys.exit(1)
log.info("Starting inference")
outputs = []
clips = 0
infer_time = []
for idx, chunk in enumerate(audio.chunks(length, hop, num_chunks=batch_size)):
if len(chunk.shape) != len(input_shape):
chunk = np.reshape(chunk, newshape=input_shape)
infer_start_time = datetime.now()
output = exec_net.infer(inputs={input_blob: chunk})
infer_time.append(datetime.now() - infer_start_time)
clips += batch_size
output = output[output_blob]
for batch, data in enumerate(output):
start_time = (idx*batch_size + batch)*hop / audio.framerate
end_time = ((idx*batch_size + batch)*hop + length) / audio.framerate
outputs.append(data)
label = np.argmax(data)
log.info("[{:.2f}:{:.2f}] - {:s}: {:.2f}%".format(start_time, end_time,
labels[label] if labels else "Class {}".format(label),
data[label] * 100))
if clips == 0:
log.error("Audio too short for inference by that model")
sys.exit(1)
total = np.mean(outputs, axis=0)
label = np.argmax(total)
log.info("Total over audio - {:s}: {:.2f}%".format(labels[label] if labels else "Class {}".format(label),
total[label]*100))
logging.info("Average infer time - {:.3f}s per clip".format((np.array(infer_time).sum() / clips).total_seconds()))
|
def main():
args = build_argparser()
logging.basicConfig(format="[ %(levelname)s ] %(message)s", level=logging.INFO, stream=sys.stdout)
log = logging.getLogger()
log.info("Creating Inference Engine")
ie = IECore()
if args.device == "CPU" and args.cpu_extension:
ie.add_extension(args.cpu_extension, 'CPU')
log.info("Loading model {}".format(args.model))
model_path = args.model[:-4]
net = ie.read_network(model_path + ".xml", model_path + ".bin")
if args.device == "CPU":
supported_layers = ie.query_network(net, args.device)
not_supported_layers = [l for l in net.layers.keys() if l not in supported_layers]
if len(not_supported_layers) > 0:
raise RuntimeError("Following layers are not supported by the {} plugin:\n {}"
.format(args.device, ', '.join(not_supported_layers)))
if len(net.inputs) != 1:
log.error("Demo supports only models with 1 input layer")
sys.exit(1)
input_blob = next(iter(net.inputs))
input_shape = net.inputs[input_blob].shape
if len(net.outputs) != 1:
log.error("Demo supports only models with 1 output layer")
sys.exit(1)
output_blob = next(iter(net.outputs))
log.info("Loading model to the plugin")
exec_net = ie.load_network(network=net, device_name=args.device)
log.info("Preparing input")
labels = []
if args.labels:
with open(args.labels, "r") as file:
labels = [l.rstrip() for l in file.readlines()]
batch_size, channels, _, length = input_shape
audio = AudioSource(args.input, channels=channels, framerate=args.framerate)
audio.load()
hop = length - args.overlap if isinstance(args.overlap, int) else int(length * (1.0 - args.overlap))
if hop < 0:
log.error("Wrong value for '-ol/--overlap' argument - overlapping more than clip length")
sys.exit(1)
log.info("Starting inference")
outputs = []
clips = 0
infer_time = []
for idx, chunk in enumerate(audio.chunks(length, hop, num_chunks=batch_size)):
if len(chunk.shape) != len(input_shape):
chunk = np.reshape(chunk, newshape=input_shape)
infer_start_time = datetime.now()
output = exec_net.infer(inputs={input_blob: chunk})
infer_time.append(datetime.now() - infer_start_time)
clips += batch_size
output = output[output_blob]
for batch, data in enumerate(output):
start_time = (idx*batch_size + batch)*hop / audio.framerate
end_time = ((idx*batch_size + batch)*hop + length) / audio.framerate
outputs.append(data)
label = np.argmax(data)
log.info("[{:.2f}:{:.2f}] - {:s}: {:.2f}%".format(start_time, end_time,
labels[label] if labels else "Class {}".format(label),
data[label] * 100))
if clips == 0:
log.error("Audio too short for inference by that model")
sys.exit(1)
total = np.mean(outputs, axis=0)
label = np.argmax(total)
log.info("Averaged over the audio prediction - {:s}: {:.2f}%".format(labels[label] if labels else "Class {}".format(label),
total[label]*100))
logging.info("Average infer time - {:.3f}s per clip".format((np.array(infer_time).sum() / clips).total_seconds()))
|
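A note on the pair above: the overlap handling is easy to misread, because an integer --overlap is treated as a number of samples while a float is treated as a fraction of the clip length. A minimal standalone sketch of that rule (the function name here is illustrative, not part of the demo):

def compute_hop(clip_length, overlap):
    """Return the hop between successive clips.
    An int overlap is a sample count, a float is a fraction of clip_length,
    mirroring the check in the demo above.
    """
    if isinstance(overlap, int):
        return clip_length - overlap
    return int(clip_length * (1.0 - overlap))

assert compute_hop(16000, 4000) == 12000   # overlap given in samples
assert compute_hop(16000, 0.25) == 12000   # overlap given as a fraction
assert compute_hop(16000, 20000) < 0       # rejected by the demo's hop < 0 check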
20,085 |
def extend_headers(response: flask.Response):
audit_headers = _prepare_headers()
response.headers.extend(audit_headers)
return response
|
def extend_headers(response: flask.Response) -> flask.Response:
audit_headers = _prepare_headers()
response.headers.extend(audit_headers)
return response
|
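For context on the pair above: hooks like extend_headers are typically registered with Flask's after_request decorator so they run on every response. A minimal self-contained sketch, assuming Flask and using a placeholder in place of the module's real _prepare_headers:

import flask

app = flask.Flask(__name__)

def _prepare_headers():
    # Placeholder for the module's real audit headers.
    return {"X-Audit-Id": "example"}

@app.after_request
def extend_headers(response: flask.Response) -> flask.Response:
    response.headers.extend(_prepare_headers())
    return response

@app.route("/")
def index():
    return "ok"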
32,844 |
def activate_distributed_headers(tracer, int_config=None, request_headers=None, override_distributed_tracing=None):
"""
Helper for activating a distributed trace headers' context if enabled in integration config.
int_config will be used to check if distributed trace headers context will be activated, but
override_distributed_tracing will override if passed
"""
int_config = int_config or {}
if override_distributed_tracing is not None and not override_distributed_tracing:
return
if override_distributed_tracing or int_config.get(
"distributed_tracing_enabled", int_config.get("distributed_tracing", False)
):
context = HTTPPropagator.extract(request_headers)
# Only need to activate the new context if something was propagated
if context.trace_id:
tracer.context_provider.activate(context)
|
def activate_distributed_headers(tracer, int_config=None, request_headers=None, override=None):
    """
    Helper for activating a distributed trace headers' context if enabled in integration config.
    int_config will be used to check if distributed trace headers context will be activated, but
    the override argument, if passed, takes precedence
    """
    int_config = int_config or {}
    if override is not None and not override:
        return
    if override or int_config.get(
"distributed_tracing_enabled", int_config.get("distributed_tracing", False)
):
context = HTTPPropagator.extract(request_headers)
# Only need to activate the new context if something was propagated
if context.trace_id:
tracer.context_provider.activate(context)
|
32,192 |
def get_jobs(topology: Topology, device_filter_string: str = None, status: str = None, job_type: str = None,
id: int = None) -> List[ShowJobsAllResultData]:
"""
Get all the jobs from the devices in the environment, or a single job when ID is specified.
Jobs are sorted by the most recent queued and are returned in a way that's consumable by Generic Polling.
:param topology: `Topology` instance !no-auto-argument
:param device_filter_string: String to filter to only show specific hostnames or serial numbers.
:param status: Filter returned jobs by status
:param job_type: Filter returned jobs by type
:param id: Filter by ID
"""
if id:
id = int(id)
result: List[ShowJobsAllResultData] = UniversalCommand.show_jobs(
topology,
device_filter_string,
job_type=job_type,
status=status,
id=id
)
return result
|
def get_jobs(topology: Topology, device_filter_string: str = None, status: str = None, job_type: str = None,
id: Optional[str] = None) -> List[ShowJobsAllResultData]:
"""
Get all the jobs from the devices in the environment, or a single job when ID is specified.
Jobs are sorted by the most recent queued and are returned in a way that's consumable by Generic Polling.
:param topology: `Topology` instance !no-auto-argument
:param device_filter_string: String to filter to only show specific hostnames or serial numbers.
:param status: Filter returned jobs by status
:param job_type: Filter returned jobs by type
:param id: Filter by ID
"""
if id:
id = int(id)
result: List[ShowJobsAllResultData] = UniversalCommand.show_jobs(
topology,
device_filter_string,
job_type=job_type,
status=status,
id=id
)
return result
|
8,576 |
def test_copy_single_distro_file():
"""
Tests copy_single_distro_file() method using a sample initrd file pulled from Centos 8
"""
# Instantiate TFTPGen class with collection_mgr parameter
test_api = CobblerAPI()
generator = tftpgen.TFTPGen(test_api)
# Arrange
distro_file = "/code/tests/test_data/dummy_initramfs"
distro_dir = "/srv/tftpboot/images/"
symlink_ok = True
initramfs_dst_path = "/srv/tftpboot/images/dummy_initramfs"
# Act
generator.copy_single_distro_file(distro_file, distro_dir, symlink_ok)
# Assert
assert os.path.isfile(initramfs_dst_path)
|
def test_copy_single_distro_file():
"""
Tests copy_single_distro_file() method using a sample initrd file pulled from CentOS 8
"""
# Instantiate TFTPGen class with collection_mgr parameter
test_api = CobblerAPI()
generator = tftpgen.TFTPGen(test_api)
# Arrange
distro_file = "/code/tests/test_data/dummy_initramfs"
distro_dir = "/srv/tftpboot/images/"
symlink_ok = True
initramfs_dst_path = "/srv/tftpboot/images/dummy_initramfs"
# Act
generator.copy_single_distro_file(distro_file, distro_dir, symlink_ok)
# Assert
assert os.path.isfile(initramfs_dst_path)
|
44,072 |
def compare_nodes(nodes, expected_wires, expected_names):
"""Helper function to comper nodes of directed multigraph"""
for node, exp_wire in zip(nodes, expected_wires):
assert node.wires.tolist() == exp_wire
for node, exp_name in zip(nodes, expected_names):
assert node.name == exp_name
|
def compare_nodes(nodes, expected_wires, expected_names):
"""Helper function to compare nodes of directed multigraph"""
for node, exp_wire in zip(nodes, expected_wires):
assert node.wires.tolist() == exp_wire
for node, exp_name in zip(nodes, expected_names):
assert node.name == exp_name
|
49,119 |
def test_issue_7814():
circle = Circle(Point(x, 0), y)
line = Line(Point(k, z), slope=0)
assert line.intersection(circle) == [
Point2D(x + sqrt(y**2 - z**2), z), Point2D(x - sqrt(y**2 - z**2), z)]
|
def test_issue_7814():
circle = Circle(Point(x, 0), y)
line = Line(Point(k, z), slope=0)
_s = sqrt((y - z)*(y + z))
assert line.intersection(circle) == [Point2D(x + _s, z), Point2D(x - _s, z)]
|
10,867 |
def get_linuxcnc_ini_file():
"""find linuxcnc INI file with pgrep"""
ps = subprocess.Popen('ps -C linuxcncsvr --no-header -o args'.split(),
stdout=subprocess.PIPE
)
p,e = ps.communicate()
if p is not None: p=p.decode()
if e is not None: e=e.decode()
if ps.returncode:
print(_('get_linuxcnc_ini_file: stdout= %s') % p)
print(_('get_linuxcnc_ini_file: stderr= %s') % e)
return None
ans = p.split()[p.split().index('-ini')+1]
return ans
|
def get_linuxcnc_ini_file():
"""find LinuxCNC INI file with pgrep"""
ps = subprocess.Popen('ps -C linuxcncsvr --no-header -o args'.split(),
stdout=subprocess.PIPE
)
p,e = ps.communicate()
if p is not None: p=p.decode()
if e is not None: e=e.decode()
if ps.returncode:
print(_('get_linuxcnc_ini_file: stdout= %s') % p)
print(_('get_linuxcnc_ini_file: stderr= %s') % e)
return None
ans = p.split()[p.split().index('-ini')+1]
return ans
|
36,155 |
def test_load_backend_if_not_loaded_load_once(manager, monkeypatch):
"""Test the :meth:`aiida.cmdline.utils.decorators.load_backend_if_not_loaded` calls load profile only once."""
mocked = mock.Mock()
with monkeypatch.context() as context:
context.setattr(manager, 'get_profile_storage', mocked)
load_backend_if_not_loaded()
assert mocked.call_count == 1
assert not manager.profile_storage_loaded
    # Now actually call ``get_profile_storage``: since it was mocked in the previous call, it won't actually
    # have been called, and the implementation of ``load_backend_if_not_loaded`` working correctly depends on the
    # profile storage actually having been initialized.
manager.get_profile_storage()
assert manager.profile_storage_loaded
with monkeypatch.context() as context:
context.setattr(manager, 'get_profile_storage', mocked)
load_backend_if_not_loaded()
assert mocked.call_count == 1, 'Apparently `Manager.get_profile_storage` got called again, which is a bug'
|
def test_load_backend_if_not_loaded_load_once(manager, monkeypatch):
"""Test that the :meth:`aiida.cmdline.utils.decorators.load_backend_if_not_loaded` does not re-load the profile if it was already loaded."""
mocked = mock.Mock()
with monkeypatch.context() as context:
context.setattr(manager, 'get_profile_storage', mocked)
load_backend_if_not_loaded()
assert mocked.call_count == 1
assert not manager.profile_storage_loaded
    # Now actually call ``get_profile_storage``: since it was mocked in the previous call, it won't actually
    # have been called, and the implementation of ``load_backend_if_not_loaded`` working correctly depends on the
    # profile storage actually having been initialized.
manager.get_profile_storage()
assert manager.profile_storage_loaded
with monkeypatch.context() as context:
context.setattr(manager, 'get_profile_storage', mocked)
load_backend_if_not_loaded()
assert mocked.call_count == 1, 'Apparently `Manager.get_profile_storage` got called again, which is a bug'
|
5,402 |
def is_auth(nodes, pcsuser="hacluster", pcspasswd="hacluster"):
"""
Check if nodes are already authorized
nodes
a list of nodes to be checked for authorization to the cluster
pcsuser
        user for communication with PCS (default: hacluster)
pcspasswd
password for pcsuser (default: hacluster)
CLI Example:
.. code-block:: bash
salt '*' pcs.is_auth nodes='[node1.example.org node2.example.org]' pcsuser=hacluster pcspasswd=hoonetorg
"""
if __use_new_commands():
cmd = ["pcs", "host", "auth"]
if pcsuser:
cmd += ["-u", pcsuser]
if pcspasswd:
cmd += ["-p", pcspasswd]
cmd += nodes
else:
cmd = ["pcs", "cluster", "auth"]
cmd += nodes
log.info(str(("Commands: ", cmd)))
return __salt__["cmd.run_all"](
cmd, stdin="\n\n", output_loglevel="trace", python_shell=False
)
|
def is_auth(nodes, pcsuser="hacluster", pcspasswd="hacluster"):
"""
Check if nodes are already authorized
nodes
a list of nodes to be checked for authorization to the cluster
pcsuser
        user for communication with PCS (default: hacluster)
pcspasswd
password for pcsuser (default: hacluster)
CLI Example:
.. code-block:: bash
salt '*' pcs.is_auth nodes='[node1.example.org node2.example.org]' pcsuser=hacluster pcspasswd=hoonetorg
"""
if __use_new_commands():
cmd = ["pcs", "host", "auth"]
if pcsuser:
cmd += ["-u", pcsuser]
if pcspasswd:
cmd += ["-p", pcspasswd]
cmd += nodes
else:
cmd = ["pcs", "cluster", "auth"]
cmd += nodes
log.info("Commands: %s", cmd)
return __salt__["cmd.run_all"](
cmd, stdin="\n\n", output_loglevel="trace", python_shell=False
)
|
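The only behavioural change in the pair above is the logging call: str(("Commands: ", cmd)) is replaced by lazy %-style formatting, which defers interpolation until the record is actually emitted. A small stdlib-only illustration of the two styles:

import logging

logging.basicConfig(level=logging.INFO)
log = logging.getLogger(__name__)

cmd = ["pcs", "host", "auth", "-u", "hacluster"]

# Eager: the tuple repr is built even if INFO were disabled.
log.info(str(("Commands: ", cmd)))

# Lazy: the %s substitution only happens if the message is emitted.
log.info("Commands: %s", cmd)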
27,869 |
def mean_absolute_error(x0, x1):
"""Mean absolute error function.
This function computes the mean absolute error between two variables. The
mean is taken over the minibatch. Args ``x0`` and ``x1`` must have the
dimensions. This function first calculates the absolute value differences
of corresponding elements in x0 and x1, and then returns the mean of those
differences.
Args:
x0 (:class:`~chainer.Variable` or :ref:`ndarray`): Input variable.
x1 (:class:`~chainer.Variable` or :ref:`ndarray`): Input variable.
Returns:
~chainer.Variable:
A variable holding an array representing the mean absolute
error of two inputs.
.. admonition:: Example
1D array examples:
>>> x = np.array([1, 2, 3]).astype(np.float32)
>>> y = np.array([0, 0, 0]).astype(np.float32)
>>> F.mean_absolute_error(x, y)
variable(2.)
>>> x = np.array([1, 2, 3, 4, 5, 6]).astype(np.float32)
>>> y = np.array([7, 8, 9, 10, 11, 12]).astype(np.float32)
>>> F.mean_absolute_error(x, y)
variable(6.)
2D array example:
- In this example, there are 4 elements, and thus 4 errors
>>> x = np.array([[1, 2], [3, 4]]).astype(np.float32)
>>> y = np.array([[8, 8], [8, 8]]).astype(np.float32)
>>> F.mean_absolute_error(x, y)
variable(5.5)
3D array example:
- In this example, there are 8 elements, and thus 8 errors
>>> x = np.reshape(np.array([1, 2, 3, 4, 5, 6, 7, 8]), (2, 2, 2))
>>> y = np.reshape(np.array([8, 8, 8, 8, 8, 8, 8, 8]), (2, 2, 2))
>>> x = x.astype(np.float32)
>>> y = y.astype(np.float32)
>>> F.mean_absolute_error(x, y)
variable(3.5)
"""
return MeanAbsoluteError().apply((x0, x1))[0]
|
def mean_absolute_error(x0, x1):
"""Mean absolute error function.
This function computes the mean absolute error between two variables. The
mean is taken over the minibatch. Args ``x0`` and ``x1`` must have the
same dimensions. This function first calculates the absolute value of the differences
of corresponding elements in x0 and x1, and then returns the mean of those
differences.
Args:
x0 (:class:`~chainer.Variable` or :ref:`ndarray`): Input variable.
x1 (:class:`~chainer.Variable` or :ref:`ndarray`): Input variable.
Returns:
~chainer.Variable:
A variable holding an array representing the mean absolute
error of two inputs.
.. admonition:: Example
1D array examples:
>>> x = np.array([1, 2, 3]).astype(np.float32)
>>> y = np.array([0, 0, 0]).astype(np.float32)
>>> F.mean_absolute_error(x, y)
variable(2.)
>>> x = np.array([1, 2, 3, 4, 5, 6]).astype(np.float32)
>>> y = np.array([7, 8, 9, 10, 11, 12]).astype(np.float32)
>>> F.mean_absolute_error(x, y)
variable(6.)
2D array example:
- In this example, there are 4 elements, and thus 4 errors
>>> x = np.array([[1, 2], [3, 4]]).astype(np.float32)
>>> y = np.array([[8, 8], [8, 8]]).astype(np.float32)
>>> F.mean_absolute_error(x, y)
variable(5.5)
3D array example:
- In this example, there are 8 elements, and thus 8 errors
>>> x = np.reshape(np.array([1, 2, 3, 4, 5, 6, 7, 8]), (2, 2, 2))
>>> y = np.reshape(np.array([8, 8, 8, 8, 8, 8, 8, 8]), (2, 2, 2))
>>> x = x.astype(np.float32)
>>> y = y.astype(np.float32)
>>> F.mean_absolute_error(x, y)
variable(3.5)
"""
return MeanAbsoluteError().apply((x0, x1))[0]
|
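For readers without Chainer at hand, the quantity described in the docstring above reduces to a plain NumPy expression; this is only an illustration of the definition, not Chainer's implementation:

import numpy as np

x = np.array([[1, 2], [3, 4]], dtype=np.float32)
y = np.array([[8, 8], [8, 8]], dtype=np.float32)

# Mean of the element-wise absolute differences, matching the 2D example above.
mae = np.mean(np.abs(x - y))
assert mae == 5.5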
58,083 |
def get_device_state(topology: Topology, target: str) -> dict:
"""
Get the device state from the provided device target (serial number). Note that this will attempt to connect directly to the
firewall as there is no way to get the device state for a firewall via Panorama.
:param topology: `Topology` instance !no-auto-argument
:param target: String to filter to only show specific hostnames or serial numbers.
"""
result_file_data = FirewallCommand.get_device_state(topology, target)
return fileResult(
filename=f"{target}_device_state.tar.gz",
data=result_file_data,
file_type=EntryType.ENTRY_INFO_FILE
)
|
def get_device_state(topology: Topology, target: str) -> dict:
"""
Get the device state from the provided device target (serial number). Note that this will attempt to connect directly to the
firewall as there is no way to get the device state for a firewall via Panorama.
:param topology: `Topology` instance !no-auto-argument
:param target: String to filter to only show specific hostnames or serial numbers.
"""
return fileResult(
filename=f"{target}_device_state.tar.gz",
data=FirewallCommand.get_device_state(topology, target),
file_type=EntryType.ENTRY_INFO_FILE
)
|
29,925 |
def get_dl_verify_data(section):
"""Returns a pandas DataFrame with all known download object hashes.
    The returned DataFrame resolves str: cache_obj_name (without section)
    to a tuple of int(size) and bytes(sha256).
"""
verify_key = 'dl_verify_data_' + section
if cfg.DATA.get(verify_key) is not None:
return cfg.DATA[verify_key]
verify_file_path = os.path.join(cfg.CACHE_DIR, 'downloads.sha256.hdf')
def verify_file(force=False):
"""Check the hash file's own hash"""
if not cfg.PARAMS['has_internet']:
return
if not force and os.path.isfile(verify_file_path) and \
os.path.getmtime(verify_file_path) + CHECKSUM_LIFETIME > time.time():
return
logger.info('Checking the download verification file checksum...')
try:
with requests.get(CHECKSUM_VALIDATION_URL) as req:
req.raise_for_status()
verify_file_sha256 = req.text.split(maxsplit=1)[0]
verify_file_sha256 = bytearray.fromhex(verify_file_sha256)
except Exception as e:
verify_file_sha256 = None
logger.warning('Failed getting verification checksum: ' + repr(e))
if os.path.isfile(verify_file_path) and verify_file_sha256:
sha256 = hashlib.sha256()
with open(verify_file_path, 'rb') as f:
for b in iter(lambda: f.read(0xFFFF), b''):
sha256.update(b)
if sha256.digest() != verify_file_sha256:
logger.warning('%s changed or invalid, deleting.'
% (verify_file_path))
os.remove(verify_file_path)
else:
os.utime(verify_file_path)
if not np.any(['dl_verify_data_' in k for k in cfg.DATA.keys()]):
# We check the hash file only once per session
# no need to do it at each call
verify_file()
if not os.path.isfile(verify_file_path):
if not cfg.PARAMS['has_internet']:
return pd.DataFrame()
logger.info('Downloading %s to %s...'
% (CHECKSUM_URL, verify_file_path))
with requests.get(CHECKSUM_URL, stream=True) as req:
if req.status_code == 200:
mkdir(os.path.dirname(verify_file_path))
with open(verify_file_path, 'wb') as f:
for b in req.iter_content(chunk_size=0xFFFF):
if b:
f.write(b)
logger.info('Done downloading.')
verify_file(True)
if not os.path.isfile(verify_file_path):
logger.warning('Downloading and verifying checksums failed.')
return pd.DataFrame()
try:
data = pd.read_hdf(verify_file_path, key=section)
except KeyError:
data = pd.DataFrame()
cfg.DATA[verify_key] = data
return data
|
def get_dl_verify_data(section):
"""Returns a pandas DataFrame with all known download object hashes.
    The returned DataFrame resolves str: cache_obj_name (without section)
    to a tuple of int(size) and bytes(sha256).
"""
verify_key = 'dl_verify_data_' + section
if cfg.DATA.get(verify_key) is not None:
return cfg.DATA[verify_key]
verify_file_path = os.path.join(cfg.CACHE_DIR, 'downloads.sha256.hdf')
def verify_file(force=False):
"""Check the hash file's own hash"""
if not cfg.PARAMS['has_internet']:
return
if not force and os.path.isfile(verify_file_path) and \
os.path.getmtime(verify_file_path) + CHECKSUM_LIFETIME > time.time():
return
logger.info('Checking the download verification file checksum...')
try:
with requests.get(CHECKSUM_VALIDATION_URL) as req:
req.raise_for_status()
verify_file_sha256 = req.text.split(maxsplit=1)[0]
verify_file_sha256 = bytearray.fromhex(verify_file_sha256)
except Exception as e:
verify_file_sha256 = None
logger.warning('Failed getting verification checksum: ' + repr(e))
if os.path.isfile(verify_file_path) and verify_file_sha256:
sha256 = hashlib.sha256()
with open(verify_file_path, 'rb') as f:
for b in iter(lambda: f.read(0xFFFF), b''):
sha256.update(b)
if sha256.digest() != verify_file_sha256:
logger.warning('%s changed or invalid, deleting.'
% (verify_file_path))
os.remove(verify_file_path)
else:
os.utime(verify_file_path)
if not np.any(['dl_verify_data_' in k for k in cfg.DATA.keys()]):
# We check the hash file only once per session
# no need to do it at each call
verify_file()
if not os.path.isfile(verify_file_path):
if not cfg.PARAMS['has_internet']:
return pd.DataFrame()
logger.info('Downloading %s to %s...'
% (CHECKSUM_URL, verify_file_path))
with requests.get(CHECKSUM_URL, stream=True) as req:
if req.status_code == 200:
mkdir(os.path.dirname(verify_file_path))
with open(verify_file_path, 'wb') as f:
for b in req.iter_content(chunk_size=0xFFFF):
if b:
f.write(b)
logger.info('Done downloading.')
verify_file(force=True)
if not os.path.isfile(verify_file_path):
logger.warning('Downloading and verifying checksums failed.')
return pd.DataFrame()
try:
data = pd.read_hdf(verify_file_path, key=section)
except KeyError:
data = pd.DataFrame()
cfg.DATA[verify_key] = data
return data
|
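The verify_file helper above hashes the checksum file in 64 KiB chunks before trusting it. Below is a minimal standalone sketch of that chunked-hashing pattern; the file path and expected digest are illustrative only (the hex value is simply the SHA-256 of empty input).

import hashlib

def sha256_of_file(path, chunk_size=0xFFFF):
    # Hash the file in fixed-size chunks so large files never sit fully in memory.
    digest = hashlib.sha256()
    with open(path, 'rb') as f:
        for chunk in iter(lambda: f.read(chunk_size), b''):
            digest.update(chunk)
    return digest.digest()

# Hypothetical usage: compare against a digest taken from a checksum list.
expected = bytes.fromhex('e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855')
# sha256_of_file('some/local/file') == expected  ->  True only for an empty file
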
30,410 |
def check_base_branch(pr_num):
print_color('Starting to fetch the base branch of PR num {}'.format(pr_num), LOG_COLORS.GREEN)
base_branch = get_base_branch(pr_num)
print_color('Finished to fetch the base branch of PR num {}'.format(pr_num), LOG_COLORS.GREEN)
if base_branch == 'master':
print_error("You cannot merge into master when creating an external PR.")
sys.exit(1)
else:
print_color('Base branch of PR num {} is not master - Great!'.format(pr_num), LOG_COLORS.GREEN)
sys.exit(0)
|
def check_base_branch(pr_num):
print_color('Starting to fetch the base branch of PR num {}'.format(pr_num), LOG_COLORS.GREEN)
base_branch = get_base_branch(pr_num)
print_color('Finished to fetch the base branch of PR num {}'.format(pr_num), LOG_COLORS.GREEN)
if base_branch == 'master':
print_error('Cannot merge a contribution directly to master, the pull request reviewer will handle that soon.')
sys.exit(1)
else:
print_color('Base branch of PR num {} is not master - Great!'.format(pr_num), LOG_COLORS.GREEN)
sys.exit(0)
|
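get_base_branch itself is not shown in this excerpt. One plausible way to obtain a pull request's base branch, assuming the repository lives on GitHub, is the REST pulls endpoint; owner, repo and PR number below are placeholders.

import requests

def get_base_branch_from_github(owner, repo, pr_num, token=None):
    # Query the GitHub REST API for the pull request and read the branch it
    # targets ("base"). All argument values here are hypothetical.
    headers = {'Authorization': 'token {}'.format(token)} if token else {}
    url = 'https://api.github.com/repos/{}/{}/pulls/{}'.format(owner, repo, pr_num)
    response = requests.get(url, headers=headers, timeout=30)
    response.raise_for_status()
    return response.json()['base']['ref']

# get_base_branch_from_github('demisto', 'content', 1234)  ->  e.g. 'contrib/some-branch'
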
29,918 |
def run_prepro_levels(rgi_version=None, rgi_reg=None, border=None,
output_folder='', working_dir='', dem_source='',
is_test=False, test_nr=4, demo=False, test_rgidf=None,
test_intersects_file=None, test_topofile=None,
test_crudir=None, disable_mp=False, timeout=0,
max_level=4, logging_level='WORKFLOW',
map_maxd=None, map_d1=None):
"""Does the actual job.
Parameters
----------
rgi_version : str
the RGI version to use (defaults to cfg.PARAMS)
rgi_reg : str
the RGI region to process
border : int
the number of pixels at the maps border
output_folder : str
path to the output folder (where to put the preprocessed tar files)
dem_source : str
which DEM source to use: default, SOURCE_NAME or ALL
working_dir : str
path to the OGGM working directory
is_test : bool
to test on a couple of glaciers only!
test_nr : int
if is_test = True: Amount of glaciers to test
demo : bool
to run the prepro for the list of demo glaciers
test_rgidf : shapefile
for testing purposes only
test_intersects_file : shapefile
for testing purposes only
test_topofile : str
for testing purposes only
test_crudir : str
for testing purposes only
disable_mp : bool
disable multiprocessing
max_level : int
the maximum pre-processing level before stopping
logging_level : str
the logging level to use (DEBUG, INFO, WARNING, WORKFLOW)
map_maxd : float
        maximum resolution [m] of the spatial grid

map_d1 : float
equation parameter which is used to calculate the grid resolution
"""
# TODO: temporarily silence Fiona deprecation warnings
import warnings
warnings.filterwarnings("ignore", category=DeprecationWarning)
# Input check
if max_level not in [1, 2, 3, 4]:
raise InvalidParamsError('max_level should be one of [1, 2, 3, 4]')
# Time
start = time.time()
def _time_log():
# Log util
m, s = divmod(time.time() - start, 60)
h, m = divmod(m, 60)
log.workflow('OGGM prepro_levels is done! Time needed: '
'{:02d}:{:02d}:{:02d}'.format(int(h), int(m), int(s)))
# Initialize OGGM and set up the run parameters
cfg.initialize(logging_level=logging_level)
# Local paths
utils.mkdir(working_dir)
cfg.PATHS['working_dir'] = working_dir
# Use multiprocessing?
cfg.PARAMS['use_multiprocessing'] = not disable_mp
# How many grid points around the glacier?
# Make it large if you expect your glaciers to grow large
cfg.PARAMS['border'] = border
# Size of the spatial map
cfg.PARAMS['dmax'] = map_maxd if map_maxd else cfg.PARAMS['dmax']
cfg.PARAMS['d1'] = map_d1 if map_d1 else cfg.PARAMS['d1']
# Set to True for operational runs
cfg.PARAMS['continue_on_error'] = True
# Timeout
cfg.PARAMS['task_timeout'] = timeout
# For statistics
climate_periods = [1920, 1960, 2000]
if rgi_version is None:
rgi_version = cfg.PARAMS['rgi_version']
rgi_dir_name = 'RGI{}'.format(rgi_version)
border_dir_name = 'b_{:03d}'.format(border)
base_dir = os.path.join(output_folder, rgi_dir_name, border_dir_name)
# Add a package version file
utils.mkdir(base_dir)
opath = os.path.join(base_dir, 'package_versions.txt')
with open(opath, 'w') as vfile:
vfile.write(utils.show_versions(logger=log))
if demo:
rgidf = utils.get_rgi_glacier_entities(cfg.DATA['demo_glaciers'].index)
elif test_rgidf is None:
# Get the RGI file
rgidf = gpd.read_file(utils.get_rgi_region_file(rgi_reg,
version=rgi_version))
# We use intersects
rgif = utils.get_rgi_intersects_region_file(rgi_reg,
version=rgi_version)
cfg.set_intersects_db(rgif)
else:
rgidf = test_rgidf
cfg.set_intersects_db(test_intersects_file)
if is_test:
# Just for fun
rgidf = rgidf.sample(test_nr)
# Sort for more efficient parallel computing
rgidf = rgidf.sort_values('Area', ascending=False)
log.workflow('Starting prepro run for RGI reg: {} '
'and border: {}'.format(rgi_reg, border))
log.workflow('Number of glaciers: {}'.format(len(rgidf)))
# Input
if test_topofile:
cfg.PATHS['dem_file'] = test_topofile
# L1 - initialize working directories
# Which DEM source?
if dem_source.upper() == 'ALL':
        # This is the complex one, just do the job and leave
log.workflow('Running prepro on ALL sources')
for i, s in enumerate(utils.DEM_SOURCES):
rs = i == 0
rgidf['DEM_SOURCE'] = s
log.workflow('Running prepro on sources: {}'.format(s))
gdirs = []
for_task = []
for _, entity in rgidf.iterrows():
gdir = GlacierDirectory(entity, reset=rs)
for_task.append((gdir, dict(entity=entity)))
gdirs.append(gdir)
workflow.execute_entity_task(tasks.define_glacier_region, for_task)
workflow.execute_entity_task(_rename_dem_folder, gdirs, source=s)
# make a GeoTiff mask of the glacier, choose any source
workflow.execute_entity_task(gis.rasterio_glacier_mask,
gdirs, source='ALL')
# Compress all in output directory
l_base_dir = os.path.join(base_dir, 'L1')
workflow.execute_entity_task(utils.gdir_to_tar, gdirs, delete=False,
base_dir=l_base_dir)
utils.base_dir_to_tar(l_base_dir)
_time_log()
return
if dem_source:
# Force a given source
rgidf['DEM_SOURCE'] = dem_source.upper()
# L1 - go
gdirs = workflow.init_glacier_regions(rgidf, reset=True, force=True)
# Glacier stats
sum_dir = os.path.join(base_dir, 'L1', 'summary')
utils.mkdir(sum_dir)
opath = os.path.join(sum_dir, 'glacier_statistics_{}.csv'.format(rgi_reg))
utils.compile_glacier_statistics(gdirs, path=opath)
# L1 OK - compress all in output directory
l_base_dir = os.path.join(base_dir, 'L1')
workflow.execute_entity_task(utils.gdir_to_tar, gdirs, delete=False,
base_dir=l_base_dir)
utils.base_dir_to_tar(l_base_dir)
if max_level == 1:
_time_log()
return
# L2 - Tasks
# Pre-download other files just in case
if test_crudir is None:
_ = utils.get_cru_file(var='tmp')
_ = utils.get_cru_file(var='pre')
else:
cfg.PATHS['cru_dir'] = test_crudir
workflow.execute_entity_task(tasks.process_cru_data, gdirs)
# Glacier stats
sum_dir = os.path.join(base_dir, 'L2', 'summary')
utils.mkdir(sum_dir)
opath = os.path.join(sum_dir, 'glacier_statistics_{}.csv'.format(rgi_reg))
utils.compile_glacier_statistics(gdirs, path=opath)
# L2 OK - compress all in output directory
l_base_dir = os.path.join(base_dir, 'L2')
workflow.execute_entity_task(utils.gdir_to_tar, gdirs, delete=False,
base_dir=l_base_dir)
utils.base_dir_to_tar(l_base_dir)
if max_level == 2:
_time_log()
return
# L3 - Tasks
task_list = [
tasks.glacier_masks,
tasks.compute_centerlines,
tasks.initialize_flowlines,
tasks.compute_downstream_line,
tasks.compute_downstream_bedshape,
tasks.catchment_area,
tasks.catchment_intersections,
tasks.catchment_width_geom,
tasks.catchment_width_correction,
tasks.local_t_star,
tasks.mu_star_calibration,
tasks.prepare_for_inversion,
tasks.mass_conservation_inversion,
tasks.filter_inversion_output,
tasks.init_present_time_glacier
]
for task in task_list:
workflow.execute_entity_task(task, gdirs)
# Glacier stats
sum_dir = os.path.join(base_dir, 'L3', 'summary')
utils.mkdir(sum_dir)
opath = os.path.join(sum_dir, 'glacier_statistics_{}.csv'.format(rgi_reg))
utils.compile_glacier_statistics(gdirs, path=opath)
opath = os.path.join(sum_dir, 'climate_statistics_{}.csv'.format(rgi_reg))
utils.compile_climate_statistics(gdirs, add_climate_period=climate_periods,
path=opath)
# L3 OK - compress all in output directory
l_base_dir = os.path.join(base_dir, 'L3')
workflow.execute_entity_task(utils.gdir_to_tar, gdirs, delete=False,
base_dir=l_base_dir)
utils.base_dir_to_tar(l_base_dir)
if max_level == 3:
_time_log()
return
# L4 - No tasks: add some stats for consistency and make the dirs small
sum_dir = os.path.join(base_dir, 'L4', 'summary')
utils.mkdir(sum_dir)
opath = os.path.join(sum_dir, 'glacier_statistics_{}.csv'.format(rgi_reg))
utils.compile_glacier_statistics(gdirs, path=opath)
# Copy mini data to new dir
base_dir = os.path.join(base_dir, 'L4')
mini_gdirs = workflow.execute_entity_task(tasks.copy_to_basedir, gdirs,
base_dir=base_dir)
# L4 OK - compress all in output directory
workflow.execute_entity_task(utils.gdir_to_tar, mini_gdirs, delete=True)
utils.base_dir_to_tar(base_dir)
_time_log()
|
def run_prepro_levels(rgi_version=None, rgi_reg=None, border=None,
output_folder='', working_dir='', dem_source='',
is_test=False, test_nr=4, demo=False, test_rgidf=None,
test_intersects_file=None, test_topofile=None,
test_crudir=None, disable_mp=False, timeout=0,
max_level=4, logging_level='WORKFLOW',
map_dmax=None, map_d1=None):
"""Does the actual job.
Parameters
----------
rgi_version : str
the RGI version to use (defaults to cfg.PARAMS)
rgi_reg : str
the RGI region to process
border : int
the number of pixels at the maps border
output_folder : str
path to the output folder (where to put the preprocessed tar files)
dem_source : str
which DEM source to use: default, SOURCE_NAME or ALL
working_dir : str
path to the OGGM working directory
is_test : bool
to test on a couple of glaciers only!
test_nr : int
if is_test = True: Amount of glaciers to test
demo : bool
to run the prepro for the list of demo glaciers
test_rgidf : shapefile
for testing purposes only
test_intersects_file : shapefile
for testing purposes only
test_topofile : str
for testing purposes only
test_crudir : str
for testing purposes only
disable_mp : bool
disable multiprocessing
max_level : int
the maximum pre-processing level before stopping
logging_level : str
the logging level to use (DEBUG, INFO, WARNING, WORKFLOW)
    map_dmax : float
        maximum resolution [m] of the spatial grid
map_d1 : float
equation parameter which is used to calculate the grid resolution
"""
# TODO: temporarily silence Fiona deprecation warnings
import warnings
warnings.filterwarnings("ignore", category=DeprecationWarning)
# Input check
if max_level not in [1, 2, 3, 4]:
raise InvalidParamsError('max_level should be one of [1, 2, 3, 4]')
# Time
start = time.time()
def _time_log():
# Log util
m, s = divmod(time.time() - start, 60)
h, m = divmod(m, 60)
log.workflow('OGGM prepro_levels is done! Time needed: '
'{:02d}:{:02d}:{:02d}'.format(int(h), int(m), int(s)))
# Initialize OGGM and set up the run parameters
cfg.initialize(logging_level=logging_level)
# Local paths
utils.mkdir(working_dir)
cfg.PATHS['working_dir'] = working_dir
# Use multiprocessing?
cfg.PARAMS['use_multiprocessing'] = not disable_mp
# How many grid points around the glacier?
# Make it large if you expect your glaciers to grow large
cfg.PARAMS['border'] = border
# Size of the spatial map
    cfg.PARAMS['dmax'] = map_dmax if map_dmax else cfg.PARAMS['dmax']
cfg.PARAMS['d1'] = map_d1 if map_d1 else cfg.PARAMS['d1']
# Set to True for operational runs
cfg.PARAMS['continue_on_error'] = True
# Timeout
cfg.PARAMS['task_timeout'] = timeout
# For statistics
climate_periods = [1920, 1960, 2000]
if rgi_version is None:
rgi_version = cfg.PARAMS['rgi_version']
rgi_dir_name = 'RGI{}'.format(rgi_version)
border_dir_name = 'b_{:03d}'.format(border)
base_dir = os.path.join(output_folder, rgi_dir_name, border_dir_name)
# Add a package version file
utils.mkdir(base_dir)
opath = os.path.join(base_dir, 'package_versions.txt')
with open(opath, 'w') as vfile:
vfile.write(utils.show_versions(logger=log))
if demo:
rgidf = utils.get_rgi_glacier_entities(cfg.DATA['demo_glaciers'].index)
elif test_rgidf is None:
# Get the RGI file
rgidf = gpd.read_file(utils.get_rgi_region_file(rgi_reg,
version=rgi_version))
# We use intersects
rgif = utils.get_rgi_intersects_region_file(rgi_reg,
version=rgi_version)
cfg.set_intersects_db(rgif)
else:
rgidf = test_rgidf
cfg.set_intersects_db(test_intersects_file)
if is_test:
# Just for fun
rgidf = rgidf.sample(test_nr)
# Sort for more efficient parallel computing
rgidf = rgidf.sort_values('Area', ascending=False)
log.workflow('Starting prepro run for RGI reg: {} '
'and border: {}'.format(rgi_reg, border))
log.workflow('Number of glaciers: {}'.format(len(rgidf)))
# Input
if test_topofile:
cfg.PATHS['dem_file'] = test_topofile
# L1 - initialize working directories
# Which DEM source?
if dem_source.upper() == 'ALL':
        # This is the complex one, just do the job and leave
log.workflow('Running prepro on ALL sources')
for i, s in enumerate(utils.DEM_SOURCES):
rs = i == 0
rgidf['DEM_SOURCE'] = s
log.workflow('Running prepro on sources: {}'.format(s))
gdirs = []
for_task = []
for _, entity in rgidf.iterrows():
gdir = GlacierDirectory(entity, reset=rs)
for_task.append((gdir, dict(entity=entity)))
gdirs.append(gdir)
workflow.execute_entity_task(tasks.define_glacier_region, for_task)
workflow.execute_entity_task(_rename_dem_folder, gdirs, source=s)
# make a GeoTiff mask of the glacier, choose any source
workflow.execute_entity_task(gis.rasterio_glacier_mask,
gdirs, source='ALL')
# Compress all in output directory
l_base_dir = os.path.join(base_dir, 'L1')
workflow.execute_entity_task(utils.gdir_to_tar, gdirs, delete=False,
base_dir=l_base_dir)
utils.base_dir_to_tar(l_base_dir)
_time_log()
return
if dem_source:
# Force a given source
rgidf['DEM_SOURCE'] = dem_source.upper()
# L1 - go
gdirs = workflow.init_glacier_regions(rgidf, reset=True, force=True)
# Glacier stats
sum_dir = os.path.join(base_dir, 'L1', 'summary')
utils.mkdir(sum_dir)
opath = os.path.join(sum_dir, 'glacier_statistics_{}.csv'.format(rgi_reg))
utils.compile_glacier_statistics(gdirs, path=opath)
# L1 OK - compress all in output directory
l_base_dir = os.path.join(base_dir, 'L1')
workflow.execute_entity_task(utils.gdir_to_tar, gdirs, delete=False,
base_dir=l_base_dir)
utils.base_dir_to_tar(l_base_dir)
if max_level == 1:
_time_log()
return
# L2 - Tasks
# Pre-download other files just in case
if test_crudir is None:
_ = utils.get_cru_file(var='tmp')
_ = utils.get_cru_file(var='pre')
else:
cfg.PATHS['cru_dir'] = test_crudir
workflow.execute_entity_task(tasks.process_cru_data, gdirs)
# Glacier stats
sum_dir = os.path.join(base_dir, 'L2', 'summary')
utils.mkdir(sum_dir)
opath = os.path.join(sum_dir, 'glacier_statistics_{}.csv'.format(rgi_reg))
utils.compile_glacier_statistics(gdirs, path=opath)
# L2 OK - compress all in output directory
l_base_dir = os.path.join(base_dir, 'L2')
workflow.execute_entity_task(utils.gdir_to_tar, gdirs, delete=False,
base_dir=l_base_dir)
utils.base_dir_to_tar(l_base_dir)
if max_level == 2:
_time_log()
return
# L3 - Tasks
task_list = [
tasks.glacier_masks,
tasks.compute_centerlines,
tasks.initialize_flowlines,
tasks.compute_downstream_line,
tasks.compute_downstream_bedshape,
tasks.catchment_area,
tasks.catchment_intersections,
tasks.catchment_width_geom,
tasks.catchment_width_correction,
tasks.local_t_star,
tasks.mu_star_calibration,
tasks.prepare_for_inversion,
tasks.mass_conservation_inversion,
tasks.filter_inversion_output,
tasks.init_present_time_glacier
]
for task in task_list:
workflow.execute_entity_task(task, gdirs)
# Glacier stats
sum_dir = os.path.join(base_dir, 'L3', 'summary')
utils.mkdir(sum_dir)
opath = os.path.join(sum_dir, 'glacier_statistics_{}.csv'.format(rgi_reg))
utils.compile_glacier_statistics(gdirs, path=opath)
opath = os.path.join(sum_dir, 'climate_statistics_{}.csv'.format(rgi_reg))
utils.compile_climate_statistics(gdirs, add_climate_period=climate_periods,
path=opath)
# L3 OK - compress all in output directory
l_base_dir = os.path.join(base_dir, 'L3')
workflow.execute_entity_task(utils.gdir_to_tar, gdirs, delete=False,
base_dir=l_base_dir)
utils.base_dir_to_tar(l_base_dir)
if max_level == 3:
_time_log()
return
# L4 - No tasks: add some stats for consistency and make the dirs small
sum_dir = os.path.join(base_dir, 'L4', 'summary')
utils.mkdir(sum_dir)
opath = os.path.join(sum_dir, 'glacier_statistics_{}.csv'.format(rgi_reg))
utils.compile_glacier_statistics(gdirs, path=opath)
# Copy mini data to new dir
base_dir = os.path.join(base_dir, 'L4')
mini_gdirs = workflow.execute_entity_task(tasks.copy_to_basedir, gdirs,
base_dir=base_dir)
# L4 OK - compress all in output directory
workflow.execute_entity_task(utils.gdir_to_tar, mini_gdirs, delete=True)
utils.base_dir_to_tar(base_dir)
_time_log()
|
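The _time_log helper above formats elapsed wall-clock time with a divmod cascade. A self-contained version of the same pattern, for reference:

import time

def format_elapsed(start):
    # Same divmod cascade as _time_log: seconds -> (minutes, seconds) -> (hours, minutes).
    m, s = divmod(time.time() - start, 60)
    h, m = divmod(m, 60)
    return '{:02d}:{:02d}:{:02d}'.format(int(h), int(m), int(s))

start = time.time()
time.sleep(1.2)
print(format_elapsed(start))  # -> 00:00:01
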
30,285 |
def handle_analyze_by_hash_response(response, file_hash):
if response.status_code == 404:
dbot = {
'Vendor': 'Intezer',
'Type': 'hash',
'Indicator': file_hash,
'Score': 0
}
demisto.results({
'Type': entryTypes['note'],
'EntryContext': {'DBotScore': [dbot]},
'HumanReadable': 'Hash {} does not exist on Intezer genome database'.format(file_hash),
'ContentsFormat': formats['json'],
'Contents': ''
})
return
elif response.status_code == 400:
return_error('File hash is not valid.\nIntezer file hash reputation supports only sha256, '
'sha1 and md5 hash formats.\n')
handle_analyze_response(response)
|
def handle_analyze_by_hash_response(response, file_hash):
if response.status_code == 404:
dbot = {
'Vendor': 'Intezer',
'Type': 'hash',
'Indicator': file_hash,
'Score': 0
}
demisto.results({
'Type': entryTypes['note'],
'EntryContext': {'DBotScore': [dbot]},
'HumanReadable': 'Hash {} does not exist on Intezer genome database'.format(file_hash),
'ContentsFormat': formats['json'],
'Contents': ''
})
return
elif response.status_code == 400:
return_error('File hash is not valid.\nIntezer file hash reputation supports only SHA-256, '
                     'SHA-1 and MD5 hash formats.\n')
handle_analyze_response(response)
|
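The error message above lists the digest types the hash-reputation lookup accepts. A small, hypothetical pre-check that classifies a hash by hex length before sending it (not part of the integration itself):

import re

HASH_LENGTHS = {32: 'md5', 40: 'sha1', 64: 'sha256'}

def guess_hash_type(file_hash):
    # Return 'md5', 'sha1' or 'sha256' for a plausible hex digest, otherwise None.
    if not re.fullmatch(r'[0-9a-fA-F]+', file_hash or ''):
        return None
    return HASH_LENGTHS.get(len(file_hash))

print(guess_hash_type('d41d8cd98f00b204e9800998ecf8427e'))  # -> 'md5'
print(guess_hash_type('not-a-hash'))                        # -> None
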
28,572 |
def plot_density(
data,
group="posterior",
data_labels=None,
var_names=None,
transform=None,
hdi_prob=None,
point_estimate="auto",
colors="cycle",
outline=True,
hdi_markers="",
shade=0.0,
bw="default",
circular=False,
grid=None,
figsize=None,
textsize=None,
labeller=None,
ax=None,
backend=None,
backend_kwargs=None,
show=None,
):
"""Generate KDE plots for continuous variables and histograms for discrete ones.
Plots are truncated at their 100*(1-alpha)% highest density intervals. Plots are grouped per
variable and colors assigned to models.
Parameters
----------
data : Union[Object, Iterator[Object]]
Any object that can be converted to an :class:`arviz.InferenceData` object, or an Iterator
returning a sequence of such objects.
Refer to documentation of :func:`arviz.convert_to_dataset` for details about such objects.
group: Optional[str]
Specifies which :class:`arviz.InferenceData` group should be plotted.
Defaults to 'posterior'.
Alternative values include 'prior' and any other strings used as dataset keys in the
:class:`arviz.InferenceData`.
data_labels : Optional[List[str]]
List with names for the datasets passed as "data." Useful when plotting more than one
dataset. Must be the same shape as the data parameter. Defaults to None.
var_names: Optional[List[str]]
List of variables to plot. If multiple datasets are supplied and var_names is not None,
will print the same set of variables for each dataset. Defaults to None, which results in
all the variables being plotted.
transform : callable
Function to transform data (defaults to None i.e. the identity function)
hdi_prob : float
Probability for the highest density interval. Should be in the interval (0, 1).
Defaults to 0.94.
point_estimate : Optional[str]
Plot point estimate per variable. Values should be 'mean', 'median', 'mode' or None.
Defaults to 'auto' i.e. it falls back to default set in ``rcParams``.
colors : Optional[Union[List[str],str]]
        List with valid matplotlib colors, one color per model. Alternatively, a string can be passed.
If the string is `cycle`, it will automatically choose a color per model from matplotlib's
cycle. If a single color is passed, e.g. 'k', 'C2' or 'red' this color will be used for all
models. Defaults to `cycle`.
outline : bool
        Use a line to draw KDEs and histograms. Defaults to True.
hdi_markers : str
A valid `matplotlib.markers` like 'v', used to indicate the limits of the highest density
interval. Defaults to empty string (no marker).
shade : Optional[float]
Alpha blending value for the shaded area under the curve, between 0 (no shade) and 1
(opaque). Defaults to 0.
bw: Optional[float or str]
If numeric, indicates the bandwidth and must be positive.
If str, indicates the method to estimate the bandwidth and must be
one of "scott", "silverman", "isj" or "experimental" when `circular` is False
and "taylor" (for now) when `circular` is True.
Defaults to "default" which means "experimental" when variable is not circular
and "taylor" when it is.
circular: Optional[bool]
        If True, the values passed are interpreted as coming from a circular variable measured in radians
and a circular KDE is used. Only valid for 1D KDE. Defaults to False.
grid : tuple
Number of rows and columns. Defaults to None, the rows and columns are
automatically inferred.
figsize : Optional[Tuple[int, int]]
Figure size. If None it will be defined automatically.
textsize: Optional[float]
Text size scaling factor for labels, titles and lines. If None it will be autoscaled based
on ``figsize``.
labeller : labeller instance, optional
Class providing the method ``make_label_vert`` to generate the labels in the plot titles.
Read the :ref:`label_guide` for more details and usage examples.
ax: numpy array-like of matplotlib axes or bokeh figures, optional
A 2D array of locations into which to plot the densities. If not supplied, Arviz will create
its own array of plot areas (and return it).
backend: str, optional
Select plotting backend {"matplotlib","bokeh"}. Default "matplotlib".
backend_kwargs: bool, optional
These are kwargs specific to the backend being used. For additional documentation
check the plotting method of the backend.
show : bool, optional
Call backend show function.
Returns
-------
axes : matplotlib axes or bokeh figures
See Also
--------
plot_posterior : Plot Posterior densities in the style of John K. Kruschke’s book.
plot_dist : Plot distribution as histogram or kernel density estimates.
Examples
--------
Plot default density plot
.. plot::
:context: close-figs
>>> import arviz as az
>>> centered = az.load_arviz_data('centered_eight')
>>> non_centered = az.load_arviz_data('non_centered_eight')
>>> az.plot_density([centered, non_centered])
Plot variables in a 4x5 grid
.. plot::
:context: close-figs
>>> az.plot_density([centered, non_centered], grid=(4, 5))
Plot subset variables by specifying variable name exactly
.. plot::
:context: close-figs
>>> az.plot_density([centered, non_centered], var_names=["mu"])
Plot a specific `az.InferenceData` group
.. plot::
:context: close-figs
>>> az.plot_density([centered, non_centered], var_names=["mu"], group="prior")
Specify highest density interval
.. plot::
:context: close-figs
>>> az.plot_density([centered, non_centered], var_names=["mu"], hdi_prob=.5)
Shade plots and/or remove outlines
.. plot::
:context: close-figs
>>> az.plot_density([centered, non_centered], var_names=["mu"], outline=False, shade=.8)
Specify binwidth for kernel density estimation
.. plot::
:context: close-figs
>>> az.plot_density([centered, non_centered], var_names=["mu"], bw=.9)
"""
if not isinstance(data, (list, tuple)):
datasets = [convert_to_dataset(data, group=group)]
else:
datasets = [convert_to_dataset(datum, group=group) for datum in data]
if transform is not None:
datasets = [transform(dataset) for dataset in datasets]
if labeller is None:
labeller = BaseLabeller()
var_names = _var_names(var_names, datasets)
n_data = len(datasets)
if data_labels is None:
if n_data > 1:
data_labels = [f"{idx}" for idx in range(n_data)]
else:
data_labels = [""]
elif len(data_labels) != n_data:
raise ValueError(
"The number of names for the models ({}) "
"does not match the number of models ({})".format(len(data_labels), n_data)
)
if hdi_prob is None:
hdi_prob = rcParams["stats.hdi_prob"]
else:
if not 1 >= hdi_prob > 0:
raise ValueError("The value of hdi_prob should be in the interval (0, 1]")
to_plot = [list(xarray_var_iter(data, var_names, combined=True)) for data in datasets]
all_labels = []
length_plotters = []
for plotters in to_plot:
length_plotters.append(len(plotters))
for var_name, selection, isel, _ in plotters:
label = labeller.make_label_vert(var_name, selection, isel)
if label not in all_labels:
all_labels.append(label)
length_plotters = len(all_labels)
max_plots = rcParams["plot.max_subplots"]
max_plots = length_plotters if max_plots is None else max_plots
if length_plotters > max_plots:
warnings.warn(
"rcParams['plot.max_subplots'] ({max_plots}) is smaller than the number "
"of variables to plot ({len_plotters}) in plot_density, generating only "
"{max_plots} plots".format(max_plots=max_plots, len_plotters=length_plotters),
UserWarning,
)
all_labels = all_labels[:max_plots]
to_plot = [
[
(var_name, selection, values)
for var_name, selection, isel, values in plotters
if labeller.make_label_vert(var_name, selection, isel) in all_labels
]
for plotters in to_plot
]
length_plotters = max_plots
rows, cols = default_grid(length_plotters, grid=grid, max_cols=3)
if bw == "default":
if circular:
bw = "taylor"
else:
bw = "experimental"
plot_density_kwargs = dict(
ax=ax,
all_labels=all_labels,
to_plot=to_plot,
colors=colors,
bw=bw,
circular=circular,
figsize=figsize,
length_plotters=length_plotters,
rows=rows,
cols=cols,
textsize=textsize,
labeller=labeller,
hdi_prob=hdi_prob,
point_estimate=point_estimate,
hdi_markers=hdi_markers,
outline=outline,
shade=shade,
n_data=n_data,
data_labels=data_labels,
backend_kwargs=backend_kwargs,
show=show,
)
if backend is None:
backend = rcParams["plot.backend"]
backend = backend.lower()
# TODO: Add backend kwargs
plot = get_plotting_function("plot_density", "densityplot", backend)
ax = plot(**plot_density_kwargs)
return ax
|
def plot_density(
data,
group="posterior",
data_labels=None,
var_names=None,
transform=None,
hdi_prob=None,
point_estimate="auto",
colors="cycle",
outline=True,
hdi_markers="",
shade=0.0,
bw="default",
circular=False,
grid=None,
figsize=None,
textsize=None,
labeller=None,
ax=None,
backend=None,
backend_kwargs=None,
show=None,
):
"""Generate KDE plots for continuous variables and histograms for discrete ones.
Plots are truncated at their 100*(1-alpha)% highest density intervals. Plots are grouped per
variable and colors assigned to models.
Parameters
----------
data : Union[Object, Iterator[Object]]
Any object that can be converted to an :class:`arviz.InferenceData` object, or an Iterator
returning a sequence of such objects.
Refer to documentation of :func:`arviz.convert_to_dataset` for details about such objects.
group: Optional[str]
Specifies which :class:`arviz.InferenceData` group should be plotted.
Defaults to 'posterior'.
Alternative values include 'prior' and any other strings used as dataset keys in the
:class:`arviz.InferenceData`.
data_labels : Optional[List[str]]
List with names for the datasets passed as "data." Useful when plotting more than one
dataset. Must be the same shape as the data parameter. Defaults to None.
var_names: Optional[List[str]]
List of variables to plot. If multiple datasets are supplied and var_names is not None,
will print the same set of variables for each dataset. Defaults to None, which results in
all the variables being plotted.
transform : callable
Function to transform data (defaults to None i.e. the identity function)
hdi_prob : float
Probability for the highest density interval. Should be in the interval (0, 1].
Defaults to 0.94.
point_estimate : Optional[str]
Plot point estimate per variable. Values should be 'mean', 'median', 'mode' or None.
Defaults to 'auto' i.e. it falls back to default set in ``rcParams``.
colors : Optional[Union[List[str],str]]
        List with valid matplotlib colors, one color per model. Alternatively, a string can be passed.
If the string is `cycle`, it will automatically choose a color per model from matplotlib's
cycle. If a single color is passed, e.g. 'k', 'C2' or 'red' this color will be used for all
models. Defaults to `cycle`.
outline : bool
        Use a line to draw KDEs and histograms. Defaults to True.
hdi_markers : str
A valid `matplotlib.markers` like 'v', used to indicate the limits of the highest density
interval. Defaults to empty string (no marker).
shade : Optional[float]
Alpha blending value for the shaded area under the curve, between 0 (no shade) and 1
(opaque). Defaults to 0.
bw: Optional[float or str]
If numeric, indicates the bandwidth and must be positive.
If str, indicates the method to estimate the bandwidth and must be
one of "scott", "silverman", "isj" or "experimental" when `circular` is False
and "taylor" (for now) when `circular` is True.
Defaults to "default" which means "experimental" when variable is not circular
and "taylor" when it is.
circular: Optional[bool]
        If True, the values passed are interpreted as coming from a circular variable measured in radians
and a circular KDE is used. Only valid for 1D KDE. Defaults to False.
grid : tuple
Number of rows and columns. Defaults to None, the rows and columns are
automatically inferred.
figsize : Optional[Tuple[int, int]]
Figure size. If None it will be defined automatically.
textsize: Optional[float]
Text size scaling factor for labels, titles and lines. If None it will be autoscaled based
on ``figsize``.
labeller : labeller instance, optional
Class providing the method ``make_label_vert`` to generate the labels in the plot titles.
Read the :ref:`label_guide` for more details and usage examples.
ax: numpy array-like of matplotlib axes or bokeh figures, optional
A 2D array of locations into which to plot the densities. If not supplied, Arviz will create
its own array of plot areas (and return it).
backend: str, optional
Select plotting backend {"matplotlib","bokeh"}. Default "matplotlib".
backend_kwargs: bool, optional
These are kwargs specific to the backend being used. For additional documentation
check the plotting method of the backend.
show : bool, optional
Call backend show function.
Returns
-------
axes : matplotlib axes or bokeh figures
See Also
--------
plot_posterior : Plot Posterior densities in the style of John K. Kruschke’s book.
plot_dist : Plot distribution as histogram or kernel density estimates.
Examples
--------
Plot default density plot
.. plot::
:context: close-figs
>>> import arviz as az
>>> centered = az.load_arviz_data('centered_eight')
>>> non_centered = az.load_arviz_data('non_centered_eight')
>>> az.plot_density([centered, non_centered])
Plot variables in a 4x5 grid
.. plot::
:context: close-figs
>>> az.plot_density([centered, non_centered], grid=(4, 5))
Plot subset variables by specifying variable name exactly
.. plot::
:context: close-figs
>>> az.plot_density([centered, non_centered], var_names=["mu"])
Plot a specific `az.InferenceData` group
.. plot::
:context: close-figs
>>> az.plot_density([centered, non_centered], var_names=["mu"], group="prior")
Specify highest density interval
.. plot::
:context: close-figs
>>> az.plot_density([centered, non_centered], var_names=["mu"], hdi_prob=.5)
Shade plots and/or remove outlines
.. plot::
:context: close-figs
>>> az.plot_density([centered, non_centered], var_names=["mu"], outline=False, shade=.8)
Specify binwidth for kernel density estimation
.. plot::
:context: close-figs
>>> az.plot_density([centered, non_centered], var_names=["mu"], bw=.9)
"""
if not isinstance(data, (list, tuple)):
datasets = [convert_to_dataset(data, group=group)]
else:
datasets = [convert_to_dataset(datum, group=group) for datum in data]
if transform is not None:
datasets = [transform(dataset) for dataset in datasets]
if labeller is None:
labeller = BaseLabeller()
var_names = _var_names(var_names, datasets)
n_data = len(datasets)
if data_labels is None:
if n_data > 1:
data_labels = [f"{idx}" for idx in range(n_data)]
else:
data_labels = [""]
elif len(data_labels) != n_data:
raise ValueError(
"The number of names for the models ({}) "
"does not match the number of models ({})".format(len(data_labels), n_data)
)
if hdi_prob is None:
hdi_prob = rcParams["stats.hdi_prob"]
else:
if not 1 >= hdi_prob > 0:
raise ValueError("The value of hdi_prob should be in the interval (0, 1]")
to_plot = [list(xarray_var_iter(data, var_names, combined=True)) for data in datasets]
all_labels = []
length_plotters = []
for plotters in to_plot:
length_plotters.append(len(plotters))
for var_name, selection, isel, _ in plotters:
label = labeller.make_label_vert(var_name, selection, isel)
if label not in all_labels:
all_labels.append(label)
length_plotters = len(all_labels)
max_plots = rcParams["plot.max_subplots"]
max_plots = length_plotters if max_plots is None else max_plots
if length_plotters > max_plots:
warnings.warn(
"rcParams['plot.max_subplots'] ({max_plots}) is smaller than the number "
"of variables to plot ({len_plotters}) in plot_density, generating only "
"{max_plots} plots".format(max_plots=max_plots, len_plotters=length_plotters),
UserWarning,
)
all_labels = all_labels[:max_plots]
to_plot = [
[
(var_name, selection, values)
for var_name, selection, isel, values in plotters
if labeller.make_label_vert(var_name, selection, isel) in all_labels
]
for plotters in to_plot
]
length_plotters = max_plots
rows, cols = default_grid(length_plotters, grid=grid, max_cols=3)
if bw == "default":
if circular:
bw = "taylor"
else:
bw = "experimental"
plot_density_kwargs = dict(
ax=ax,
all_labels=all_labels,
to_plot=to_plot,
colors=colors,
bw=bw,
circular=circular,
figsize=figsize,
length_plotters=length_plotters,
rows=rows,
cols=cols,
textsize=textsize,
labeller=labeller,
hdi_prob=hdi_prob,
point_estimate=point_estimate,
hdi_markers=hdi_markers,
outline=outline,
shade=shade,
n_data=n_data,
data_labels=data_labels,
backend_kwargs=backend_kwargs,
show=show,
)
if backend is None:
backend = rcParams["plot.backend"]
backend = backend.lower()
# TODO: Add backend kwargs
plot = get_plotting_function("plot_density", "densityplot", backend)
ax = plot(**plot_density_kwargs)
return ax
|
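plot_density hands the number of panels to default_grid to obtain a (rows, cols) layout. Below is a rough stand-in for that helper, assuming a simple fill-by-rows behaviour; the real default_grid may differ in details.

import math

def grid_shape(n_plots, max_cols=3, grid=None):
    # Honour an explicit (rows, cols) when given, otherwise fill rows of max_cols.
    if grid is not None:
        return grid
    cols = min(n_plots, max_cols)
    rows = math.ceil(n_plots / cols)
    return rows, cols

print(grid_shape(7))               # -> (3, 3)
print(grid_shape(7, grid=(4, 2)))  # -> (4, 2)
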
31,376 |
def fetch_incidents(rest_client, last_run=dict()):
"""
This function will execute each interval (default is 1 minute).
Args:
rest_client (Client): Demisto BaseClient
last_run (dict): Information about the last successful execution of fetch incidents
If last_run is None then fetch all open incidents
Returns:
next_run: This will be last_run in the next fetch-incidents
incidents: Incidents that will be created in Demisto
"""
if last_run is None:
last_run = dict()
# get tenant ids
tenant_mappings = rest_client.get_tenant_mappings()
incidents = []
next_run = last_run
# get incidents for each tenant
for internal_tenant_id, external_tenant_id in tenant_mappings.items():
# Get the last fetch time for tenant, if exists, which will be used as the 'search from here onward' time
latest_time = None
from_time = ''
if last_run.get(external_tenant_id) is not None:
latest_time = last_run.get(external_tenant_id).get('time')
if latest_time is not None:
# latest_time+1 (ms) to prevent duplicates
from_time = datetime.utcfromtimestamp((int(latest_time) + 1) / 1000).strftime(
'%Y-%m-%d %H:%M:%S.%f')
# convert to utc datetime for incidents filter
raw_incidents = fetch_incidents_for_tenant(rest_client, internal_tenant_id,
external_tenant_id, from_time)
raw_incidents.sort(key=lambda x: x.get('dateCreated'))
for raw_incident in raw_incidents:
try:
formatted_incident = format_raw_incident(raw_incident, external_tenant_id,
internal_tenant_id)
new_incident = {
'name': external_tenant_id + ': ' + raw_incident['id'],
'occurred': formatted_incident.get('timeGenerated'),
'rawJSON': json.dumps(formatted_incident)
}
incidents.append(new_incident)
if latest_time is None or raw_incident['dateCreated'] > latest_time:
latest_time = raw_incident['dateCreated']
except Exception as err:
demisto.error(
'Exception thrown collecting specific incident for tenant: ' + external_tenant_id + str(
err) + '\n incident: ' + str(raw_incident))
break
# store
if external_tenant_id in next_run:
next_run[external_tenant_id]['time'] = latest_time
else:
next_run[external_tenant_id] = {'time': latest_time}
return next_run, incidents
|
def fetch_incidents(rest_client, last_run):
"""
This function will execute each interval (default is 1 minute).
Args:
rest_client (Client): Demisto BaseClient
last_run (dict): Information about the last successful execution of fetch incidents
If last_run is None then fetch all open incidents
Returns:
next_run: This will be last_run in the next fetch-incidents
incidents: Incidents that will be created in Demisto
"""
if last_run is None:
last_run = dict()
# get tenant ids
tenant_mappings = rest_client.get_tenant_mappings()
incidents = []
next_run = last_run
# get incidents for each tenant
for internal_tenant_id, external_tenant_id in tenant_mappings.items():
# Get the last fetch time for tenant, if exists, which will be used as the 'search from here onward' time
latest_time = None
from_time = ''
if last_run.get(external_tenant_id) is not None:
latest_time = last_run.get(external_tenant_id).get('time')
if latest_time is not None:
# latest_time+1 (ms) to prevent duplicates
from_time = datetime.utcfromtimestamp((int(latest_time) + 1) / 1000).strftime(
'%Y-%m-%d %H:%M:%S.%f')
# convert to utc datetime for incidents filter
raw_incidents = fetch_incidents_for_tenant(rest_client, internal_tenant_id,
external_tenant_id, from_time)
raw_incidents.sort(key=lambda x: x.get('dateCreated'))
for raw_incident in raw_incidents:
try:
formatted_incident = format_raw_incident(raw_incident, external_tenant_id,
internal_tenant_id)
new_incident = {
'name': external_tenant_id + ': ' + raw_incident['id'],
'occurred': formatted_incident.get('timeGenerated'),
'rawJSON': json.dumps(formatted_incident)
}
incidents.append(new_incident)
if latest_time is None or raw_incident['dateCreated'] > latest_time:
latest_time = raw_incident['dateCreated']
except Exception as err:
demisto.error(
'Exception thrown collecting specific incident for tenant: ' + external_tenant_id + str(
err) + '\n incident: ' + str(raw_incident))
break
# store
if external_tenant_id in next_run:
next_run[external_tenant_id]['time'] = latest_time
else:
next_run[external_tenant_id] = {'time': latest_time}
return next_run, incidents
|
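fetch_incidents converts the stored millisecond timestamp, plus one millisecond, into the filter string format. The same computation in isolation, with an invented timestamp value:

from datetime import datetime

def ms_epoch_to_filter_time(ms_epoch):
    # Same formatting as the from_time computation above.
    return datetime.utcfromtimestamp(ms_epoch / 1000).strftime('%Y-%m-%d %H:%M:%S.%f')

latest_time = 1600000000000                           # last incident already fetched (ms)
from_time = ms_epoch_to_filter_time(latest_time + 1)  # +1 ms avoids re-fetching it
print(from_time)  # -> 2020-09-13 12:26:40.001000
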
23,159 |
def test_from_pandas_chunksize_one():
# See: https://github.com/dask/dask/issues/9218
df = pd.DataFrame(np.random.randint(0, 10, size=(10, 4)), columns=list("ABCD"))
ddf = dd.from_pandas(df, chunksize=1)
num_rows = list(ddf.map_partitions(lambda df: len(df)).compute())
# chunksize=1 with range index should
# always have unit-length partitions
assert num_rows == [1] * 10
|
def test_from_pandas_chunksize_one():
# See: https://github.com/dask/dask/issues/9218
df = pd.DataFrame(np.random.randint(0, 10, size=(10, 4)), columns=list("ABCD"))
ddf = dd.from_pandas(df, chunksize=1)
num_rows = list(ddf.map_partitions(len).compute())
# chunksize=1 with range index should
# always have unit-length partitions
assert num_rows == [1] * 10
|
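The change above swaps lambda df: len(df) for the builtin len. A tiny check (requires dask installed) that both spellings count partition rows identically:

import numpy as np
import pandas as pd
import dask.dataframe as dd

df = pd.DataFrame(np.random.randint(0, 10, size=(10, 4)), columns=list("ABCD"))
ddf = dd.from_pandas(df, chunksize=5)

# Passing the builtin `len` and wrapping it in a lambda give the same
# per-partition row counts; the builtin is simply the shorter spelling.
print(list(ddf.map_partitions(len).compute()))                     # -> [5, 5]
print(list(ddf.map_partitions(lambda part: len(part)).compute()))  # -> [5, 5]
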
48,462 |
def deprecation_versions():
''' Create a list of valid version for deprecation entries, current+4 '''
current = float('.'.join(__version__.split('.')[0:2]))
return Any(*[str(current + x / 100) for x in range(0, 5)])
|
def deprecation_versions():
"""Create a list of valid version for deprecation entries, current+4"""
major, minor = [int(version) for version in __version__.split('.')[0:2]]
return Any(*['{0}.{1}'.format(major, minor + increment) for increment in range(0, 5)])
|
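The rewrite above replaces float arithmetic on version strings with integer arithmetic. A short demonstration of why the float variant breaks on two-digit minor versions:

# float("2.10") == 2.1, so a two-digit minor version collapses onto the
# one-digit one and str() prints "2.1" rather than "2.10".
version = '2.10.3'
print(float('.'.join(version.split('.')[0:2])))   # -> 2.1

# Integer parsing keeps the minor number intact:
major, minor = [int(part) for part in version.split('.')[0:2]]
print(['{0}.{1}'.format(major, minor + increment) for increment in range(0, 5)])
# -> ['2.10', '2.11', '2.12', '2.13', '2.14']
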
28,281 |
def _expand_data_to_arrays(data: List[List[Any]], paramspecs: Sequence[ParamSpecBase]) -> None:
types = [param.type for param in paramspecs]
# if we have array type parameters expand all other parameters
# to arrays
if 'array' in types:
if ('numeric' in types or 'text' in types
or 'complex' in types):
first_array_element = types.index('array')
numeric_elms = [i for i, x in enumerate(types)
if x == "numeric"]
complex_elms = [i for i, x in enumerate(types)
if x == 'complex']
text_elms = [i for i, x in enumerate(types)
if x == "text"]
for row in data:
for element in numeric_elms:
row[element] = np.full_like(row[first_array_element],
row[element],
dtype=np.dtype(np.float64))
                # todo should we handle int/float types here
                # we would in practice have to perform another
                # loop to check that all elements of a given array can be
                # cast to int without losing precision before choosing an
                # integer representation of the array
for element in complex_elms:
row[element] = np.full_like(row[first_array_element],
row[element],
dtype=np.dtype(np.complex128))
for element in text_elms:
strlen = len(row[element])
row[element] = np.full_like(row[first_array_element],
row[element],
dtype=np.dtype(f'U{strlen}'))
for row in data:
# now expand all one element arrays to match the expected size
# one element arrays are introduced if numeric values are storred
# with an explicit array storage type
sizes = tuple(array.size for array in row)
max_size = max(sizes)
max_index = sizes.index(max_size)
for i, array in enumerate(row):
if array.size != max_size:
if array.size == 1:
row[i] = np.full_like(row[max_index],
row[i],
dtype=row[i].dtype)
else:
log.warning(f"Cannot expand array of size {array.size} "
f"to size {row[max_index].size}")
|
def _expand_data_to_arrays(data: List[List[Any]], paramspecs: Sequence[ParamSpecBase]) -> None:
types = [param.type for param in paramspecs]
# if we have array type parameters expand all other parameters
# to arrays
if 'array' in types:
if ('numeric' in types or 'text' in types
or 'complex' in types):
first_array_element = types.index('array')
numeric_elms = [i for i, x in enumerate(types)
if x == "numeric"]
complex_elms = [i for i, x in enumerate(types)
if x == 'complex']
text_elms = [i for i, x in enumerate(types)
if x == "text"]
for row in data:
for element in numeric_elms:
row[element] = np.full_like(row[first_array_element],
row[element],
dtype=np.dtype(np.float64))
                # todo should we handle int/float types here
                # we would in practice have to perform another
                # loop to check that all elements of a given array can be
                # cast to int without losing precision before choosing an
                # integer representation of the array
for element in complex_elms:
row[element] = np.full_like(row[first_array_element],
row[element],
dtype=np.dtype(np.complex128))
for element in text_elms:
strlen = len(row[element])
row[element] = np.full_like(row[first_array_element],
row[element],
dtype=np.dtype(f'U{strlen}'))
for row in data:
# now expand all one element arrays to match the expected size
# one element arrays are introduced if scalar values are stored
# with an explicit array storage type
sizes = tuple(array.size for array in row)
max_size = max(sizes)
max_index = sizes.index(max_size)
for i, array in enumerate(row):
if array.size != max_size:
if array.size == 1:
row[i] = np.full_like(row[max_index],
row[i],
dtype=row[i].dtype)
else:
log.warning(f"Cannot expand array of size {array.size} "
f"to size {row[max_index].size}")
|
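The expansion step above relies on np.full_like to blow a scalar up to the shape of the reference array. The same operation in isolation; the column names below are invented for illustration.

import numpy as np

setpoints = np.array([0.0, 0.5, 1.0, 1.5])  # an 'array' typed column of one row
temperature = 21.3                          # a scalar stored alongside it

# np.full_like copies the shape of the reference array and fills it with the
# scalar value, which is how scalar columns are expanded to match array columns.
expanded = np.full_like(setpoints, temperature, dtype=np.dtype(np.float64))
print(expanded)        # -> [21.3 21.3 21.3 21.3]
print(expanded.shape)  # -> (4,)
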
5,780 |
def combine_pvalues(pvalues, method='fisher', weights=None):
"""
Combine p-values from independent tests that bear upon the same hypothesis
and are based on continuous distributions.
Parameters
----------
pvalues : array_like, 1-D
Array of p-values assumed to come from independent tests based on
continuous distributions.
method : {'fisher', 'pearson', 'tippett', 'stouffer', 'mudholkar_george'}, optional
Name of method to use to combine p-values.
The following methods are available (default is 'fisher'):
* 'fisher': Fisher's method (Fisher's combined probability test), the
sum of the logarithm of the p-values
* 'pearson': Pearson's method (similar to Fisher's but uses sum of the
complement of the p-values inside the logarithms)
* 'tippett': Tippett's method (minimum of p-values)
* 'stouffer': Stouffer's Z-score method
* 'mudholkar_george': the difference of Fisher's and Pearson's methods
divided by 2
weights : array_like, 1-D, optional
Optional array of weights used only for Stouffer's Z-score method.
Returns
-------
statistic: float
The statistic calculated by the specified method.
pval: float
The combined p-value.
Notes
-----
All methods assume tests based on continuous distributions, i.e., the
distribution of p-values under the null hypothesis must be the continuous
uniform distribution on the interval [0,1]. If this function is applied to
    tests with a discrete statistic such as any rank test or contingency-table
test, it will yield systematically wrong results, e.g. Fisher's method will
systematically overestimate the p-value [1]_. This problem becomes less
severe for large sample sizes when the discrete distributions become
approximately continuous.
The differences between the methods can be best illustrated by their
statistics and what aspects of a combination of p-values they emphasise
when considering significance [2]_. For example, methods emphasising large
p-values are more sensitive to strong false and true negatives; conversely
methods focussing on small p-values are sensitive to positives.
* Fisher's method (also known as Fisher's combined probability test) [3]_
uses the product of individual p-values: :math:`\\prod_i p_i`.
(Mind that this product is not the combined p-value.)
This method emphasises small p-values.
Note that the test statistic used internally and returned by this method
is :math:`-2\\sum_i \\log(p_i)` for numerical reasons.
* Pearson's method uses :math:`\\prod_i \\frac{1}{1-p_i}` [2]_.
It thus emphasises large p-values.
Note that the test statistic used internally and returned by this method
is :math:`-2\\sum_i \\log(1-p_i)` for numerical reasons.
* Mudholkar and George compromise between Fisher's and Pearson's method by
averaging their statistics [4]_. Their method emphasises extreme
p-values, both close to 1 and 0.
* Stouffer's method [5]_ uses Z-scores and the statistic:
:math:`\\sum_i \\Phi^{-1} (p_i)`, where :math:`\\Phi` is the CDF of the
standard normal distribution. The advantage of this method is that it is
straightforward to introduce weights, which can make Stouffer's method
more powerful than Fisher's method when the p-values are from studies
of different size [6]_ [7]_.
* Tippett's method uses the smallest p-value as a statistic.
(Mind that this minimum is not the combined p-value.)
Fisher's method may be extended to combine p-values from dependent tests
[8]_. Extensions such as Brown's method and Kost's method are not currently
implemented.
.. versionadded:: 0.15.0
References
----------
.. [1] Kincaid, W. M., "The Combination of Tests Based on Discrete
Distributions." Journal of the American Statistical Association 57,
no. 297 (1962), 10-19.
.. [2] Heard, N. and Rubin-Delanchey, P. "Choosing between methods of
combining p-values." Biometrika 105.1 (2018): 239-246.
.. [3] https://en.wikipedia.org/wiki/Fisher%27s_method
.. [4] George, E. O., and G. S. Mudholkar. "On the convolution of logistic
random variables." Metrika 30.1 (1983): 1-13.
.. [5] https://en.wikipedia.org/wiki/Fisher%27s_method#Relation_to_Stouffer.27s_Z-score_method
.. [6] Whitlock, M. C. "Combining probability from independent tests: the
weighted Z-method is superior to Fisher's approach." Journal of
Evolutionary Biology 18, no. 5 (2005): 1368-1373.
.. [7] Zaykin, Dmitri V. "Optimally weighted Z-test is a powerful method
for combining probabilities in meta-analysis." Journal of
Evolutionary Biology 24, no. 8 (2011): 1836-1841.
.. [8] https://en.wikipedia.org/wiki/Extensions_of_Fisher%27s_method
"""
pvalues = np.asarray(pvalues)
if pvalues.ndim != 1:
raise ValueError("pvalues is not 1-D")
if method == 'fisher':
statistic = -2 * np.sum(np.log(pvalues))
pval = distributions.chi2.sf(statistic, 2 * len(pvalues))
elif method == 'pearson':
statistic = -2 * np.sum(np.log1p(-pvalues))
pval = distributions.chi2.cdf(statistic, 2 * len(pvalues))
elif method == 'mudholkar_george':
normalizing_factor = np.sqrt(3/len(pvalues))/np.pi
statistic = -np.sum(np.log(pvalues)) + np.sum(np.log1p(-pvalues))
nu = 5 * len(pvalues) + 4
approx_factor = np.sqrt(nu / (nu - 2))
pval = distributions.t.sf(statistic * normalizing_factor
* approx_factor, nu)
elif method == 'tippett':
statistic = np.min(pvalues)
pval = distributions.beta.cdf(statistic, 1, len(pvalues))
elif method == 'stouffer':
if weights is None:
weights = np.ones_like(pvalues)
elif len(weights) != len(pvalues):
raise ValueError("pvalues and weights must be of the same size.")
weights = np.asarray(weights)
if weights.ndim != 1:
raise ValueError("weights is not 1-D")
Zi = distributions.norm.isf(pvalues)
statistic = np.dot(weights, Zi) / np.linalg.norm(weights)
pval = distributions.norm.sf(statistic)
else:
raise ValueError(
"Invalid method '%s'. Valid methods are 'fisher', 'pearson', \
'mudholkar_george', 'tippett', and 'stouffer'", method)
return (statistic, pval)
|
def combine_pvalues(pvalues, method='fisher', weights=None):
"""
Combine p-values from independent tests that bear upon the same hypothesis
and are based on continuous distributions.
Parameters
----------
pvalues : array_like, 1-D
Array of p-values assumed to come from independent tests based on
continuous distributions.
method : {'fisher', 'pearson', 'tippett', 'stouffer', 'mudholkar_george'}
Name of method to use to combine p-values.
The following methods are available (default is 'fisher'):
* 'fisher': Fisher's method (Fisher's combined probability test), the
sum of the logarithm of the p-values
* 'pearson': Pearson's method (similar to Fisher's but uses sum of the
complement of the p-values inside the logarithms)
* 'tippett': Tippett's method (minimum of p-values)
* 'stouffer': Stouffer's Z-score method
* 'mudholkar_george': the difference of Fisher's and Pearson's methods
divided by 2
weights : array_like, 1-D, optional
Optional array of weights used only for Stouffer's Z-score method.
Returns
-------
statistic: float
The statistic calculated by the specified method.
pval: float
The combined p-value.
Notes
-----
All methods assume tests based on continuous distributions, i.e., the
distribution of p-values under the null hypothesis must be the continuous
uniform distribution on the interval [0,1]. If this function is applied to
    tests with a discrete statistic such as any rank test or contingency-table
test, it will yield systematically wrong results, e.g. Fisher's method will
systematically overestimate the p-value [1]_. This problem becomes less
severe for large sample sizes when the discrete distributions become
approximately continuous.
The differences between the methods can be best illustrated by their
statistics and what aspects of a combination of p-values they emphasise
when considering significance [2]_. For example, methods emphasising large
p-values are more sensitive to strong false and true negatives; conversely
methods focussing on small p-values are sensitive to positives.
* Fisher's method (also known as Fisher's combined probability test) [3]_
uses the product of individual p-values: :math:`\\prod_i p_i`.
(Mind that this product is not the combined p-value.)
This method emphasises small p-values.
Note that the test statistic used internally and returned by this method
is :math:`-2\\sum_i \\log(p_i)` for numerical reasons.
* Pearson's method uses :math:`\\prod_i \\frac{1}{1-p_i}` [2]_.
It thus emphasises large p-values.
Note that the test statistic used internally and returned by this method
is :math:`-2\\sum_i \\log(1-p_i)` for numerical reasons.
* Mudholkar and George compromise between Fisher's and Pearson's method by
averaging their statistics [4]_. Their method emphasises extreme
p-values, both close to 1 and 0.
* Stouffer's method [5]_ uses Z-scores and the statistic:
:math:`\\sum_i \\Phi^{-1} (p_i)`, where :math:`\\Phi` is the CDF of the
standard normal distribution. The advantage of this method is that it is
straightforward to introduce weights, which can make Stouffer's method
more powerful than Fisher's method when the p-values are from studies
of different size [6]_ [7]_.
* Tippett's method uses the smallest p-value as a statistic.
(Mind that this minimum is not the combined p-value.)
Fisher's method may be extended to combine p-values from dependent tests
[8]_. Extensions such as Brown's method and Kost's method are not currently
implemented.
.. versionadded:: 0.15.0
References
----------
.. [1] Kincaid, W. M., "The Combination of Tests Based on Discrete
Distributions." Journal of the American Statistical Association 57,
no. 297 (1962), 10-19.
.. [2] Heard, N. and Rubin-Delanchey, P. "Choosing between methods of
combining p-values." Biometrika 105.1 (2018): 239-246.
.. [3] https://en.wikipedia.org/wiki/Fisher%27s_method
.. [4] George, E. O., and G. S. Mudholkar. "On the convolution of logistic
random variables." Metrika 30.1 (1983): 1-13.
.. [5] https://en.wikipedia.org/wiki/Fisher%27s_method#Relation_to_Stouffer.27s_Z-score_method
.. [6] Whitlock, M. C. "Combining probability from independent tests: the
weighted Z-method is superior to Fisher's approach." Journal of
Evolutionary Biology 18, no. 5 (2005): 1368-1373.
.. [7] Zaykin, Dmitri V. "Optimally weighted Z-test is a powerful method
for combining probabilities in meta-analysis." Journal of
Evolutionary Biology 24, no. 8 (2011): 1836-1841.
.. [8] https://en.wikipedia.org/wiki/Extensions_of_Fisher%27s_method
"""
pvalues = np.asarray(pvalues)
if pvalues.ndim != 1:
raise ValueError("pvalues is not 1-D")
if method == 'fisher':
statistic = -2 * np.sum(np.log(pvalues))
pval = distributions.chi2.sf(statistic, 2 * len(pvalues))
elif method == 'pearson':
statistic = -2 * np.sum(np.log1p(-pvalues))
pval = distributions.chi2.cdf(statistic, 2 * len(pvalues))
elif method == 'mudholkar_george':
normalizing_factor = np.sqrt(3/len(pvalues))/np.pi
statistic = -np.sum(np.log(pvalues)) + np.sum(np.log1p(-pvalues))
nu = 5 * len(pvalues) + 4
approx_factor = np.sqrt(nu / (nu - 2))
pval = distributions.t.sf(statistic * normalizing_factor
* approx_factor, nu)
elif method == 'tippett':
statistic = np.min(pvalues)
pval = distributions.beta.cdf(statistic, 1, len(pvalues))
elif method == 'stouffer':
if weights is None:
weights = np.ones_like(pvalues)
elif len(weights) != len(pvalues):
raise ValueError("pvalues and weights must be of the same size.")
weights = np.asarray(weights)
if weights.ndim != 1:
raise ValueError("weights is not 1-D")
Zi = distributions.norm.isf(pvalues)
statistic = np.dot(weights, Zi) / np.linalg.norm(weights)
pval = distributions.norm.sf(statistic)
else:
raise ValueError(
"Invalid method '%s'. Valid methods are 'fisher', 'pearson', \
'mudholkar_george', 'tippett', and 'stouffer'", method)
return (statistic, pval)
|
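A quick usage sketch for Fisher's method, computing the statistic described in the docstring by hand and cross-checking it against scipy.stats.combine_pvalues; the p-values are arbitrary example numbers.

import numpy as np
from scipy import stats

pvalues = np.array([0.01, 0.20, 0.30])

# Fisher's statistic by hand: -2 * sum(log(p)), compared against a chi-squared
# distribution with 2k degrees of freedom.
statistic = -2 * np.sum(np.log(pvalues))
pval = stats.chi2.sf(statistic, 2 * len(pvalues))
print(statistic, pval)

# The library call should agree with the manual computation above.
print(stats.combine_pvalues(pvalues, method='fisher'))
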
47,396 |
def has_length(dataset):
"""
Checks if the dataset implements __len__() and it doesn't raise an error
"""
try:
if len(dataset) is not None:
return True
except TypeError:
# TypeError: len() of unsized object
return False
|
def has_length(dataset):
"""
Checks if the dataset implements __len__() and it doesn't raise an error
"""
try:
return len(dataset) is not None
except TypeError:
# TypeError: len() of unsized object
return False
|
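A two-line check of the behaviour the simplified return preserves: sized containers report True, while unsized iterators fall into the TypeError branch.

def has_length(dataset):
    try:
        return len(dataset) is not None
    except TypeError:  # len() of unsized object
        return False

print(has_length([1, 2, 3]))             # -> True, lists implement __len__
print(has_length(x for x in range(3)))   # -> False, generators do not
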
47,028 |
def get_scheduler(
name: Union[str, SchedulerType],
optimizer: Optimizer,
num_warmup_steps: Optional[int] = None,
num_training_steps: Optional[int] = None,
):
"""
Unified API to get any scheduler from its name.
Args:
name (:obj:`str` or `:obj:`SchedulerType`):
The name of the scheduler to use.
optimizer (:obj:`torch.optim.Optimizer`):
The optimizer that will be used during training.
num_warmup_steps (:obj:`int`, `optional`):
The number of warmup steps to do. This is not required by all schedulers (hence the argument being
optional), the function will raise an error if it's unset and the scheduler type requires it.
num_training_steps (:obj:`int`, `optional`):
The number of training steps to do. This is not required by all schedulers (hence the argument being
optional), the function will raise an error if it's unset and the scheduler type requires it.
"""
name = SchedulerType(name)
schedule_func = TYPE_TO_SCHEDULER_FUNCTION[name]
if name == SchedulerType.CONSTANT:
return schedule_func(optimizer)
# All other schedulers require `num_warmup_steps`
if num_warmup_steps is None:
raise ValueError(f"{name} requires `num_warmup_steps`, please provide that argument.")
if name == SchedulerType.CONSTANT_WITH_WARMUP:
return schedule_func(optimizer, num_warmup_steps=num_warmup_steps)
# All other schedulers require `num_warmup_steps`
if num_training_steps is None:
raise ValueError(f"{name} requires `num_training_steps`, please provide that argument.")
return schedule_func(optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps)
|
def get_scheduler(
name: Union[str, SchedulerType],
optimizer: Optimizer,
num_warmup_steps: Optional[int] = None,
num_training_steps: Optional[int] = None,
):
"""
Unified API to get any scheduler from its name.
Args:
        name (:obj:`str` or :obj:`SchedulerType`):
The name of the scheduler to use.
optimizer (:obj:`torch.optim.Optimizer`):
The optimizer that will be used during training.
num_warmup_steps (:obj:`int`, `optional`):
The number of warmup steps to do. This is not required by all schedulers (hence the argument being
optional), the function will raise an error if it's unset and the scheduler type requires it.
num_training_steps (:obj:`int`, `optional`):
The number of training steps to do. This is not required by all schedulers (hence the argument being
optional), the function will raise an error if it's unset and the scheduler type requires it.
"""
name = SchedulerType(name)
schedule_func = TYPE_TO_SCHEDULER_FUNCTION[name]
if name == SchedulerType.CONSTANT:
return schedule_func(optimizer)
# All other schedulers require `num_warmup_steps`
if num_warmup_steps is None:
raise ValueError(f"{name} requires `num_warmup_steps`, please provide that argument.")
if name == SchedulerType.CONSTANT_WITH_WARMUP:
return schedule_func(optimizer, num_warmup_steps=num_warmup_steps)
# All other schedulers require `num_training_steps`
if num_training_steps is None:
raise ValueError(f"{name} requires `num_training_steps`, please provide that argument.")
return schedule_func(optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps)
|
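A minimal usage sketch for the scheduler factory above. It assumes the Hugging Face transformers package (which exports get_scheduler with this signature) plus torch; the model, learning rate, and step counts are illustrative.

import torch
from transformers import get_scheduler

model = torch.nn.Linear(4, 2)
optimizer = torch.optim.AdamW(model.parameters(), lr=5e-5)

# "constant" needs neither step argument; every other schedule raises a ValueError
# when num_warmup_steps (and, beyond constant_with_warmup, num_training_steps) is missing.
lr_scheduler = get_scheduler("linear", optimizer, num_warmup_steps=10, num_training_steps=100)

for _ in range(100):
    optimizer.step()
    lr_scheduler.step()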
57,889 |
def item_to_incident(item):
if not (occured := item.get('timestamp_occured_iso8601')):
occured = convert_timestamp_to_iso86(
item.get('timestamp_occured', ''),
item.get("time_offset", 'Z')
)
incident = {
'Type': 'AlienVault USM',
'name': 'Alarm: ' + item.get('uuid'),
'occurred': occured,
'rawJSON': json.dumps(item),
}
return incident
|
def item_to_incident(item):
if not (occurred := item.get('timestamp_occured_iso8601')):
occurred = convert_timestamp_to_iso86(
item.get('timestamp_occured', ''),
item.get("time_offset", 'Z')
)
incident = {
'Type': 'AlienVault USM',
'name': 'Alarm: ' + item.get('uuid'),
        'occurred': occurred,
'rawJSON': json.dumps(item),
}
return incident
|
31,345 |
def main() -> None:
"""main function, parses params and runs command functions
:return:
:rtype:
"""
''' EXECUTION '''
#LOG('command is %s' % (demisto.command(), ))
demisto.debug(f'Command being called is {demisto.command()}')
try:
LOG('Command being called is {command}'.format(command=demisto.command()))
if demisto.command() == 'Picus-GetAccessToken':
getAccessToken()
elif demisto.command() == 'Picus-Vector-Compare': # Makes a comparison of the given vector's results
token = getAccessToken()
demisto.results(vectorCompare(token))
elif demisto.command() == 'Picus-Attack-Result-List': # Returns the list of the attack results\nhave optional parameters for pagination and filtration
token = getAccessToken()
demisto.results(attackResultList(token))
elif demisto.command() == 'Picus-Specific-Threats-Results': # Returns the list of the attack results of a single threat\nhave optional
token = getAccessToken()
demisto.results(specificThreatsResults(token))
elif demisto.command() == 'Picus-Peer-List': # Returns the peer list with current statuses
token = getAccessToken()
demisto.results(peerList(token))
elif demisto.command() == 'Picus-EMail-Peer-List': # Returns the E-Mail peer list with current statuses
token = getAccessToken()
demisto.results(eMailPeerList(token))
elif demisto.command() == 'Picus-Attack-All-Vectors': # Schedules given attack on all possible vectors
token = getAccessToken()
demisto.results(attackAllVectors(token))
elif demisto.command() == 'Picus-Attack-Single': # Schedules a single attack on requested vector
token = getAccessToken()
demisto.results(attackSingle(token))
elif demisto.command() == 'Picus-Trigger-Update': # Triggers the update mechanism manually, returns if the update-command is taken successfully
token = getAccessToken()
demisto.results(triggerUpdate(token))
elif demisto.command() == 'Picus-Version': # Returns the current version and the update time config
token = getAccessToken()
demisto.results(version(token))
elif demisto.command() == 'Picus-Threat-List': # Returns the list of the threats\nhave optional parameters for pagination and filtration
token = getAccessToken()
demisto.results(threatList(token))
elif demisto.command() == 'Picus-Mitigation-List': # Returns the list of the mitigations of threats\nhave optional parameters for pagination and filtration, this route may not be used associated with your license
token = getAccessToken()
demisto.results(mitigationList(token))
elif demisto.command() == 'Picus-Mitre-Matrix': # Returns the mitre matrix metadata\ntakes no parameters
token = getAccessToken()
demisto.results(mitreMatrix(token))
elif demisto.command() == 'Picus-Sigma-Rules-List': # Returns the list of the sigma rules of scenario actions\nhave optional parameters for pagination and filtration, this route may not be used associated with your license
token = getAccessToken()
demisto.results(sigmaRulesList(token))
elif demisto.command() == 'Picus-Vector-List': # Returns the list of the vectors all disabled and enabled ones\nhave optional parameters for pagination
token = getAccessToken()
demisto.results(vectorList(token))
elif demisto.command() == 'test-module':
demisto.results(test_module())
# Log exceptions and return errors
except Exception as e:
demisto.error(traceback.format_exc()) # print the traceback
return_error(f'Failed to execute {demisto.command()} command.\nError:\n{str(e)}')
|
def main() -> None:
"""main function, parses params and runs command functions
:return:
:rtype:
"""
''' EXECUTION '''
#LOG('command is %s' % (demisto.command(), ))
demisto.debug(f'Command being called is {demisto.command()}')
try:
LOG('Command being called is {command}'.format(command=demisto.command()))
if demisto.command() == 'Picus-GetAccessToken':
getAccessToken()
elif demisto.command() == 'Picus-Vector-Compare': # Makes a comparison of the given vector's results
token = getAccessToken()
demisto.results(vectorCompare(token))
elif demisto.command() == 'Picus-Attack-Result-List': # Returns the list of the attack results\nhave optional parameters for pagination and filtration
token = getAccessToken()
demisto.results(attackResultList(token))
elif demisto.command() == 'picus-specific-threats-results': # Returns the list of the attack results of a single threat\nhave optional
token = getAccessToken()
demisto.results(specificThreatsResults(token))
elif demisto.command() == 'Picus-Peer-List': # Returns the peer list with current statuses
token = getAccessToken()
demisto.results(peerList(token))
elif demisto.command() == 'Picus-EMail-Peer-List': # Returns the E-Mail peer list with current statuses
token = getAccessToken()
demisto.results(eMailPeerList(token))
elif demisto.command() == 'Picus-Attack-All-Vectors': # Schedules given attack on all possible vectors
token = getAccessToken()
demisto.results(attackAllVectors(token))
elif demisto.command() == 'Picus-Attack-Single': # Schedules a single attack on requested vector
token = getAccessToken()
demisto.results(attackSingle(token))
elif demisto.command() == 'Picus-Trigger-Update': # Triggers the update mechanism manually, returns if the update-command is taken successfully
token = getAccessToken()
demisto.results(triggerUpdate(token))
elif demisto.command() == 'Picus-Version': # Returns the current version and the update time config
token = getAccessToken()
demisto.results(version(token))
elif demisto.command() == 'Picus-Threat-List': # Returns the list of the threats\nhave optional parameters for pagination and filtration
token = getAccessToken()
demisto.results(threatList(token))
elif demisto.command() == 'Picus-Mitigation-List': # Returns the list of the mitigations of threats\nhave optional parameters for pagination and filtration, this route may not be used associated with your license
token = getAccessToken()
demisto.results(mitigationList(token))
elif demisto.command() == 'Picus-Mitre-Matrix': # Returns the mitre matrix metadata\ntakes no parameters
token = getAccessToken()
demisto.results(mitreMatrix(token))
elif demisto.command() == 'Picus-Sigma-Rules-List': # Returns the list of the sigma rules of scenario actions\nhave optional parameters for pagination and filtration, this route may not be used associated with your license
token = getAccessToken()
demisto.results(sigmaRulesList(token))
elif demisto.command() == 'Picus-Vector-List': # Returns the list of the vectors all disabled and enabled ones\nhave optional parameters for pagination
token = getAccessToken()
demisto.results(vectorList(token))
elif demisto.command() == 'test-module':
demisto.results(test_module())
# Log exceptions and return errors
except Exception as e:
demisto.error(traceback.format_exc()) # print the traceback
return_error(f'Failed to execute {demisto.command()} command.\nError:\n{str(e)}')
|
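One design note on the dispatch above: every branch repeats the getAccessToken() / demisto.results(...) pair. A common way to tighten this is a command-to-handler table; the sketch below is not part of the integration, it only reuses handler names already visible above and omits the remaining entries.

COMMANDS = {
    'Picus-Vector-Compare': vectorCompare,
    'Picus-Attack-Result-List': attackResultList,
    'picus-specific-threats-results': specificThreatsResults,
    'Picus-Peer-List': peerList,
    # ... the remaining Picus-* commands follow the same pattern
}

command = demisto.command()
if command == 'Picus-GetAccessToken':
    getAccessToken()
elif command == 'test-module':
    demisto.results(test_module())
elif command in COMMANDS:
    token = getAccessToken()
    demisto.results(COMMANDS[command](token))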
8,250 |
def carrington_header(date, observer_coordinate, *, shape_out, projection_code="CAR"):
"""
Construct a FITS-WCS header for a Carrington coordinate frame.
The date-time and observer coordinate of the new coordinate frame
are taken from the input map. The resulting WCS covers the full surface
of the Sun, and has a reference coordinate at (0, 0) degrees Carrington
Longitude/Latitude.
Parameters
----------
date :
Date for the output header.
observer_coordinate :
Observer coordinate for the output header.
shape_out : [int, int]
Output map shape, number of pixels in (latitude, longitude).
projection_code : {'CAR', 'CEA'}
Projection to use for the latitude coordinate.
Returns
-------
`~sunpy.util.MetaDict`
"""
valid_codes = {"CAR", "CEA"}
if projection_code not in valid_codes:
raise ValueError(f"projection_code must be one of {valid_codes}")
frame_out = SkyCoord(
0 * u.deg,
0 * u.deg,
frame="heliographic_carrington",
obstime=date,
observer=observer_coordinate,
)
if projection_code == "CAR":
scale = [360 / int(shape_out[0]), 180 / int(shape_out[1])] * u.deg / u.pix
elif projection_code == "CEA":
        # Since this map uses the cylindrical equal-area (CEA) projection,
        # the spacing needs to be 180/pi times the sin(latitude) spacing.
        # Reference: Section 5.5, Thompson 2006
scale = [360 / int(shape_out[0]), 180 / int(shape_out[1]) / (np.pi / 2)] * u.deg / u.pix
# Header helper expects shape to be in [y, x] order, but scale in [x, y]...
header = make_fitswcs_header(shape_out[::-1], frame_out, scale=scale, projection_code=projection_code)
return header
|
def carrington_header(date, observer_coordinate, *, shape_out, projection_code="CAR"):
"""
Construct a FITS-WCS header for a Carrington coordinate frame.
The date-time and observer coordinate of the new coordinate frame
are taken from the input map. The resulting WCS covers the full surface
of the Sun, and has a reference coordinate at (0, 0) degrees Carrington
Longitude/Latitude.
Parameters
----------
date :
Date for the output header.
observer_coordinate :
Observer coordinate for the output header.
shape_out : [int, int]
Output map shape, number of pixels in (latitude, longitude).
projection_code : {'CAR', 'CEA'}
Projection to use for the latitude coordinate.
Returns
-------
`~sunpy.util.MetaDict`
"""
valid_codes = {"CAR", "CEA"}
if projection_code not in valid_codes:
raise ValueError(f"projection_code must be one of {valid_codes}")
frame_out = SkyCoord(
0 * u.deg,
0 * u.deg,
frame="heliographic_carrington",
obstime=date,
observer=observer_coordinate,
)
if projection_code == "CAR":
scale = [360 / int(shape_out[0]), 180 / int(shape_out[1])] * u.deg / u.pix
elif projection_code == "CEA":
        # Since this map uses the cylindrical equal-area (CEA) projection,
        # the spacing needs to be 180/pi times the sin(latitude) spacing.
        # Reference: Section 5.5, Thompson 2006
scale = [360 / int(shape_out[0]), (180 / np.pi) * (2 / int(shape_out[1]))] * u.deg / u.pix
# Header helper expects shape to be in [y, x] order, but scale in [x, y]...
header = make_fitswcs_header(shape_out[::-1], frame_out, scale=scale, projection_code=projection_code)
return header
|
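Worth noting about the CEA branch: the two scale expressions are algebraically identical, only restated for clarity. With n_lat = shape_out[1], 180 / n_lat / (pi / 2) = (180 / pi) * (2 / n_lat) = 360 / (pi * n_lat) degrees per pixel. A quick numeric check (the shape value is illustrative):

import numpy as np

n_lat = 180
old_scale = 180 / n_lat / (np.pi / 2)
new_scale = (180 / np.pi) * (2 / n_lat)
assert np.isclose(old_scale, new_scale)   # both equal 360 / (pi * n_lat) ~= 0.6366 deg/pix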
58,983 |
def test_normalize_3d_tensor():
torch.manual_seed(28)
n_channels = 3
img_size = 10
mean = torch.rand(n_channels)
std = torch.rand(n_channels)
img = torch.rand(n_channels, img_size, img_size)
target = F.normalize(img, mean, std)
mean_unsqueezed = mean.view(-1, 1, 1)
std_unsqueezed = std.view(-1, 1, 1)
result1 = F.normalize(img, mean_unsqueezed, std_unsqueezed)
result2 = F.normalize(img, mean_unsqueezed.repeat(1, img_size, img_size), std_unsqueezed.repeat(1, img_size, img_size))
torch.testing.assert_close(target, result1)
torch.testing.assert_close(target, result2)
|
def test_normalize_3d_tensor():
torch.manual_seed(28)
n_channels = 3
img_size = 10
mean = torch.rand(n_channels)
std = torch.rand(n_channels)
img = torch.rand(n_channels, img_size, img_size)
target = F.normalize(img, mean, std)
mean_unsqueezed = mean.view(-1, 1, 1)
std_unsqueezed = std.view(-1, 1, 1)
result1 = F.normalize(img, mean_unsqueezed, std_unsqueezed)
result2 = F.normalize(img, mean_unsqueezed.repeat(1, img_size, img_size),
std_unsqueezed.repeat(1, img_size, img_size))
torch.testing.assert_close(target, result1)
torch.testing.assert_close(target, result2)
|
35,024 |
def save_module(module_path, graph, lib, params, cross=None,
cross_options=None):
"""
Create a tarball containing the generated TVM graph,
exported library and parameters
Parameters
----------
module_path : str
path to the target tar.gz file to be created,
including the file name
graph : str
A JSON-serialized TVM execution graph.
lib : tvm.module.Module
A TVM module containing the compiled functions.
params : dict
The parameters (weights) for the TVM module.
cross : str or callable object, optional
Function that performs the actual compilation
    cross_options : str of cross compilation options
"""
lib_name = "mod.so"
graph_name = "mod.json"
param_name = "mod.params"
temp = utils.tempdir()
path_lib = temp.relpath(lib_name)
if not cross:
logger.debug("exporting library to %s", path_lib)
lib.export_library(path_lib)
else:
logger.debug("exporting library to %s , using cross compiler %s", path_lib, cross)
lib.export_library(path_lib, cc.cross_compiler(cross, options=cross_options.split(' ')))
with open(temp.relpath(graph_name), "w") as graph_file:
logger.debug("writing graph to file to %s", graph_file.name)
graph_file.write(graph)
with open(temp.relpath(param_name), "wb") as params_file:
logger.debug("writing params to file to %s", params_file.name)
params_file.write(runtime.save_param_dict(params))
logger.debug("saving module as tar file to %s", module_path)
with tarfile.open(module_path, "w") as tar:
tar.add(path_lib, lib_name)
tar.add(temp.relpath(graph_name), graph_name)
tar.add(temp.relpath(param_name), param_name)
|
def save_module(module_path, graph, lib, params, cross=None, cross_options=None):
"""
Create a tarball containing the generated TVM graph,
exported library and parameters
Parameters
----------
module_path : str
path to the target tar.gz file to be created,
including the file name
graph : str
A JSON-serialized TVM execution graph.
lib : tvm.module.Module
A TVM module containing the compiled functions.
params : dict
The parameters (weights) for the TVM module.
cross : str or callable object, optional
Function that performs the actual compilation
    cross_options : str of cross compilation options
"""
lib_name = "mod.so"
graph_name = "mod.json"
param_name = "mod.params"
temp = utils.tempdir()
path_lib = temp.relpath(lib_name)
if not cross:
logger.debug("exporting library to %s", path_lib)
lib.export_library(path_lib)
else:
logger.debug("exporting library to %s , using cross compiler %s", path_lib, cross)
lib.export_library(path_lib, cc.cross_compiler(cross, options=cross_options.split(' ')))
with open(temp.relpath(graph_name), "w") as graph_file:
logger.debug("writing graph to file to %s", graph_file.name)
graph_file.write(graph)
with open(temp.relpath(param_name), "wb") as params_file:
logger.debug("writing params to file to %s", params_file.name)
params_file.write(runtime.save_param_dict(params))
logger.debug("saving module as tar file to %s", module_path)
with tarfile.open(module_path, "w") as tar:
tar.add(path_lib, lib_name)
tar.add(temp.relpath(graph_name), graph_name)
tar.add(temp.relpath(param_name), param_name)
|
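A small, standard-library-only sketch of inspecting the artifact this produces; the file name is hypothetical, and the member names come straight from the function above.

import tarfile

with tarfile.open("compiled_module.tar") as tar:
    print(tar.getnames())   # expected: ['mod.so', 'mod.json', 'mod.params']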
2,058 |
def _deprecate_positional_args(f):
"""Decorator for methods that issues warnings for positional arguments.
Using the keyword-only argument syntax in pep 3102, arguments after the
* will issue a warning when passed as a positional argument.
Parameters
----------
f : function
function to check arguments on.
"""
sig = signature(f)
kwonly_args = []
all_args = []
for name, param in sig.parameters.items():
if param.kind == Parameter.POSITIONAL_OR_KEYWORD:
all_args.append(name)
elif param.kind == Parameter.KEYWORD_ONLY:
kwonly_args.append(name)
@wraps(f)
def inner_f(*args, **kwargs):
extra_args = len(args) - len(all_args)
if extra_args <= 0:
return f(*args, **kwargs)
# extra_args > 0
args_msg = ['{}={}'.format(name, arg)
for name, arg in zip(kwonly_args[:extra_args],
args[-extra_args:])]
warnings.warn("Pass {} as keyword args. From version 0.25 "
"passing these as positional arguments will "
"result in an error".format(", ".join(args_msg)),
FutureWarning)
kwargs.update(zip(sig.parameters, args))
return f(**kwargs)
return inner_f
|
def _deprecate_positional_args(f):
"""Decorator for methods that issues warnings for positional arguments.
Using the keyword-only argument syntax in pep 3102, arguments after the
* will issue a warning when passed as a positional argument.
Parameters
----------
f : callable
function to check arguments on.
"""
sig = signature(f)
kwonly_args = []
all_args = []
for name, param in sig.parameters.items():
if param.kind == Parameter.POSITIONAL_OR_KEYWORD:
all_args.append(name)
elif param.kind == Parameter.KEYWORD_ONLY:
kwonly_args.append(name)
@wraps(f)
def inner_f(*args, **kwargs):
extra_args = len(args) - len(all_args)
if extra_args <= 0:
return f(*args, **kwargs)
# extra_args > 0
args_msg = ['{}={}'.format(name, arg)
for name, arg in zip(kwonly_args[:extra_args],
args[-extra_args:])]
warnings.warn("Pass {} as keyword args. From version 0.25 "
"passing these as positional arguments will "
"result in an error".format(", ".join(args_msg)),
FutureWarning)
kwargs.update(zip(sig.parameters, args))
return f(**kwargs)
return inner_f
|
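A short usage sketch of the decorator; the decorated function and its arguments are made up, and it assumes the decorator above is importable along with its own dependencies (inspect.signature, functools.wraps, warnings).

@_deprecate_positional_args
def resample(X, *, replace=True, random_state=None):
    return X

resample([1, 2, 3], False)            # FutureWarning: "Pass replace=False as keyword args. ..."
resample([1, 2, 3], replace=False)    # keyword usage: no warning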