id
int64 11
59.9k
| original
stringlengths 33
150k
| modified
stringlengths 37
150k
|
---|---|---|
42,786 |
def test_str_lower():
"""Test string converts to lowercase"""
df = pd.DataFrame(
{
"codes": range(1, 7),
"names": [
"Graham Chapman",
"John Cleese",
"Terry Gilliam",
"Eric Idle",
"Terry Jones",
"Michael Palin",
],
}
)
expected = pd.DataFrame(
{
"codes": range(1, 7),
"names": [
"graham chapman",
"john cleese",
"terry gilliam",
"eric idle",
"terry jones",
"michael palin",
],
}
)
result = process_text(df, column="names", string_function="lower")
assert_frame_equal(result, expected)
|
def test_str_lower():
"""Test string converts to lowercase"""
df = pd.DataFrame(
{
"codes": range(1, 7),
"names": [
"Graham Chapman",
"John Cleese",
"Terry Gilliam",
"Eric Idle",
"Terry Jones",
"Michael Palin",
],
}
)
expected = pd.DataFrame(
{
"codes": range(1, 7),
"names": [
"graham chapman",
"john cleese",
"terry gilliam",
"eric idle",
"terry jones",
"michael palin",
],
}
)
result = df.process_text(column="names", string_function="lower")
assert_frame_equal(result, expected)
|
5,835 |
def combine_pvalues(pvalues, method='fisher', weights=None):
"""
Combine p-values from independent tests that bear upon the same hypothesis.
These methods are intended only for combining p-values from hypothesis
tests based upon continuous distributions.
Each method assumes that under the null hypothesis, the p-values are
sampled independently and uniformly from the interval [0, 1]. A test
statistic (different for each method) is computed and a combined
p-value is calculated based upon the distribution of this test statistic
under the null hypothesis.
Parameters
----------
pvalues : array_like, 1-D
Array of p-values assumed to come from independent tests based on
continuous distributions.
method : {'fisher', 'pearson', 'tippett', 'stouffer', 'mudholkar_george'}
Name of method to use to combine p-values.
The available methods are (see Notes for details):
* 'fisher': Fisher's method (Fisher's combined probability test)
* 'pearson': Pearson's method
* 'mudholkar_george': Mudholkar's and George's method
* 'tippett': Tippett's method
* 'stouffer': Stouffer's Z-score method
weights : array_like, 1-D, optional
Optional array of weights used only for Stouffer's Z-score method.
Returns
-------
res : CombinePValuesResult
An object containing attributes:
statistic : float
The statistic calculated by the specified method.
pvalue : float
The combined p-value.
Notes
-----
If this function is applied to tests with a discrete statistics such as
any rank test or contingency-table test, it will yield systematically
wrong results, e.g. Fisher's method will systematically overestimate the
p-value [1]_. This problem becomes less severe for large sample sizes
when the discrete distributions become approximately continuous.
The differences between the methods can be best illustrated by their
statistics and what aspects of a combination of p-values they emphasise
when considering significance [2]_. For example, methods emphasising large
p-values are more sensitive to strong false and true negatives; conversely
methods focussing on small p-values are sensitive to positives.
* The statistics of Fisher's method (also known as Fisher's combined
probability test) [3]_ is :math:`-2\\sum_i \\log(p_i)`, which is
equivalent (as a test statistics) to the product of individual p-values:
:math:`\\prod_i p_i`. Under the null hypothesis, this statistics follows
a :math:`\\chi^2` distribution. This method emphasises small p-values.
* Pearson's method uses :math:`-2\\sum_i\\log(1-p_i)`, which is equivalent
to :math:`\\prod_i \\frac{1}{1-p_i}` [2]_.
It thus emphasises large p-values.
* Mudholkar and George compromise between Fisher's and Pearson's method by
averaging their statistics [4]_. Their method emphasises extreme
p-values, both close to 1 and 0.
* Stouffer's method [5]_ uses Z-scores and the statistic:
:math:`\\sum_i \\Phi^{-1} (p_i)`, where :math:`\\Phi` is the CDF of the
standard normal distribution. The advantage of this method is that it is
straightforward to introduce weights, which can make Stouffer's method
more powerful than Fisher's method when the p-values are from studies
of different size [6]_ [7]_.
* Tippett's method uses the smallest p-value as a statistic.
(Mind that this minimum is not the combined p-value.)
Fisher's method may be extended to combine p-values from dependent tests
[8]_. Extensions such as Brown's method and Kost's method are not currently
implemented.
.. versionadded:: 0.15.0
References
----------
.. [1] Kincaid, W. M., "The Combination of Tests Based on Discrete
Distributions." Journal of the American Statistical Association 57,
no. 297 (1962), 10-19.
.. [2] Heard, N. and Rubin-Delanchey, P. "Choosing between methods of
combining p-values." Biometrika 105.1 (2018): 239-246.
.. [3] https://en.wikipedia.org/wiki/Fisher%27s_method
.. [4] George, E. O., and G. S. Mudholkar. "On the convolution of logistic
random variables." Metrika 30.1 (1983): 1-13.
.. [5] https://en.wikipedia.org/wiki/Fisher%27s_method#Relation_to_Stouffer.27s_Z-score_method
.. [6] Whitlock, M. C. "Combining probability from independent tests: the
weighted Z-method is superior to Fisher's approach." Journal of
Evolutionary Biology 18, no. 5 (2005): 1368-1373.
.. [7] Zaykin, Dmitri V. "Optimally weighted Z-test is a powerful method
for combining probabilities in meta-analysis." Journal of
Evolutionary Biology 24, no. 8 (2011): 1836-1841.
.. [8] https://en.wikipedia.org/wiki/Extensions_of_Fisher%27s_method
"""
pvalues = np.asarray(pvalues)
if pvalues.ndim != 1:
raise ValueError("pvalues is not 1-D")
if method == 'fisher':
statistic = -2 * np.sum(np.log(pvalues))
pval = distributions.chi2.sf(statistic, 2 * len(pvalues))
elif method == 'pearson':
statistic = 2 * np.sum(np.log1p(-pvalues))
pval = distributions.chi2.cdf(-statistic, 2 * len(pvalues))
elif method == 'mudholkar_george':
normalizing_factor = np.sqrt(3/len(pvalues))/np.pi
statistic = -np.sum(np.log(pvalues)) + np.sum(np.log1p(-pvalues))
nu = 5 * len(pvalues) + 4
approx_factor = np.sqrt(nu / (nu - 2))
pval = distributions.t.sf(statistic * normalizing_factor
* approx_factor, nu)
elif method == 'tippett':
statistic = np.min(pvalues)
pval = distributions.beta.cdf(statistic, 1, len(pvalues))
elif method == 'stouffer':
if weights is None:
weights = np.ones_like(pvalues)
elif len(weights) != len(pvalues):
raise ValueError("pvalues and weights must be of the same size.")
weights = np.asarray(weights)
if weights.ndim != 1:
raise ValueError("weights is not 1-D")
Zi = distributions.norm.isf(pvalues)
statistic = np.dot(weights, Zi) / np.linalg.norm(weights)
pval = distributions.norm.sf(statistic)
else:
raise ValueError(
f"Invalid method {method!r}. Valid methods are 'fisher', "
"'pearson', 'mudholkar_george', 'tippett', and 'stouffer'"
)
return CombinePValuesResult(statistic, pval)
|
def combine_pvalues(pvalues, method='fisher', weights=None):
"""
Combine p-values from independent tests that bear upon the same hypothesis.
These methods are intended only for combining p-values from hypothesis
tests based upon continuous distributions.
Each method assumes that under the null hypothesis, the p-values are
sampled independently and uniformly from the interval [0, 1]. A test
statistic (different for each method) is computed and a combined
p-value is calculated based upon the distribution of this test statistic
under the null hypothesis.
Parameters
----------
pvalues : array_like, 1-D
Array of p-values assumed to come from independent tests based on
continuous distributions.
method : {'fisher', 'pearson', 'tippett', 'stouffer', 'mudholkar_george'}
Name of method to use to combine p-values.
The available methods are (see Notes for details):
* 'fisher': Fisher's method (Fisher's combined probability test)
* 'pearson': Pearson's method
* 'mudholkar_george': Mudholkar's and George's method
* 'tippett': Tippett's method
* 'stouffer': Stouffer's Z-score method
weights : array_like, 1-D, optional
Optional array of weights used only for Stouffer's Z-score method.
Returns
-------
res : CombinePValuesResult
An unpackable tuple-like object with named attributes:
statistic : float
The statistic calculated by the specified method.
pvalue : float
The combined p-value.
Notes
-----
If this function is applied to tests with a discrete statistics such as
any rank test or contingency-table test, it will yield systematically
wrong results, e.g. Fisher's method will systematically overestimate the
p-value [1]_. This problem becomes less severe for large sample sizes
when the discrete distributions become approximately continuous.
The differences between the methods can be best illustrated by their
statistics and what aspects of a combination of p-values they emphasise
when considering significance [2]_. For example, methods emphasising large
p-values are more sensitive to strong false and true negatives; conversely
methods focussing on small p-values are sensitive to positives.
* The statistics of Fisher's method (also known as Fisher's combined
probability test) [3]_ is :math:`-2\\sum_i \\log(p_i)`, which is
equivalent (as a test statistics) to the product of individual p-values:
:math:`\\prod_i p_i`. Under the null hypothesis, this statistics follows
a :math:`\\chi^2` distribution. This method emphasises small p-values.
* Pearson's method uses :math:`-2\\sum_i\\log(1-p_i)`, which is equivalent
to :math:`\\prod_i \\frac{1}{1-p_i}` [2]_.
It thus emphasises large p-values.
* Mudholkar and George compromise between Fisher's and Pearson's method by
averaging their statistics [4]_. Their method emphasises extreme
p-values, both close to 1 and 0.
* Stouffer's method [5]_ uses Z-scores and the statistic:
:math:`\\sum_i \\Phi^{-1} (p_i)`, where :math:`\\Phi` is the CDF of the
standard normal distribution. The advantage of this method is that it is
straightforward to introduce weights, which can make Stouffer's method
more powerful than Fisher's method when the p-values are from studies
of different size [6]_ [7]_.
* Tippett's method uses the smallest p-value as a statistic.
(Mind that this minimum is not the combined p-value.)
Fisher's method may be extended to combine p-values from dependent tests
[8]_. Extensions such as Brown's method and Kost's method are not currently
implemented.
.. versionadded:: 0.15.0
References
----------
.. [1] Kincaid, W. M., "The Combination of Tests Based on Discrete
Distributions." Journal of the American Statistical Association 57,
no. 297 (1962), 10-19.
.. [2] Heard, N. and Rubin-Delanchey, P. "Choosing between methods of
combining p-values." Biometrika 105.1 (2018): 239-246.
.. [3] https://en.wikipedia.org/wiki/Fisher%27s_method
.. [4] George, E. O., and G. S. Mudholkar. "On the convolution of logistic
random variables." Metrika 30.1 (1983): 1-13.
.. [5] https://en.wikipedia.org/wiki/Fisher%27s_method#Relation_to_Stouffer.27s_Z-score_method
.. [6] Whitlock, M. C. "Combining probability from independent tests: the
weighted Z-method is superior to Fisher's approach." Journal of
Evolutionary Biology 18, no. 5 (2005): 1368-1373.
.. [7] Zaykin, Dmitri V. "Optimally weighted Z-test is a powerful method
for combining probabilities in meta-analysis." Journal of
Evolutionary Biology 24, no. 8 (2011): 1836-1841.
.. [8] https://en.wikipedia.org/wiki/Extensions_of_Fisher%27s_method
"""
pvalues = np.asarray(pvalues)
if pvalues.ndim != 1:
raise ValueError("pvalues is not 1-D")
if method == 'fisher':
statistic = -2 * np.sum(np.log(pvalues))
pval = distributions.chi2.sf(statistic, 2 * len(pvalues))
elif method == 'pearson':
statistic = 2 * np.sum(np.log1p(-pvalues))
pval = distributions.chi2.cdf(-statistic, 2 * len(pvalues))
elif method == 'mudholkar_george':
normalizing_factor = np.sqrt(3/len(pvalues))/np.pi
statistic = -np.sum(np.log(pvalues)) + np.sum(np.log1p(-pvalues))
nu = 5 * len(pvalues) + 4
approx_factor = np.sqrt(nu / (nu - 2))
pval = distributions.t.sf(statistic * normalizing_factor
* approx_factor, nu)
elif method == 'tippett':
statistic = np.min(pvalues)
pval = distributions.beta.cdf(statistic, 1, len(pvalues))
elif method == 'stouffer':
if weights is None:
weights = np.ones_like(pvalues)
elif len(weights) != len(pvalues):
raise ValueError("pvalues and weights must be of the same size.")
weights = np.asarray(weights)
if weights.ndim != 1:
raise ValueError("weights is not 1-D")
Zi = distributions.norm.isf(pvalues)
statistic = np.dot(weights, Zi) / np.linalg.norm(weights)
pval = distributions.norm.sf(statistic)
else:
raise ValueError(
f"Invalid method {method!r}. Valid methods are 'fisher', "
"'pearson', 'mudholkar_george', 'tippett', and 'stouffer'"
)
return CombinePValuesResult(statistic, pval)
|
53,319 |
def null_point_find(
x_arr=None,
y_arr=None,
z_arr=None,
u_arr=None,
v_arr=None,
w_arr=None,
MAX_ITERATIONS=500,
err=1e-10,
):
r"""
Returns an array of nullpoint object, representing
the nullpoints of the given vector space.
Parameters
----------
x_arr: array_like
The array representing the coordinates in the x-dimension.
If not given, then range values are used to construct a
uniform array on that interval.
y_arr: array_like
The array representing the coordinates in the y-dimension.
If not given, then range values are used to construct a
uniform array on that interval.
z_arr: array_like
The array representing the coordinates in the z-dimension.
If not given, then range values are used to construct a
uniform array on that interval.
u_arr: array_like
A 3D array containing the x-component of the vector values for the vector
space. If not given, the vector values are generated over the vector space
using the function func.
v_arr: array_like
A 3D array containing the y-component of the vector values for the vector
space. If not given, the vector values are generated over the vector space
using the function func.
w_arr: array_like
A 3D array containing the z-component of the vector values for the vector
space. If not given, the vector values are generated over the vector space
using the function func.
MAX_ITERATIONS: int
The maximum iterations of the Newton-Raphson method.
The default value is 500.
err: float
The threshold/error that determines if convergence has occured
using the Newton-Raphson method.
The default value is ``1e-10``.
Returns
-------
array_like of `~plasmapy.analysis.nullpoint.NullPoint`
An array of NullPoint objects representing the nullpoints
of the given vector space.
Notes
-------
This method is described by :cite:t:`haynes:2007`.
"""
# Constructing the vspace
vspace = _vector_space(
x_arr,
y_arr,
z_arr,
None,
None,
None,
u_arr,
v_arr,
w_arr,
None,
None,
)
return _vspace_iterator(vspace, MAX_ITERATIONS, err)
|
def null_point_find(
x_arr=None,
y_arr=None,
z_arr=None,
u_arr=None,
v_arr=None,
w_arr=None,
MAX_ITERATIONS=500,
err=1e-10,
):
r"""
Returns an array of nullpoint object, representing
the nullpoints of the given vector space.
Parameters
----------
x_arr: array_like
The array representing the coordinates in the x-dimension.
If not given, then range values are used to construct a
uniform array on that interval.
y_arr: array_like
The array representing the coordinates in the y-dimension.
If not given, then range values are used to construct a
uniform array on that interval.
z_arr: array_like
The array representing the coordinates in the z-dimension.
If not given, then range values are used to construct a
uniform array on that interval.
u_arr: array_like
A 3D array containing the x-component of the vector values for the vector
space. If not given, the vector values are generated over the vector space
using the function func.
v_arr: array_like
A 3D array containing the y-component of the vector values for the vector
space. If not given, the vector values are generated over the vector space
using the function func.
w_arr: array_like
A 3D array containing the z-component of the vector values for the vector
space. If not given, the vector values are generated over the vector space
using the function func.
MAX_ITERATIONS: int
The maximum iterations of the Newton-Raphson method.
The default value is 500.
err: float
The threshold/error that determines if convergence has occured
using the Newton-Raphson method.
The default value is ``1e-10``.
Returns
-------
array_like of `~plasmapy.analysis.nullpoint.NullPoint`
An array of `~plasmapy.analysis.nullpoint.NullPoint` objects
representing the nullpoints of the given vector space.
Notes
-------
This method is described by :cite:t:`haynes:2007`.
"""
# Constructing the vspace
vspace = _vector_space(
x_arr,
y_arr,
z_arr,
None,
None,
None,
u_arr,
v_arr,
w_arr,
None,
None,
)
return _vspace_iterator(vspace, MAX_ITERATIONS, err)
|
14,707 |
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the NetAtmo Thermostat."""
import pyatmo
homes_conf = config.get(CONF_HOMES)
conf = hass.data.get(DATA_NETATMO_CONFIG, {})
try:
home_data = HomeData(conf)
except pyatmo.NoDevice:
return
homes = []
rooms = {}
if homes_conf is not None:
for home_conf in homes_conf:
home = home_conf[CONF_NAME]
if home_conf[CONF_ROOMS] != []:
rooms[home] = home_conf[CONF_ROOMS]
homes.append(home)
else:
homes = home_data.get_home_names()
devices = []
for home in homes:
_LOGGER.debug("Setting up %s ...", home)
try:
room_data = ThermostatData(conf, home)
except pyatmo.NoDevice:
continue
for room_id in room_data.get_room_ids():
room_name = room_data.homedata.rooms[home][room_id]['name']
_LOGGER.debug("Setting up %s (%s) ...", room_name, room_id)
if home in rooms and room_name not in rooms[home]:
_LOGGER.debug("Excluding %s ...", room_name)
continue
_LOGGER.debug("Adding devices for room %s (%s) ...",
room_name, room_id)
devices.append(NetatmoThermostat(room_data, room_id))
add_entities(devices, True)
|
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the NetAtmo Thermostat."""
import pyatmo
homes_conf = config.get(CONF_HOMES)
auth = hass.data[DATA_NETATMO_AUTH]
try:
home_data = HomeData(conf)
except pyatmo.NoDevice:
return
homes = []
rooms = {}
if homes_conf is not None:
for home_conf in homes_conf:
home = home_conf[CONF_NAME]
if home_conf[CONF_ROOMS] != []:
rooms[home] = home_conf[CONF_ROOMS]
homes.append(home)
else:
homes = home_data.get_home_names()
devices = []
for home in homes:
_LOGGER.debug("Setting up %s ...", home)
try:
room_data = ThermostatData(conf, home)
except pyatmo.NoDevice:
continue
for room_id in room_data.get_room_ids():
room_name = room_data.homedata.rooms[home][room_id]['name']
_LOGGER.debug("Setting up %s (%s) ...", room_name, room_id)
if home in rooms and room_name not in rooms[home]:
_LOGGER.debug("Excluding %s ...", room_name)
continue
_LOGGER.debug("Adding devices for room %s (%s) ...",
room_name, room_id)
devices.append(NetatmoThermostat(room_data, room_id))
add_entities(devices, True)
|
44,768 |
def _serve_pyfunc(model):
conf = model.flavors[pyfunc.FLAVOR_NAME]
bash_cmds = []
if os.environ.get('OPTIMIZED_IMAGE'):
opt_image = os.environ.get('OPTIMIZED_IMAGE')
else:
opt_image = "false"
print(opt_image)
if pyfunc.ENV in conf and opt_image == "false":
print("Not an Optimized Image")
if not os.environ.get(DISABLE_ENV_CREATION) == "true":
_install_pyfunc_deps(MODEL_PATH, install_mlflow=True)
bash_cmds += ["source /miniconda/bin/activate custom_env"]
else:
print("Optimized image")
nginx_conf = resource_filename(mlflow.models.__name__, "container/scoring_server/nginx.conf")
# option to disable manually nginx. The default behavior is to enable nginx.
start_nginx = False if os.getenv(DISABLE_NGINX, 'false').lower() == 'true' else True
nginx = Popen(['nginx', '-c', nginx_conf]) if start_nginx else None
# link the log streams to stdout/err so they will be logged to the container logs.
# Default behavior is to do the redirection unless explicitly specified by environment variable.
if start_nginx:
check_call(['ln', '-sf', '/dev/stdout', '/var/log/nginx/access.log'])
check_call(['ln', '-sf', '/dev/stderr', '/var/log/nginx/error.log'])
cpu_count = multiprocessing.cpu_count()
os.system("pip -V")
os.system("python -V")
os.system('python -c"from mlflow.version import VERSION as V; print(V)"')
cmd = "gunicorn -w {cpu_count} ".format(cpu_count=cpu_count) + \
"${GUNICORN_CMD_ARGS} mlflow.models.container.scoring_server.wsgi:app"
bash_cmds.append(cmd)
gunicorn = Popen(["/bin/bash", "-c", " && ".join(bash_cmds)])
procs = [p for p in [nginx, gunicorn] if p]
signal.signal(signal.SIGTERM, lambda a, b: _sigterm_handler(pids=[p.pid for p in procs]))
# If either subprocess exits, so do we.
awaited_pids = _await_subprocess_exit_any(procs=procs)
_sigterm_handler(awaited_pids)
|
def _serve_pyfunc(model):
conf = model.flavors[pyfunc.FLAVOR_NAME]
bash_cmds = []
opt_image = os.environ.get('OPTIMIZED_IMAGE') or "false"
if pyfunc.ENV in conf and opt_image == "false":
print("Not an Optimized Image")
if not os.environ.get(DISABLE_ENV_CREATION) == "true":
_install_pyfunc_deps(MODEL_PATH, install_mlflow=True)
bash_cmds += ["source /miniconda/bin/activate custom_env"]
else:
print("Optimized image")
nginx_conf = resource_filename(mlflow.models.__name__, "container/scoring_server/nginx.conf")
# option to disable manually nginx. The default behavior is to enable nginx.
start_nginx = False if os.getenv(DISABLE_NGINX, 'false').lower() == 'true' else True
nginx = Popen(['nginx', '-c', nginx_conf]) if start_nginx else None
# link the log streams to stdout/err so they will be logged to the container logs.
# Default behavior is to do the redirection unless explicitly specified by environment variable.
if start_nginx:
check_call(['ln', '-sf', '/dev/stdout', '/var/log/nginx/access.log'])
check_call(['ln', '-sf', '/dev/stderr', '/var/log/nginx/error.log'])
cpu_count = multiprocessing.cpu_count()
os.system("pip -V")
os.system("python -V")
os.system('python -c"from mlflow.version import VERSION as V; print(V)"')
cmd = "gunicorn -w {cpu_count} ".format(cpu_count=cpu_count) + \
"${GUNICORN_CMD_ARGS} mlflow.models.container.scoring_server.wsgi:app"
bash_cmds.append(cmd)
gunicorn = Popen(["/bin/bash", "-c", " && ".join(bash_cmds)])
procs = [p for p in [nginx, gunicorn] if p]
signal.signal(signal.SIGTERM, lambda a, b: _sigterm_handler(pids=[p.pid for p in procs]))
# If either subprocess exits, so do we.
awaited_pids = _await_subprocess_exit_any(procs=procs)
_sigterm_handler(awaited_pids)
|
38,515 |
def star_shape_cell_centers(g: "pp.Grid", as_nan: bool = False) -> np.ndarray:
"""
For a given grid compute the star shape center for each cell.
The algorithm computes the half space intersections, by using the above method
half_space_pt,
of the spaces defined by the cell faces and the face normals.
This is a wrapper method that operate on a grid.
Parameters
----------
g: pp.Grid
the grid
as_nan: bool, optional
Decide whether, in case some cells are not star-shaped return nan as
new center. Otherwise an exception is raised (default behaviour).
Returns
-------
np.ndarray
The new cell centers.
"""
# no need for 1d or 0d grids
if g.dim < 2:
return g.cell_centers
# retrieve the faces and nodes
faces, _, sgn = sps.find(g.cell_faces)
nodes, _, _ = sps.find(g.face_nodes)
# shift the nodes close to the origin, to avoid numerical problems when coordinates are
# too big
xn = g.nodes.copy()
xn_shift = np.average(xn, axis=1)
xn -= np.tile(xn_shift, (xn.shape[1], 1)).T
# compute the star shape cell centers by constructing the half spaces of each cell
# given by its faces and related normals
cell_centers = np.zeros((3, g.num_cells))
for c in np.arange(g.num_cells):
loc = slice(g.cell_faces.indptr[c], g.cell_faces.indptr[c + 1])
faces_loc = faces[loc]
loc_n = g.face_nodes.indptr[faces_loc]
# make the normals coherent
normal = np.multiply(
sgn[loc], np.divide(g.face_normals[:, faces_loc], g.face_areas[faces_loc])
)
x0, x1 = xn[:, nodes[loc_n]], xn[:, nodes[loc_n + 1]]
coords = np.concatenate((x0, x1), axis=1)
# compute a point in the half space intersection of all cell faces
try:
cell_centers[:, c] = pp.half_space.half_space_interior_point(
normal, (x1 + x0) / 2.0, coords
)
except ValueError:
# the cell is not star-shaped
if as_nan:
cell_centers[:, c] = np.array([np.nan, np.nan, np.nan])
else:
raise ValueError(
"Cell not star-shaped impossible to compute the centre"
)
# shift back the computed cell centers and return them
return cell_centers + np.tile(xn_shift, (g.num_cells, 1)).T
|
def star_shape_cell_centers(g: "pp.Grid", as_nan: bool = False) -> np.ndarray:
"""
For a given grid compute the star shape center for each cell.
The algorithm computes the half space intersections, by using the above method
half_space_pt,
of the spaces defined by the cell faces and the face normals.
This is a wrapper method that operate on a grid.
Parameters
----------
g: pp.Grid
the grid
as_nan: bool, optional
Decide whether, in case some cells are not star-shaped return nan as
new center. Otherwise an exception is raised (default behaviour).
Returns
-------
np.ndarray
The new cell centers.
"""
# Star-shaped cell centers are trivial for 1d or 0d grids. No need to compute them:
if g.dim < 2:
return g.cell_centers
# retrieve the faces and nodes
faces, _, sgn = sps.find(g.cell_faces)
nodes, _, _ = sps.find(g.face_nodes)
# shift the nodes close to the origin, to avoid numerical problems when coordinates are
# too big
xn = g.nodes.copy()
xn_shift = np.average(xn, axis=1)
xn -= np.tile(xn_shift, (xn.shape[1], 1)).T
# compute the star shape cell centers by constructing the half spaces of each cell
# given by its faces and related normals
cell_centers = np.zeros((3, g.num_cells))
for c in np.arange(g.num_cells):
loc = slice(g.cell_faces.indptr[c], g.cell_faces.indptr[c + 1])
faces_loc = faces[loc]
loc_n = g.face_nodes.indptr[faces_loc]
# make the normals coherent
normal = np.multiply(
sgn[loc], np.divide(g.face_normals[:, faces_loc], g.face_areas[faces_loc])
)
x0, x1 = xn[:, nodes[loc_n]], xn[:, nodes[loc_n + 1]]
coords = np.concatenate((x0, x1), axis=1)
# compute a point in the half space intersection of all cell faces
try:
cell_centers[:, c] = pp.half_space.half_space_interior_point(
normal, (x1 + x0) / 2.0, coords
)
except ValueError:
# the cell is not star-shaped
if as_nan:
cell_centers[:, c] = np.array([np.nan, np.nan, np.nan])
else:
raise ValueError(
"Cell not star-shaped impossible to compute the centre"
)
# shift back the computed cell centers and return them
return cell_centers + np.tile(xn_shift, (g.num_cells, 1)).T
|
30,524 |
def get_mac_vendor(client):
args = demisto.args()
mac_address = args.get('address')
try:
title = ("%s - Results for MAC Address Query" % INTEGRATION_NAME)
raws = []
macvendors_ec = []
raw_response = client.query(address=mac_address)
if raw_response:
raws.append(raw_response)
macvendors_ec.append({
'Mac': mac_address,
'Vendor': raw_response['result'].get('company'),
'Type': raw_response['result'].get('type'),
'Address': raw_response['result'].get('address')
})
if not raws:
return ("%s - Could not find any results for given query" % INTEGRATION_NAME)
context_entry = {
"MACVendors": macvendors_ec
}
human_readable = tableToMarkdown(t=context_entry.get("MACVendors"), name=title)
return [human_readable, context_entry, raws]
except Exception as e:
LOG(e)
return_error(e)
|
def get_mac_vendor(client):
args = demisto.args()
mac_address = args.get('address')
try:
title = ("%s - Results for MAC Address Query" % INTEGRATION_NAME)
raws = []
macvendors_ec = []
raw_response = client.query(address=mac_address)
if raw_response:
raws.append(raw_response)
macvendors_ec.append({
'MACAddress': mac_address,
'Vendor': raw_response['result'].get('company'),
'Type': raw_response['result'].get('type'),
'Address': raw_response['result'].get('address')
})
if not raws:
return ("%s - Could not find any results for given query" % INTEGRATION_NAME)
context_entry = {
"MACVendors": macvendors_ec
}
human_readable = tableToMarkdown(t=context_entry.get("MACVendors"), name=title)
return [human_readable, context_entry, raws]
except Exception as e:
LOG(e)
return_error(e)
|
34,664 |
def _plot_summary_reports(
intent_summary_diverse: Dict[Text, Dict[Text, float]],
intent_summary_random: Dict[Text, Dict[Text, float]],
changed_intents_diverse: Set[Text],
changed_intents_random: Set[Text],
output_directory_diverse: Text,
output_directory_random: Text,
):
for metric in ["precision", "recall", "f1-score"]:
output_file_diverse = os.path.join(
output_directory_diverse, f"{metric}_changes.png"
)
rasa.utils.plotting.plot_intent_augmentation_summary(
augmentation_summary=intent_summary_diverse,
changed_intents=changed_intents_diverse,
metric=metric,
output_file=output_file_diverse,
)
output_file_random = os.path.join(
output_directory_random, f"{metric}_changes.png"
)
rasa.utils.plotting.plot_intent_augmentation_summary(
augmentation_summary=intent_summary_random,
changed_intents=changed_intents_random,
metric=metric,
output_file=output_file_random,
)
|
def _plot_summary_reports(
intent_summary_diverse: Dict[Text, Dict[Text, float]],
intent_summary_random: Dict[Text, Dict[Text, float]],
changed_intents_diverse: Set[Text],
changed_intents_random: Set[Text],
output_directory_diverse: Text,
output_directory_random: Text,
) -> None:
for metric in ["precision", "recall", "f1-score"]:
output_file_diverse = os.path.join(
output_directory_diverse, f"{metric}_changes.png"
)
rasa.utils.plotting.plot_intent_augmentation_summary(
augmentation_summary=intent_summary_diverse,
changed_intents=changed_intents_diverse,
metric=metric,
output_file=output_file_diverse,
)
output_file_random = os.path.join(
output_directory_random, f"{metric}_changes.png"
)
rasa.utils.plotting.plot_intent_augmentation_summary(
augmentation_summary=intent_summary_random,
changed_intents=changed_intents_random,
metric=metric,
output_file=output_file_random,
)
|
47,379 |
def require_retrieval(test_case):
"""
Decorator marking a test that requires a set of dependencies necessary for pefrorm retrieval with
[`~transformers.RagRetriever`].
These tests are skipped when respective libraries are not installed.
"""
if not (is_torch_available() and is_datasets_available() and is_faiss_available()):
test_case = unittest.skip("test requires PyTorch, datasets and faiss")(test_case)
return test_case
|
def require_retrieval(test_case):
"""
Decorator marking a test that requires a set of dependencies necessary for pefrorm retrieval with
[`RagRetriever`].
These tests are skipped when respective libraries are not installed.
"""
if not (is_torch_available() and is_datasets_available() and is_faiss_available()):
test_case = unittest.skip("test requires PyTorch, datasets and faiss")(test_case)
return test_case
|
32,532 |
def get_incidents_batch_by_time_request(client, params):
"""Perform an API request to get incidents from ProofPoint in batches to prevent a timeout.
As the api does not return the results in an specific order, we query the api on specific time frames using
created_before and created_after using the fetch delta parameter.
Args:
params(dict): The params of the request
Returns:
list. The incidents returned from the API call
"""
incidents_list = [] # type:list
fetch_delta = params.get('fetch_delta', '6 hours')
fetch_limit = arg_to_number(params.get('fetch_limit', '100'))
last_fetched_id = arg_to_number(params.get('last_fetched_id', '0'))
current_time = datetime.now()
time_delta = get_time_delta(fetch_delta)
created_after = datetime.strptime(params.get('created_after'), TIME_FORMAT)
created_before = created_after + time_delta
request_params = {
'state': params.get('state'),
'created_after': created_after.isoformat().split('.')[0] + 'Z',
'created_before': created_before.isoformat().split('.')[0] + 'Z'
}
# while loop relevant for fetching old incidents
while created_before < current_time and len(incidents_list) < fetch_limit: # type: ignore[operator]
demisto.info(
f"Entered the batch loop , with fetch_limit {fetch_limit} and events list "
f"{[incident.get('id') for incident in incidents_list]} and event length {len(incidents_list)} "
f"with created_after {request_params['created_after']} and "
f"created_before {request_params['created_before']}")
new_incidents = get_new_incidents(client, request_params, last_fetched_id)
incidents_list.extend(new_incidents)
# advancing fetch time by given fetch delta time
created_after = created_before
created_before = created_before + time_delta
# updating params according to the new times
request_params['created_after'] = created_after.isoformat().split('.')[0] + 'Z'
request_params['created_before'] = created_before.isoformat().split('.')[0] + 'Z'
demisto.debug(f"End of the current batch loop with {str(len(incidents_list))} events")
# fetching the last batch when created_before is bigger then current time = fetching new events
if len(incidents_list) < fetch_limit: # type: ignore[operator]
# fetching the last batch
request_params['created_before'] = current_time.isoformat().split('.')[0] + 'Z'
new_incidents = get_new_incidents(client, request_params, last_fetched_id)
incidents_list.extend(new_incidents)
demisto.info(
f"Finished the last batch, with fetch_limit {fetch_limit} and events list:"
f" {[incident.get('id') for incident in incidents_list]} and event length {len(incidents_list)}")
incidents_list_limit = incidents_list[:fetch_limit]
return incidents_list_limit
|
def get_incidents_batch_by_time_request(client, params):
"""Perform an API request to get incidents from ProofPoint in batches to prevent a timeout.
As the api does not return the results in an specific order, we query the api on specific time frames using
created_before and created_after using the fetch delta parameter.
Args:
params(dict): The params of the request
Returns:
list. The incidents returned from the API call
"""
incidents_list = [] # type:list
fetch_delta = params.get('fetch_delta', '6 hours')
fetch_limit = arg_to_number(params.get('fetch_limit', '100'))
last_fetched_id = arg_to_number(params.get('last_fetched_id', '0'))
current_time = datetime.now()
time_delta = get_time_delta(fetch_delta)
created_after = datetime.strptime(params.get('created_after'), TIME_FORMAT)
created_before = created_after + time_delta
request_params = {
'state': params.get('state'),
'created_after': created_after.isoformat().split('.')[0] + 'Z',
'created_before': created_before.isoformat().split('.')[0] + 'Z'
}
# while loop relevant for fetching old incidents
while created_before < current_time and len(incidents_list) < fetch_limit: # type: ignore[operator]
demisto.info(
f"Entered the batch loop , with fetch_limit {fetch_limit} and events list "
f"{[incident.get('id') for incident in incidents_list]} and event length {len(incidents_list)} "
f"with created_after {request_params['created_after']} and "
f"created_before {request_params['created_before']}")
new_incidents = get_new_incidents(client, request_params, last_fetched_id)
incidents_list.extend(new_incidents)
# advancing fetch time by given fetch delta time
created_after = created_before
created_before = created_before + time_delta
# updating params according to the new times
request_params['created_after'] = created_after.isoformat().split('.')[0] + 'Z'
request_params['created_before'] = created_before.isoformat().split('.')[0] + 'Z'
demisto.debug(f"End of the current batch loop with {str(len(incidents_list))} events")
# fetching the last batch when created_before is bigger then current time = fetching new events
if len(incidents_list) < fetch_limit: # type: ignore[operator]
# fetching the last batch
request_params['created_before'] = current_time.isoformat().split('.')[0] + 'Z'
new_incidents = get_new_incidents(client, request_params, last_fetched_id)
incidents_list.extend(new_incidents)
demisto.debug(
f"Finished the last batch, with fetch_limit {fetch_limit} and events list:"
f" {[incident.get('id') for incident in incidents_list]} and event length {len(incidents_list)}")
incidents_list_limit = incidents_list[:fetch_limit]
return incidents_list_limit
|
45,989 |
def adjust_sigmoid(image: Tensor, cutoff: float = 0.5, gain: float = 10, inv: bool = False) -> Tensor:
"""Adjust sigmoid correction on the input image tensor.
The input image is expected to be in the range of [0, 1].
Reference:
[1]: Gustav J. Braun, "Image Lightness Rescaling Using Sigmoidal Contrast Enhancement Functions",
http://markfairchild.org/PDFs/PAP07.pdf
Args:
image: Image to be adjusted in the shape of :math:`(*, H, W)`.
cutoff: The cutoff of sigmoid function.
gain: The multiplier of sigmoid function.
inv: If is set to True the function will return the inverse sigmoid correction.
Returns:
Adjusted tensor in the shape of :math:`(*, H, W)`.
Example:
>>> x = torch.ones(1, 1, 2, 2)
>>> adjust_sigmoid(x, gain=0)
tensor([[[[0.5000, 0.5000],
[0.5000, 0.5000]]]])
"""
KORNIA_CHECK_IS_TENSOR(image, "Expected shape (*, H, W)")
if inv:
img_adjust = (1 - 1 / (1 + torch.exp(gain * (cutoff - image))))
else:
img_adjust = (1 / (1 + torch.exp(gain * (cutoff - image))))
return img_adjust
|
def adjust_sigmoid(image: Tensor, cutoff: float = 0.5, gain: float = 10, inv: bool = False) -> Tensor:
"""Adjust sigmoid correction on the input image tensor.
The input image is expected to be in the range of [0, 1].
Reference:
[1]: Gustav J. Braun, "Image Lightness Rescaling Using Sigmoidal Contrast Enhancement Functions",
http://markfairchild.org/PDFs/PAP07.pdf
Args:
image: Image to be adjusted in the shape of :math:`(*, H, W)`.
cutoff: The cutoff of sigmoid function.
gain: The multiplier of sigmoid function.
inv: If is set to True the function will return the inverse sigmoid correction.
Returns:
Adjusted tensor in the shape of :math:`(*, H, W)`.
Example:
>>> x = torch.ones(1, 1, 2, 2)
>>> adjust_sigmoid(x, gain=0)
tensor([[[[0.5000, 0.5000],
[0.5000, 0.5000]]]])
"""
KORNIA_CHECK_IS_TENSOR(image, "Expected shape (*, H, W)")
if inv:
img_adjust = (1 - 1 / (1 + (gain * (cutoff - image)).exp()))
else:
img_adjust = (1 / (1 + torch.exp(gain * (cutoff - image))))
return img_adjust
|
7,150 |
def watershed(image, markers=None, connectivity=1, offset=None, mask=None,
compactness=0, watershed_line=False):
"""Find watershed basins in `image` flooded from given `markers`.
Parameters
----------
image: ndarray (2-D, 3-D, ...) of integers
Data array where the lowest value points are labeled first.
markers: int, or ndarray of int, same shape as `image`, optional
The desired number of markers, or an array marking the basins with the
values to be assigned in the label matrix. Zero means not a marker. If
no markers are given, the local minima of the image are used as
markers.
connectivity: ndarray, optional
An array with the same number of dimensions as `image` whose
non-zero elements indicate neighbors for connection.
Following the scipy convention, default is a one-connected array of
the dimension of the image.
offset: array_like of shape image.ndim, optional
offset of the connectivity (one offset per dimension)
mask: ndarray of bools or 0s and 1s, optional
Array of same shape as `image`. Only points at which mask == True
will be labeled.
compactness : float, optional
Use compact watershed [3]_ with given compactness parameter.
Higher values result in more regularly-shaped watershed basins.
watershed_line : bool, optional
If watershed_line is True, a one-pixel wide line separates the regions
obtained by the watershed algorithm. The line has the label 0.
Returns
-------
out: ndarray
A labeled matrix of the same type and shape as markers
See also
--------
skimage.segmentation.random_walker: random walker segmentation
A segmentation algorithm based on anisotropic diffusion, usually
slower than the watershed but with good results on noisy data and
boundaries with holes.
Notes
-----
This function implements a watershed algorithm [1]_ [2]_ that apportions
pixels into marked basins. The algorithm uses a priority queue to hold
the pixels with the metric for the priority queue being pixel value, then
the time of entry into the queue - this settles ties in favor of the
closest marker.
Some ideas taken from
Soille, "Automated Basin Delineation from Digital Elevation Models Using
Mathematical Morphology", Signal Processing 20 (1990) 171-182
The most important insight in the paper is that entry time onto the queue
solves two problems: a pixel should be assigned to the neighbor with the
largest gradient or, if there is no gradient, pixels on a plateau should
be split between markers on opposite sides.
This implementation converts all arguments to specific, lowest common
denominator types, then passes these to a C algorithm.
Markers can be determined manually, or automatically using for example
the local minima of the gradient of the image, or the local maxima of the
distance function to the background for separating overlapping objects
(see example).
References
----------
.. [1] https://en.wikipedia.org/wiki/Watershed_%28image_processing%29
.. [2] http://cmm.ensmp.fr/~beucher/wtshed.html
.. [3] Peer Neubert & Peter Protzel (2014). Compact Watershed and
Preemptive SLIC: On Improving Trade-offs of Superpixel Segmentation
Algorithms. ICPR 2014, pp 996-1001. :DOI:`10.1109/ICPR.2014.181`
https://www.tu-chemnitz.de/etit/proaut/forschung/rsrc/cws_pSLIC_ICPR.pdf
Examples
--------
The watershed algorithm is useful to separate overlapping objects.
We first generate an initial image with two overlapping circles:
>>> x, y = np.indices((80, 80))
>>> x1, y1, x2, y2 = 28, 28, 44, 52
>>> r1, r2 = 16, 20
>>> mask_circle1 = (x - x1)**2 + (y - y1)**2 < r1**2
>>> mask_circle2 = (x - x2)**2 + (y - y2)**2 < r2**2
>>> image = np.logical_or(mask_circle1, mask_circle2)
Next, we want to separate the two circles. We generate markers at the
maxima of the distance to the background:
>>> from scipy import ndimage as ndi
>>> distance = ndi.distance_transform_edt(image)
>>> from skimage.feature import peak_local_max
>>> local_maxi = peak_local_max(distance, labels=image,
... footprint=np.ones((3, 3)),
... indices=False)
>>> markers = ndi.label(local_maxi)[0]
Finally, we run the watershed on the image and markers:
>>> labels = watershed(-distance, markers, mask=image)
The algorithm works also for 3-D images, and can be used for example to
separate overlapping spheres.
"""
image, markers, mask = _validate_inputs(image, markers, mask, connectivity)
connectivity, offset = _validate_connectivity(image.ndim, connectivity,
offset)
# pad the image, markers, and mask so that we can use the mask to
# keep from running off the edges
pad_width = [(p, p) for p in offset]
image = np.pad(image, pad_width, mode='constant')
mask = np.pad(mask, pad_width, mode='constant').ravel()
output = np.pad(markers, pad_width, mode='constant')
flat_neighborhood = _offsets_to_raveled_neighbors(
image.shape, connectivity, center=offset)
marker_locations = np.flatnonzero(output)
image_strides = np.array(image.strides, dtype=np.intp) // image.itemsize
_watershed.watershed_raveled(image.ravel(),
marker_locations, flat_neighborhood,
mask, image_strides, compactness,
output.ravel(),
watershed_line)
output = crop(output, pad_width, copy=True)
return output
|
def watershed(image, markers=None, connectivity=1, offset=None, mask=None,
compactness=0, watershed_line=False):
"""Find watershed basins in `image` flooded from given `markers`.
Parameters
----------
image: ndarray (2-D, 3-D, ...) of integers
Data array where the lowest value points are labeled first.
markers: int, or ndarray of int, same shape as `image`, optional
The desired number of markers, or an array marking the basins with the
values to be assigned in the label matrix. Zero means not a marker. If
no ``None``, the local minima of the image are used as
markers.
connectivity: ndarray, optional
An array with the same number of dimensions as `image` whose
non-zero elements indicate neighbors for connection.
Following the scipy convention, default is a one-connected array of
the dimension of the image.
offset: array_like of shape image.ndim, optional
offset of the connectivity (one offset per dimension)
mask: ndarray of bools or 0s and 1s, optional
Array of same shape as `image`. Only points at which mask == True
will be labeled.
compactness : float, optional
Use compact watershed [3]_ with given compactness parameter.
Higher values result in more regularly-shaped watershed basins.
watershed_line : bool, optional
If watershed_line is True, a one-pixel wide line separates the regions
obtained by the watershed algorithm. The line has the label 0.
Returns
-------
out: ndarray
A labeled matrix of the same type and shape as markers
See also
--------
skimage.segmentation.random_walker: random walker segmentation
A segmentation algorithm based on anisotropic diffusion, usually
slower than the watershed but with good results on noisy data and
boundaries with holes.
Notes
-----
This function implements a watershed algorithm [1]_ [2]_ that apportions
pixels into marked basins. The algorithm uses a priority queue to hold
the pixels with the metric for the priority queue being pixel value, then
the time of entry into the queue - this settles ties in favor of the
closest marker.
Some ideas taken from
Soille, "Automated Basin Delineation from Digital Elevation Models Using
Mathematical Morphology", Signal Processing 20 (1990) 171-182
The most important insight in the paper is that entry time onto the queue
solves two problems: a pixel should be assigned to the neighbor with the
largest gradient or, if there is no gradient, pixels on a plateau should
be split between markers on opposite sides.
This implementation converts all arguments to specific, lowest common
denominator types, then passes these to a C algorithm.
Markers can be determined manually, or automatically using for example
the local minima of the gradient of the image, or the local maxima of the
distance function to the background for separating overlapping objects
(see example).
References
----------
.. [1] https://en.wikipedia.org/wiki/Watershed_%28image_processing%29
.. [2] http://cmm.ensmp.fr/~beucher/wtshed.html
.. [3] Peer Neubert & Peter Protzel (2014). Compact Watershed and
Preemptive SLIC: On Improving Trade-offs of Superpixel Segmentation
Algorithms. ICPR 2014, pp 996-1001. :DOI:`10.1109/ICPR.2014.181`
https://www.tu-chemnitz.de/etit/proaut/forschung/rsrc/cws_pSLIC_ICPR.pdf
Examples
--------
The watershed algorithm is useful to separate overlapping objects.
We first generate an initial image with two overlapping circles:
>>> x, y = np.indices((80, 80))
>>> x1, y1, x2, y2 = 28, 28, 44, 52
>>> r1, r2 = 16, 20
>>> mask_circle1 = (x - x1)**2 + (y - y1)**2 < r1**2
>>> mask_circle2 = (x - x2)**2 + (y - y2)**2 < r2**2
>>> image = np.logical_or(mask_circle1, mask_circle2)
Next, we want to separate the two circles. We generate markers at the
maxima of the distance to the background:
>>> from scipy import ndimage as ndi
>>> distance = ndi.distance_transform_edt(image)
>>> from skimage.feature import peak_local_max
>>> local_maxi = peak_local_max(distance, labels=image,
... footprint=np.ones((3, 3)),
... indices=False)
>>> markers = ndi.label(local_maxi)[0]
Finally, we run the watershed on the image and markers:
>>> labels = watershed(-distance, markers, mask=image)
The algorithm works also for 3-D images, and can be used for example to
separate overlapping spheres.
"""
image, markers, mask = _validate_inputs(image, markers, mask, connectivity)
connectivity, offset = _validate_connectivity(image.ndim, connectivity,
offset)
# pad the image, markers, and mask so that we can use the mask to
# keep from running off the edges
pad_width = [(p, p) for p in offset]
image = np.pad(image, pad_width, mode='constant')
mask = np.pad(mask, pad_width, mode='constant').ravel()
output = np.pad(markers, pad_width, mode='constant')
flat_neighborhood = _offsets_to_raveled_neighbors(
image.shape, connectivity, center=offset)
marker_locations = np.flatnonzero(output)
image_strides = np.array(image.strides, dtype=np.intp) // image.itemsize
_watershed.watershed_raveled(image.ravel(),
marker_locations, flat_neighborhood,
mask, image_strides, compactness,
output.ravel(),
watershed_line)
output = crop(output, pad_width, copy=True)
return output
|
37,918 |
def load_libgmt(lib_fullnames=None):
"""
Find and load ``libgmt`` as a :py:class:`ctypes.CDLL`.
Will look for the GMT shared library in the directories determined by
clib_full_names().
Parameters
----------
lib_fullnames : list of str or None
List of possible full names of GMT's shared library. If ``None``, will
default to ``clib_full_names()``.
Returns
-------
:py:class:`ctypes.CDLL` object
The loaded shared library.
Raises
------
GMTCLibNotFoundError
If there was any problem loading the library (couldn't find it or
couldn't access the functions).
"""
if lib_fullnames is None:
lib_fullnames = clib_full_names()
error = True
error_msg = []
failing_libs = []
for libname in lib_fullnames:
try:
if libname in failing_libs: # libname is known to fail, so skip it
continue
libgmt = ctypes.CDLL(libname)
check_libgmt(libgmt)
error = False
break
except (OSError, GMTCLibError) as err:
error_msg.append(
f"Error loading the GMT shared library '{libname}'.\n{err}"
)
failing_libs.append(libname)
if error:
raise GMTCLibNotFoundError("\n".join(error_msg))
return libgmt
|
def load_libgmt(lib_fullnames=None):
"""
Find and load ``libgmt`` as a :py:class:`ctypes.CDLL`.
Will look for the GMT shared library in the directories determined by
clib_full_names().
Parameters
----------
lib_fullnames : list of str or None
List of possible full names of GMT's shared library. If ``None``, will
default to ``clib_full_names()``.
Returns
-------
:py:class:`ctypes.CDLL` object
The loaded shared library.
Raises
------
GMTCLibNotFoundError
If there was any problem loading the library (couldn't find it or
couldn't access the functions).
"""
lib_fullnames = lib_fullnames or clib_full_names()
error = True
error_msg = []
failing_libs = []
for libname in lib_fullnames:
try:
if libname in failing_libs: # libname is known to fail, so skip it
continue
libgmt = ctypes.CDLL(libname)
check_libgmt(libgmt)
error = False
break
except (OSError, GMTCLibError) as err:
error_msg.append(
f"Error loading the GMT shared library '{libname}'.\n{err}"
)
failing_libs.append(libname)
if error:
raise GMTCLibNotFoundError("\n".join(error_msg))
return libgmt
|
49,697 |
def get_versions(reporev=True):
"""Get version information for components used by Spyder"""
import sys
import platform
import qtpy
import qtpy.QtCore
revision = None
if reporev:
from spyder.utils import vcs
revision, branch = vcs.get_git_revision(os.path.dirname(__current_directory__))
if not sys.platform == 'darwin': # To avoid a crash with our Mac app
system = platform.system()
else:
system = 'Darwin'
return {
'spyder': __version__,
'python': platform.python_version(), # "2.7.3"
'bitness': 64 if sys.maxsize > 2**32 else 32,
'qt': qtpy.QtCore.__version__,
'qt_api': qtpy.API_NAME, # PyQt5
'qt_api_ver': qtpy.PYQT_VERSION,
'system': system, # Linux, Windows, ...
'release': platform.release(), # XP, 10.6, 2.2.0, etc.
'revision': revision, # '9fdf926eccce',
'branch': branch, # '4.x' or master
}
|
def get_versions(reporev=True):
"""Get version information for components used by Spyder"""
import sys
import platform
import qtpy
import qtpy.QtCore
revision = None
if reporev:
from spyder.utils import vcs
revision, branch = vcs.get_git_revision(
os.path.dirname(__current_directory__))
if not sys.platform == 'darwin': # To avoid a crash with our Mac app
system = platform.system()
else:
system = 'Darwin'
return {
'spyder': __version__,
'python': platform.python_version(), # "2.7.3"
'bitness': 64 if sys.maxsize > 2**32 else 32,
'qt': qtpy.QtCore.__version__,
'qt_api': qtpy.API_NAME, # PyQt5
'qt_api_ver': qtpy.PYQT_VERSION,
'system': system, # Linux, Windows, ...
'release': platform.release(), # XP, 10.6, 2.2.0, etc.
'revision': revision, # '9fdf926eccce',
'branch': branch, # '4.x' or master
}
|
14,059 |
def points_from_xy(x, y, z=None, crs=None):
"""
Generate GeometryArray of shapely Point geometries from x, y(, z) coordinates.
In case of geographic coordinates, it is assumed that longitude is captured by
``x`` coordinates and latitude by ``y``.
Parameters
----------
x, y, z : iterable
crs : value, optional
Coordinate Reference System of the geometry objects. Can be anything accepted by
:meth:`pyproj.CRS.from_user_input() <pyproj.crs.CRS.from_user_input>`,
such as an authority string (eg "EPSG:4326") or a WKT string.
Examples
--------
>>> import pandas as pd
>>> df = pd.DataFrame({'x': [0, 1, 2], 'y': [0, 1, 2], 'z': [0, 1, 2]})
>>> df
x y z
0 0 0 0
1 1 1 1
2 2 2 2
>>> geometry = geopandas.points_from_xy(x=[1, 0], y=[0, 1])
>>> geometry = geopandas.points_from_xy(df['x'], df['y'], df['z'])
>>> gdf = geopandas.GeoDataFrame(
... df, geometry=geopandas.points_from_xy(df['x'], df['y']))
Having geographic coordinates:
>>> df = pd.DataFrame({'longitude': [-140, 0, 123], 'latitude': [-65, 1, 48]})
>>> df
longitude latitude
0 -140 -65
1 0 1
2 123 48
>>> geometry = geopandas.points_from_xy(df.longitude, df.latitude, crs=4326)
Returns
-------
output : GeometryArray
"""
return GeometryArray(vectorized.points_from_xy(x, y, z), crs=crs)
|
def points_from_xy(x, y, z=None, crs=None):
"""
Generate GeometryArray of shapely Point geometries from x, y(, z) coordinates.
In case of geographic coordinates, it is assumed that longitude is captured by
``x`` coordinates and latitude by ``y``.
Parameters
----------
x, y, z : iterable
crs : value, optional
Coordinate Reference System of the geometry objects. Can be anything accepted by
:meth:`pyproj.CRS.from_user_input() <pyproj.crs.CRS.from_user_input>`,
such as an authority string (eg "EPSG:4326") or a WKT string.
Examples
--------
>>> import pandas as pd
>>> df = pd.DataFrame({'x': [0, 1, 2], 'y': [0, 1, 2], 'z': [0, 1, 2]})
>>> df
x y z
0 0 0 0
1 1 1 1
2 2 2 2
>>> geometry = geopandas.points_from_xy(x=[1, 0], y=[0, 1])
>>> geometry = geopandas.points_from_xy(df['x'], df['y'], df['z'])
>>> gdf = geopandas.GeoDataFrame(
... df, geometry=geopandas.points_from_xy(df['x'], df['y']))
Having geographic coordinates:
>>> df = pd.DataFrame({'longitude': [-140, 0, 123], 'latitude': [-65, 1, 48]})
>>> df
longitude latitude
0 -140 -65
1 0 1
2 123 48
>>> geometry = geopandas.points_from_xy(df.longitude, df.latitude, crs="EPSG:4326")
Returns
-------
output : GeometryArray
"""
return GeometryArray(vectorized.points_from_xy(x, y, z), crs=crs)
|
33,117 |
def with_recursion(func, prog=False):
"""Make function recursive in its 1st arg.
Return a version of `func` whose 2nd argument (`k`)
specifies the number of times to times apply func on its output.
.. caution:: Only the first argument to `func` will change,
so, for example, if `func` is `step(x, t, dt)`,
it will get fed the same `t` and `dt` at each iteration.
Parameters
----------
func : function
Run the input function recursively.
prog : bool or str
Determine the mode of progress bar.
Returns
-------
fun_k : function
A function that returns the sequence generated by recursively
run func (Trajectory of model evolution).
Stepping of dynamical system
Examples
--------
>>> def dxdt(x):
... return -x
>>> step_1 = with_rk4(dxdt, autonom=True)
>>> step_k = with_recursion(step_1)
>>> x0 = np.arange(3)
>>> x7 = step_k(x0, 7, t0=np.nan, dt=0.1)[-1]
>>> x7_true = x0 * np.exp(-0.7)
>>> np.allclose(x7, x7_true)
True
"""
def fun_k(x0, k, *args, **kwargs):
xx = np.zeros((k+1,)+x0.shape)
xx[0] = x0
rg = range(k)
if isinstance(prog, str):
rg = progbar(rg, prog)
elif prog:
rg = progbar(rg, 'Recurs.')
for i in rg:
xx[i+1] = func(xx[i], *args, **kwargs)
return xx
return fun_k
|
def with_recursion(func, prog=False):
"""Make function recursive in its 1st arg.
Return a version of `func` whose 2nd argument (`k`)
specifies the number of times to times apply func on its output.
.. caution:: Only the first argument to `func` will change,
so, for example, if `func` is `step(x, t, dt)`,
it will get fed the same `t` and `dt` at each iteration.
Parameters
----------
func : function
Run the input function recursively.
prog : bool or str
Enable/Disable progressbar. If `str`, set its name to this.
Returns
-------
fun_k : function
A function that returns the sequence generated by recursively
run func (Trajectory of model evolution).
Stepping of dynamical system
Examples
--------
>>> def dxdt(x):
... return -x
>>> step_1 = with_rk4(dxdt, autonom=True)
>>> step_k = with_recursion(step_1)
>>> x0 = np.arange(3)
>>> x7 = step_k(x0, 7, t0=np.nan, dt=0.1)[-1]
>>> x7_true = x0 * np.exp(-0.7)
>>> np.allclose(x7, x7_true)
True
"""
def fun_k(x0, k, *args, **kwargs):
xx = np.zeros((k+1,)+x0.shape)
xx[0] = x0
rg = range(k)
if isinstance(prog, str):
rg = progbar(rg, prog)
elif prog:
rg = progbar(rg, 'Recurs.')
for i in rg:
xx[i+1] = func(xx[i], *args, **kwargs)
return xx
return fun_k
|
36,258 |
def highly_variable_genes_seurat_v3(
adata: AnnData,
n_top_genes: int = 2000,
batch_key: Optional[str] = None,
lowess_frac: Optional[float] = 0.15,
):
"""\
Annotate highly variable genes [Stuart19]_.
Expects raw count data.
The major difference in this implementation is the use of lowess insted of loess.
For further details of the sparse arithmetic see https://www.overleaf.com/read/ckptrbgzzzpg
Parameters
----------
adata
The annotated data matrix of shape `n_obs` × `n_vars`. Rows correspond
to cells and columns to genes.
n_top_genes
Number of highly-variable genes to keep.
batch_key
If specified, highly-variable genes are selected within each batch separately and merged.
This simple process avoids the selection of batch-specific genes and acts as a
lightweight batch correction method.
lowess_frac
The fraction of the data (cells) used when estimating the variance in the lowess model fit.
"""
import statsmodels
lowess = statsmodels.nonparametric.lowess
if batch_key is None:
batch_info = pd.Categorical(np.zeros((adata.X.shape[0])).astype(int))
else:
batch_info = adata.obs[batch_key]
norm_gene_vars = []
for b in np.unique(batch_info):
mean, var = materialize_as_ndarray(_get_mean_var(adata[batch_info == b].X))
not_const = var > 0
estimat_var = np.zeros((adata.X.shape[1]))
y = np.log10(var[not_const])
x = np.log10(mean[not_const])
# output is sorted by x
v = lowess(y, x, frac=lowess_frac)
estimat_var[not_const][np.argsort(x)] = v[:, 1]
# get normalized variance
reg_std = np.sqrt(10 ** estimat_var)
batch_counts = adata[batch_info == b].X.copy()
# clip large values as in Seurat
N = np.sum(batch_info == b)
vmax = np.sqrt(N)
clip_val = reg_std * vmax + mean
# could be something faster here
for g in range(batch_counts.shape[1]):
batch_counts[:, g][batch_counts[:, g] > vmax] = clip_val[g]
if sp_sparse.issparse(batch_counts):
squared_batch_counts_sum = np.array(batch_counts.power(2).sum(axis=0))
batch_counts_sum = np.array(batch_counts.sum(axis=0))
else:
squared_batch_counts_sum = np.square(batch_counts).sum(axis=0)
batch_counts_sum = batch_counts.sum(axis=0)
norm_gene_var = (1 / ((N - 1) * np.square(reg_std))) * (
(N * np.square(mean))
+ squared_batch_counts_sum
- 2 * batch_counts_sum * mean
)
norm_gene_vars.append(norm_gene_var.reshape(1, -1))
norm_gene_vars = np.concatenate(norm_gene_vars, axis=0)
# argsort twice gives ranks
ranked_norm_gene_vars = np.argsort(np.argsort(norm_gene_vars, axis=1), axis=1)
median_ranked = np.median(ranked_norm_gene_vars, axis=0)
num_batches_high_var = np.sum(
ranked_norm_gene_vars >= (adata.X.shape[1] - n_top_genes), axis=0
)
df = pd.DataFrame(index=np.array(adata.var_names))
df["highly_variable_nbatches"] = num_batches_high_var
df["highly_variable_median_rank"] = median_ranked
df.sort_values(
["highly_variable_nbatches", "highly_variable_median_rank"],
ascending=False,
na_position="last",
inplace=True,
)
df["highly_variable"] = False
df.loc[:n_top_genes, "highly_variable"] = True
df = df.loc[adata.var_names]
adata.var["highly_variable"] = df["highly_variable"].values
if batch_key is not None:
batches = adata.obs[batch_key].cat.categories
adata.var["highly_variable_nbatches"] = df["highly_variable_nbatches"].values
adata.var["highly_variable_intersection"] = df[
"highly_variable_nbatches"
] == len(batches)
adata.var["highly_variable_median_rank"] = df["highly_variable_median_rank"].values
|
def highly_variable_genes_seurat_v3(
adata: AnnData,
n_top_genes: int = 2000,
batch_key: Optional[str] = None,
lowess_frac: Optional[float] = 0.15,
):
"""\
Annotate highly variable genes [Stuart19]_.
Expects raw count data.
The major difference in this implementation is the use of lowess insted of loess.
For further details of the sparse arithmetic see https://www.overleaf.com/read/ckptrbgzzzpg
Parameters
----------
adata
The annotated data matrix of shape `n_obs` × `n_vars`. Rows correspond
to cells and columns to genes.
n_top_genes
Number of highly-variable genes to keep.
batch_key
If specified, highly-variable genes are selected within each batch separately and merged.
This simple process avoids the selection of batch-specific genes and acts as a
lightweight batch correction method.
lowess_frac
The fraction of the data (cells) used when estimating the variance in the lowess model fit.
"""
import statsmodels
lowess = statsmodels.nonparametric.lowess
if batch_key is None:
batch_info = pd.Categorical(np.zeros((adata.X.shape[0])).astype(int))
else:
batch_info = adata.obs[batch_key]
norm_gene_vars = []
for b in np.unique(batch_info):
mean, var = materialize_as_ndarray(_get_mean_var(adata[batch_info == b].X))
not_const = var > 0
estimat_var = np.zeros(adata.shape[1])
y = np.log10(var[not_const])
x = np.log10(mean[not_const])
# output is sorted by x
v = lowess(y, x, frac=lowess_frac)
estimat_var[not_const][np.argsort(x)] = v[:, 1]
# get normalized variance
reg_std = np.sqrt(10 ** estimat_var)
batch_counts = adata[batch_info == b].X.copy()
# clip large values as in Seurat
N = np.sum(batch_info == b)
vmax = np.sqrt(N)
clip_val = reg_std * vmax + mean
# could be something faster here
for g in range(batch_counts.shape[1]):
batch_counts[:, g][batch_counts[:, g] > vmax] = clip_val[g]
if sp_sparse.issparse(batch_counts):
squared_batch_counts_sum = np.array(batch_counts.power(2).sum(axis=0))
batch_counts_sum = np.array(batch_counts.sum(axis=0))
else:
squared_batch_counts_sum = np.square(batch_counts).sum(axis=0)
batch_counts_sum = batch_counts.sum(axis=0)
norm_gene_var = (1 / ((N - 1) * np.square(reg_std))) * (
(N * np.square(mean))
+ squared_batch_counts_sum
- 2 * batch_counts_sum * mean
)
norm_gene_vars.append(norm_gene_var.reshape(1, -1))
norm_gene_vars = np.concatenate(norm_gene_vars, axis=0)
# argsort twice gives ranks
ranked_norm_gene_vars = np.argsort(np.argsort(norm_gene_vars, axis=1), axis=1)
median_ranked = np.median(ranked_norm_gene_vars, axis=0)
num_batches_high_var = np.sum(
ranked_norm_gene_vars >= (adata.X.shape[1] - n_top_genes), axis=0
)
df = pd.DataFrame(index=np.array(adata.var_names))
df["highly_variable_nbatches"] = num_batches_high_var
df["highly_variable_median_rank"] = median_ranked
df.sort_values(
["highly_variable_nbatches", "highly_variable_median_rank"],
ascending=False,
na_position="last",
inplace=True,
)
df["highly_variable"] = False
df.loc[:n_top_genes, "highly_variable"] = True
df = df.loc[adata.var_names]
adata.var["highly_variable"] = df["highly_variable"].values
if batch_key is not None:
batches = adata.obs[batch_key].cat.categories
adata.var["highly_variable_nbatches"] = df["highly_variable_nbatches"].values
adata.var["highly_variable_intersection"] = df[
"highly_variable_nbatches"
] == len(batches)
adata.var["highly_variable_median_rank"] = df["highly_variable_median_rank"].values
|
24,740 |
def test_get_map_data():
"""Tests that a SimilarChecker respects the MapReduceMixin interface"""
linter = PyLinter(reporter=Reporter())
# Add a parallel checker to ensure it can map and reduce
linter.register_checker(similar.SimilarChecker(linter))
source_streams = (
SIMILAR_A,
SIMILAR_B,
)
for fname in source_streams:
assert os.path.exists(fname), f"File not found! {fname}"
expected_linelists = (
(
"",
"",
"",
"",
"",
"",
"def adipiscing(elit):",
'etiam = "id"',
'dictum = "purus,"',
'vitae = "pretium"',
'neque = "Vivamus"',
'nec = "ornare"',
'tortor = "sit"',
"return etiam, dictum, vitae, neque, nec, tortor",
"",
"",
"class Amet:",
"def similar_function_3_lines(self, tellus):",
"agittis = 10",
"tellus *= 300",
"return agittis, tellus",
"",
"def lorem(self, ipsum):",
'dolor = "sit"',
'amet = "consectetur"',
"return (lorem, dolor, amet)",
"",
"def similar_function_5_lines(self, similar):",
"some_var = 10",
"someother_var *= 300",
'fusce = "sit"',
'amet = "tortor"',
"return some_var, someother_var, fusce, amet",
"",
'def __init__(self, moleskie, lectus="Mauris", ac="pellentesque"):',
'metus = "ut"',
'lobortis = "urna."',
'Integer = "nisl"',
'(mauris,) = "interdum"',
'non = "odio"',
'semper = "aliquam"',
'malesuada = "nunc."',
'iaculis = "dolor"',
'facilisis = "ultrices"',
'vitae = "ut."',
"",
"return (",
"metus,",
"lobortis,",
"Integer,",
"mauris,",
"non,",
"semper,",
"malesuada,",
"iaculis,",
"facilisis,",
"vitae,",
")",
"",
"def similar_function_3_lines(self, tellus):",
"agittis = 10",
"tellus *= 300",
"return agittis, tellus",
),
(
"",
"",
"",
"",
"",
"",
"",
"class Nulla:",
'tortor = "ultrices quis porta in"',
'sagittis = "ut tellus"',
"",
"def pulvinar(self, blandit, metus):",
"egestas = [mauris for mauris in zip(blandit, metus)]",
"neque = (egestas, blandit)",
"",
"def similar_function_5_lines(self, similar):",
"some_var = 10",
"someother_var *= 300",
'fusce = "sit"',
'amet = "tortor"',
'iaculis = "dolor"',
"return some_var, someother_var, fusce, amet, iaculis, iaculis",
"",
"",
"def tortor(self):",
"ultrices = 2",
'quis = ultricies * "porta"',
"return ultricies, quis",
"",
"",
"class Commodo:",
"def similar_function_3_lines(self, tellus):",
"agittis = 10",
"tellus *= 300",
'laoreet = "commodo "',
"return agittis, tellus, laoreet",
),
)
data = []
# Manually perform a 'map' type function
for source_fname in source_streams:
sim = similar.SimilarChecker(linter)
with open(source_fname) as stream:
sim.append_stream(source_fname, stream)
# The map bit, can you tell? ;)
data.extend(sim.get_map_data())
assert len(expected_linelists) == len(data)
for source_fname, expected_lines, lineset_obj in zip(
source_streams, expected_linelists, data
):
assert source_fname == lineset_obj.name
# There doesn't seem to be a faster way of doing this, yet.
lines = (line for idx, line in lineset_obj.enumerate_stripped())
assert tuple(expected_lines) == tuple(lines)
|
def test_get_map_data():
"""Tests that a SimilarChecker respects the MapReduceMixin interface"""
linter = PyLinter(reporter=Reporter())
# Add a parallel checker to ensure it can map and reduce
linter.register_checker(similar.SimilarChecker(linter))
source_streams = (
SIMILAR_A,
SIMILAR_B,
)
for fname in source_streams:
assert fname.exists(), f"File not found! {fname}"
expected_linelists = (
(
"",
"",
"",
"",
"",
"",
"def adipiscing(elit):",
'etiam = "id"',
'dictum = "purus,"',
'vitae = "pretium"',
'neque = "Vivamus"',
'nec = "ornare"',
'tortor = "sit"',
"return etiam, dictum, vitae, neque, nec, tortor",
"",
"",
"class Amet:",
"def similar_function_3_lines(self, tellus):",
"agittis = 10",
"tellus *= 300",
"return agittis, tellus",
"",
"def lorem(self, ipsum):",
'dolor = "sit"',
'amet = "consectetur"',
"return (lorem, dolor, amet)",
"",
"def similar_function_5_lines(self, similar):",
"some_var = 10",
"someother_var *= 300",
'fusce = "sit"',
'amet = "tortor"',
"return some_var, someother_var, fusce, amet",
"",
'def __init__(self, moleskie, lectus="Mauris", ac="pellentesque"):',
'metus = "ut"',
'lobortis = "urna."',
'Integer = "nisl"',
'(mauris,) = "interdum"',
'non = "odio"',
'semper = "aliquam"',
'malesuada = "nunc."',
'iaculis = "dolor"',
'facilisis = "ultrices"',
'vitae = "ut."',
"",
"return (",
"metus,",
"lobortis,",
"Integer,",
"mauris,",
"non,",
"semper,",
"malesuada,",
"iaculis,",
"facilisis,",
"vitae,",
")",
"",
"def similar_function_3_lines(self, tellus):",
"agittis = 10",
"tellus *= 300",
"return agittis, tellus",
),
(
"",
"",
"",
"",
"",
"",
"",
"class Nulla:",
'tortor = "ultrices quis porta in"',
'sagittis = "ut tellus"',
"",
"def pulvinar(self, blandit, metus):",
"egestas = [mauris for mauris in zip(blandit, metus)]",
"neque = (egestas, blandit)",
"",
"def similar_function_5_lines(self, similar):",
"some_var = 10",
"someother_var *= 300",
'fusce = "sit"',
'amet = "tortor"',
'iaculis = "dolor"',
"return some_var, someother_var, fusce, amet, iaculis, iaculis",
"",
"",
"def tortor(self):",
"ultrices = 2",
'quis = ultricies * "porta"',
"return ultricies, quis",
"",
"",
"class Commodo:",
"def similar_function_3_lines(self, tellus):",
"agittis = 10",
"tellus *= 300",
'laoreet = "commodo "',
"return agittis, tellus, laoreet",
),
)
data = []
# Manually perform a 'map' type function
for source_fname in source_streams:
sim = similar.SimilarChecker(linter)
with open(source_fname) as stream:
sim.append_stream(source_fname, stream)
# The map bit, can you tell? ;)
data.extend(sim.get_map_data())
assert len(expected_linelists) == len(data)
for source_fname, expected_lines, lineset_obj in zip(
source_streams, expected_linelists, data
):
assert source_fname == lineset_obj.name
# There doesn't seem to be a faster way of doing this, yet.
lines = (line for idx, line in lineset_obj.enumerate_stripped())
assert tuple(expected_lines) == tuple(lines)
|
31,904 |
def test_get_client_config(mocker):
mcafee_mar = importlib.import_module("McAfee-MAR")
# # Invalid private Key
valid_params = {'private_key': invalid_private_key,
'cert_file': valid_certificate,
'broker_ca_bundle': valid_certificate}
mocker.patch.object(demisto, "params", return_value=valid_params)
with pytest.raises(SystemExit):
mcafee_mar.validate_certificates_format()
# Invalid cert file
valid_params = {'private_key': valid_private_key,
'cert_file': invalid_certificate,
'broker_ca_bundle': valid_certificate}
mocker.patch.object(demisto, "params", return_value=valid_params)
with pytest.raises(SystemExit):
mcafee_mar.validate_certificates_format()
# Invalid broker_ca_bundle
valid_params = {'private_key': valid_private_key,
'cert_file': valid_certificate,
'broker_ca_bundle': invalid_certificate}
mocker.patch.object(demisto, "params", return_value=valid_params)
with pytest.raises(SystemExit):
mcafee_mar.validate_certificates_format()
# Everything is valid + spaces
valid_params = {'private_key': valid_private_key,
'cert_file': valid_certificate,
'broker_ca_bundle': spaces_in_certificate}
mocker.patch.object(demisto, "params", return_value=valid_params)
mcafee_mar.validate_certificates_format()
|
def test_get_client_config(mocker):
mcafee_mar = importlib.import_module("McAfee-MAR")
# Invalid private Key
valid_params = {'private_key': invalid_private_key,
'cert_file': valid_certificate,
'broker_ca_bundle': valid_certificate}
mocker.patch.object(demisto, "params", return_value=valid_params)
with pytest.raises(SystemExit):
mcafee_mar.validate_certificates_format()
# Invalid cert file
valid_params = {'private_key': valid_private_key,
'cert_file': invalid_certificate,
'broker_ca_bundle': valid_certificate}
mocker.patch.object(demisto, "params", return_value=valid_params)
with pytest.raises(SystemExit):
mcafee_mar.validate_certificates_format()
# Invalid broker_ca_bundle
valid_params = {'private_key': valid_private_key,
'cert_file': valid_certificate,
'broker_ca_bundle': invalid_certificate}
mocker.patch.object(demisto, "params", return_value=valid_params)
with pytest.raises(SystemExit):
mcafee_mar.validate_certificates_format()
# Everything is valid + spaces
valid_params = {'private_key': valid_private_key,
'cert_file': valid_certificate,
'broker_ca_bundle': spaces_in_certificate}
mocker.patch.object(demisto, "params", return_value=valid_params)
mcafee_mar.validate_certificates_format()
|
41,073 |
def recog(args):
"""Decode with the given args.
Args:
args (namespace): The program arguments.
"""
set_deterministic_pytorch(args)
model, train_args = load_trained_model(args.model)
assert isinstance(model, ASRInterface)
model.recog_args = args
logging.warning(' Total parameter of the model = ' + str(sum(p.numel() for p in model.parameters())) )
# read rnnlm
if args.rnnlm:
rnnlm_args = get_model_conf(args.rnnlm, args.rnnlm_conf)
if getattr(rnnlm_args, "model_module", "default") != "default":
raise ValueError("use '--api v2' option to decode with non-default language model")
rnnlm = lm_pytorch.ClassifierWithState(
lm_pytorch.RNNLM(
len(train_args.char_list), rnnlm_args.layer, rnnlm_args.unit))
torch_load(args.rnnlm, rnnlm)
rnnlm.eval()
else:
rnnlm = None
if args.word_rnnlm:
rnnlm_args = get_model_conf(args.word_rnnlm, args.word_rnnlm_conf)
word_dict = rnnlm_args.char_list_dict
char_dict = {x: i for i, x in enumerate(train_args.char_list)}
word_rnnlm = lm_pytorch.ClassifierWithState(lm_pytorch.RNNLM(
len(word_dict), rnnlm_args.layer, rnnlm_args.unit))
torch_load(args.word_rnnlm, word_rnnlm)
word_rnnlm.eval()
if rnnlm is not None:
rnnlm = lm_pytorch.ClassifierWithState(
extlm_pytorch.MultiLevelLM(word_rnnlm.predictor,
rnnlm.predictor, word_dict, char_dict))
else:
rnnlm = lm_pytorch.ClassifierWithState(
extlm_pytorch.LookAheadWordLM(word_rnnlm.predictor,
word_dict, char_dict))
# gpu
if args.ngpu == 1:
gpu_id = list(range(args.ngpu))
logging.info('gpu id: ' + str(gpu_id))
model.cuda()
if rnnlm:
rnnlm.cuda()
# read json data
with open(args.recog_json, 'rb') as f:
js = json.load(f)['utts']
new_js = {}
load_inputs_and_targets = LoadInputsAndTargets(
mode='asr', load_output=False, sort_in_input_length=False,
preprocess_conf=train_args.preprocess_conf
if args.preprocess_conf is None else args.preprocess_conf,
preprocess_args={'train': False})
if args.batchsize == 0:
with torch.no_grad():
for idx, name in enumerate(js.keys(), 1):
logging.info('(%d/%d) decoding ' + name, idx, len(js.keys()))
batch = [(name, js[name])]
feat = load_inputs_and_targets(batch)
feat = feat[0][0] if args.num_encs == 1 else [feat[idx][0] for idx in range(model.num_encs)]
if args.streaming_mode == 'window' and args.num_encs == 1:
logging.info('Using streaming recognizer with window size %d frames', args.streaming_window)
se2e = WindowStreamingE2E(e2e=model, recog_args=args, rnnlm=rnnlm)
for i in range(0, feat.shape[0], args.streaming_window):
logging.info('Feeding frames %d - %d', i, i + args.streaming_window)
se2e.accept_input(feat[i:i + args.streaming_window])
logging.info('Running offline attention decoder')
se2e.decode_with_attention_offline()
logging.info('Offline attention decoder finished')
nbest_hyps = se2e.retrieve_recognition()
elif args.streaming_mode == 'segment' and args.num_encs == 1:
logging.info('Using streaming recognizer with threshold value %d', args.streaming_min_blank_dur)
nbest_hyps = []
for n in range(args.nbest):
nbest_hyps.append({'yseq': [], 'score': 0.0})
se2e = SegmentStreamingE2E(e2e=model, recog_args=args, rnnlm=rnnlm)
r = np.prod(model.subsample)
for i in range(0, feat.shape[0], r):
hyps = se2e.accept_input(feat[i:i + r])
if hyps is not None:
text = ''.join([train_args.char_list[int(x)]
for x in hyps[0]['yseq'][1:-1] if int(x) != -1])
text = text.replace('\u2581', ' ').strip() # for SentencePiece
text = text.replace(model.space, ' ')
text = text.replace(model.blank, '')
logging.info(text)
for n in range(args.nbest):
nbest_hyps[n]['yseq'].extend(hyps[n]['yseq'])
nbest_hyps[n]['score'] += hyps[n]['score']
else:
nbest_hyps = model.recognize(feat, args, train_args.char_list, rnnlm)
new_js[name] = add_results_to_json(js[name], nbest_hyps, train_args.char_list)
else:
def grouper(n, iterable, fillvalue=None):
kargs = [iter(iterable)] * n
return zip_longest(*kargs, fillvalue=fillvalue)
# sort data if batchsize > 1
keys = list(js.keys())
if args.batchsize > 1:
feat_lens = [js[key]['input'][0]['shape'][0] for key in keys]
sorted_index = sorted(range(len(feat_lens)), key=lambda i: -feat_lens[i])
keys = [keys[i] for i in sorted_index]
with torch.no_grad():
for names in grouper(args.batchsize, keys, None):
names = [name for name in names if name]
batch = [(name, js[name]) for name in names]
feats = load_inputs_and_targets(batch)[0] if args.num_encs == 1 else load_inputs_and_targets(batch)
if args.streaming_mode == 'window' and args.num_encs == 1:
raise NotImplementedError
elif args.streaming_mode == 'segment' and args.num_encs == 1:
if args.batchsize > 1:
raise NotImplementedError
feat = feats[0]
nbest_hyps = []
for n in range(args.nbest):
nbest_hyps.append({'yseq': [], 'score': 0.0})
se2e = SegmentStreamingE2E(e2e=model, recog_args=args, rnnlm=rnnlm)
r = np.prod(model.subsample)
for i in range(0, feat.shape[0], r):
hyps = se2e.accept_input(feat[i:i + r])
if hyps is not None:
text = ''.join([train_args.char_list[int(x)]
for x in hyps[0]['yseq'][1:-1] if int(x) != -1])
text = text.replace('\u2581', ' ').strip() # for SentencePiece
text = text.replace(model.space, ' ')
text = text.replace(model.blank, '')
logging.info(text)
for n in range(args.nbest):
nbest_hyps[n]['yseq'].extend(hyps[n]['yseq'])
nbest_hyps[n]['score'] += hyps[n]['score']
nbest_hyps = [nbest_hyps]
else:
nbest_hyps = model.recognize_batch(feats, args, train_args.char_list, rnnlm=rnnlm)
for i, nbest_hyp in enumerate(nbest_hyps):
name = names[i]
new_js[name] = add_results_to_json(js[name], nbest_hyp, train_args.char_list)
with open(args.result_label, 'wb') as f:
f.write(json.dumps({'utts': new_js}, indent=4, ensure_ascii=False, sort_keys=True).encode('utf_8'))
|
def recog(args):
"""Decode with the given args.
Args:
args (namespace): The program arguments.
"""
set_deterministic_pytorch(args)
model, train_args = load_trained_model(args.model)
assert isinstance(model, ASRInterface)
model.recog_args = args
logging.info(' Total parameter of the model = ' + str(sum(p.numel() for p in model.parameters())) )
# read rnnlm
if args.rnnlm:
rnnlm_args = get_model_conf(args.rnnlm, args.rnnlm_conf)
if getattr(rnnlm_args, "model_module", "default") != "default":
raise ValueError("use '--api v2' option to decode with non-default language model")
rnnlm = lm_pytorch.ClassifierWithState(
lm_pytorch.RNNLM(
len(train_args.char_list), rnnlm_args.layer, rnnlm_args.unit))
torch_load(args.rnnlm, rnnlm)
rnnlm.eval()
else:
rnnlm = None
if args.word_rnnlm:
rnnlm_args = get_model_conf(args.word_rnnlm, args.word_rnnlm_conf)
word_dict = rnnlm_args.char_list_dict
char_dict = {x: i for i, x in enumerate(train_args.char_list)}
word_rnnlm = lm_pytorch.ClassifierWithState(lm_pytorch.RNNLM(
len(word_dict), rnnlm_args.layer, rnnlm_args.unit))
torch_load(args.word_rnnlm, word_rnnlm)
word_rnnlm.eval()
if rnnlm is not None:
rnnlm = lm_pytorch.ClassifierWithState(
extlm_pytorch.MultiLevelLM(word_rnnlm.predictor,
rnnlm.predictor, word_dict, char_dict))
else:
rnnlm = lm_pytorch.ClassifierWithState(
extlm_pytorch.LookAheadWordLM(word_rnnlm.predictor,
word_dict, char_dict))
# gpu
if args.ngpu == 1:
gpu_id = list(range(args.ngpu))
logging.info('gpu id: ' + str(gpu_id))
model.cuda()
if rnnlm:
rnnlm.cuda()
# read json data
with open(args.recog_json, 'rb') as f:
js = json.load(f)['utts']
new_js = {}
load_inputs_and_targets = LoadInputsAndTargets(
mode='asr', load_output=False, sort_in_input_length=False,
preprocess_conf=train_args.preprocess_conf
if args.preprocess_conf is None else args.preprocess_conf,
preprocess_args={'train': False})
if args.batchsize == 0:
with torch.no_grad():
for idx, name in enumerate(js.keys(), 1):
logging.info('(%d/%d) decoding ' + name, idx, len(js.keys()))
batch = [(name, js[name])]
feat = load_inputs_and_targets(batch)
feat = feat[0][0] if args.num_encs == 1 else [feat[idx][0] for idx in range(model.num_encs)]
if args.streaming_mode == 'window' and args.num_encs == 1:
logging.info('Using streaming recognizer with window size %d frames', args.streaming_window)
se2e = WindowStreamingE2E(e2e=model, recog_args=args, rnnlm=rnnlm)
for i in range(0, feat.shape[0], args.streaming_window):
logging.info('Feeding frames %d - %d', i, i + args.streaming_window)
se2e.accept_input(feat[i:i + args.streaming_window])
logging.info('Running offline attention decoder')
se2e.decode_with_attention_offline()
logging.info('Offline attention decoder finished')
nbest_hyps = se2e.retrieve_recognition()
elif args.streaming_mode == 'segment' and args.num_encs == 1:
logging.info('Using streaming recognizer with threshold value %d', args.streaming_min_blank_dur)
nbest_hyps = []
for n in range(args.nbest):
nbest_hyps.append({'yseq': [], 'score': 0.0})
se2e = SegmentStreamingE2E(e2e=model, recog_args=args, rnnlm=rnnlm)
r = np.prod(model.subsample)
for i in range(0, feat.shape[0], r):
hyps = se2e.accept_input(feat[i:i + r])
if hyps is not None:
text = ''.join([train_args.char_list[int(x)]
for x in hyps[0]['yseq'][1:-1] if int(x) != -1])
text = text.replace('\u2581', ' ').strip() # for SentencePiece
text = text.replace(model.space, ' ')
text = text.replace(model.blank, '')
logging.info(text)
for n in range(args.nbest):
nbest_hyps[n]['yseq'].extend(hyps[n]['yseq'])
nbest_hyps[n]['score'] += hyps[n]['score']
else:
nbest_hyps = model.recognize(feat, args, train_args.char_list, rnnlm)
new_js[name] = add_results_to_json(js[name], nbest_hyps, train_args.char_list)
else:
def grouper(n, iterable, fillvalue=None):
kargs = [iter(iterable)] * n
return zip_longest(*kargs, fillvalue=fillvalue)
# sort data if batchsize > 1
keys = list(js.keys())
if args.batchsize > 1:
feat_lens = [js[key]['input'][0]['shape'][0] for key in keys]
sorted_index = sorted(range(len(feat_lens)), key=lambda i: -feat_lens[i])
keys = [keys[i] for i in sorted_index]
with torch.no_grad():
for names in grouper(args.batchsize, keys, None):
names = [name for name in names if name]
batch = [(name, js[name]) for name in names]
feats = load_inputs_and_targets(batch)[0] if args.num_encs == 1 else load_inputs_and_targets(batch)
if args.streaming_mode == 'window' and args.num_encs == 1:
raise NotImplementedError
elif args.streaming_mode == 'segment' and args.num_encs == 1:
if args.batchsize > 1:
raise NotImplementedError
feat = feats[0]
nbest_hyps = []
for n in range(args.nbest):
nbest_hyps.append({'yseq': [], 'score': 0.0})
se2e = SegmentStreamingE2E(e2e=model, recog_args=args, rnnlm=rnnlm)
r = np.prod(model.subsample)
for i in range(0, feat.shape[0], r):
hyps = se2e.accept_input(feat[i:i + r])
if hyps is not None:
text = ''.join([train_args.char_list[int(x)]
for x in hyps[0]['yseq'][1:-1] if int(x) != -1])
text = text.replace('\u2581', ' ').strip() # for SentencePiece
text = text.replace(model.space, ' ')
text = text.replace(model.blank, '')
logging.info(text)
for n in range(args.nbest):
nbest_hyps[n]['yseq'].extend(hyps[n]['yseq'])
nbest_hyps[n]['score'] += hyps[n]['score']
nbest_hyps = [nbest_hyps]
else:
nbest_hyps = model.recognize_batch(feats, args, train_args.char_list, rnnlm=rnnlm)
for i, nbest_hyp in enumerate(nbest_hyps):
name = names[i]
new_js[name] = add_results_to_json(js[name], nbest_hyp, train_args.char_list)
with open(args.result_label, 'wb') as f:
f.write(json.dumps({'utts': new_js}, indent=4, ensure_ascii=False, sort_keys=True).encode('utf_8'))
|
26,216 |
def prepare_ptf(ptfhost, mg_facts, dut_facts, vnet_config):
"""
Prepares the PTF container for testing
Generates and copies PTF required config files to the PTF host
Args:
ptfhost: PTF host object
mg_facts: Minigraph facts
dut_facts: DUT host facts
vnet_config: Configuration file generated from templates/vnet_config.j2
"""
logger.info("Preparing PTF host")
arp_responder_conf = safe_open_template("templates/arp_responder.conf.j2") \
.render(arp_responder_args="--conf /tmp/vnet_arpresponder.conf")
ptfhost.copy(content=arp_responder_conf, dest="/etc/supervisor/conf.d/arp_responder.conf")
ptfhost.shell("supervisorctl reread")
ptfhost.shell("supervisorctl update")
logger.debug("VNet config is: " + str(vnet_config))
vnet_json = {
"minigraph_port_indices": mg_facts["minigraph_port_indices"],
"minigraph_portchannel_interfaces": mg_facts["minigraph_portchannel_interfaces"],
"minigraph_portchannels": mg_facts["minigraph_portchannels"],
"minigraph_lo_interfaces": mg_facts["minigraph_lo_interfaces"],
"minigraph_vlans": mg_facts["minigraph_vlans"],
"minigraph_vlan_interfaces": mg_facts["minigraph_vlan_interfaces"],
"dut_mac": dut_facts["ansible_Ethernet0"]["macaddress"],
"vnet_interfaces": vnet_config["vnet_intf_list"],
"vnet_routes": vnet_config["vnet_route_list"]+vnet_config["vnet_subnet_routes"],
"vnet_local_routes": vnet_config["vnet_local_routes"],
"vnet_neighbors": vnet_config["vnet_nbr_list"],
"vnet_peers": vnet_config["vnet_peer_list"]
}
ptfhost.copy(content=json.dumps(vnet_json, indent=2), dest="/tmp/vnet.json")
|
def prepare_ptf(ptfhost, mg_facts, dut_facts, vnet_config):
"""
Prepares the PTF container for testing
Generates and copies PTF required config files to the PTF host
Args:
ptfhost: PTF host object
mg_facts: Minigraph facts
dut_facts: DUT host facts
vnet_config: Configuration file generated from templates/vnet_config.j2
"""
logger.info("Preparing PTF host")
arp_responder_conf = safe_open_template("templates/arp_responder.conf.j2") \
.render(arp_responder_args="--conf /tmp/vnet_arpresponder.conf")
ptfhost.copy(content=arp_responder_conf, dest="/etc/supervisor/conf.d/arp_responder.conf")
ptfhost.shell("supervisorctl reread")
ptfhost.shell("supervisorctl update")
logger.debug("VNet config is: " + str(vnet_config))
vnet_json = {
"minigraph_port_indices": mg_facts["minigraph_port_indices"],
"minigraph_portchannel_interfaces": mg_facts["minigraph_portchannel_interfaces"],
"minigraph_portchannels": mg_facts["minigraph_portchannels"],
"minigraph_lo_interfaces": mg_facts["minigraph_lo_interfaces"],
"minigraph_vlans": mg_facts["minigraph_vlans"],
"minigraph_vlan_interfaces": mg_facts["minigraph_vlan_interfaces"],
"dut_mac": dut_facts["ansible_Ethernet0"]["macaddress"],
"vnet_interfaces": vnet_config["vnet_intf_list"],
"vnet_routes": vnet_config["vnet_route_list"] + vnet_config["vnet_subnet_routes"],
"vnet_local_routes": vnet_config["vnet_local_routes"],
"vnet_neighbors": vnet_config["vnet_nbr_list"],
"vnet_peers": vnet_config["vnet_peer_list"]
}
ptfhost.copy(content=json.dumps(vnet_json, indent=2), dest="/tmp/vnet.json")
|
55,324 |
def fetch_okta_oauth2_permissions(strategy, details, user=None, is_new=False, *args, **kwargs):
org_url = getattr(settings, 'SOCIAL_AUTH_OKTA_OAUTH2_API_URL', '')
admin_group_name = getattr(settings, "OKTA_OAUTH2_ADMIN_GROUP_NAME", "")
if not user or not isinstance(kwargs['backend'], OktaOAuth2):
return
# OktaOpenIdConnect inherits `OktaOAuth2`, so we have to explicitly skip OAuth2 trying
# to fetch permissions when using OIDC backend.
if isinstance(kwargs['backend'], OktaOpenIdConnect):
return
response = requests.post(
url=f"{org_url}/v1/userinfo",
headers={
'Authorization': 'Bearer {}'.format(kwargs['response']['access_token']),
},
)
response.raise_for_status()
response = response.json()
is_superuser = admin_group_name in response.get("groups", [])
is_staff = admin_group_name in response.get("groups", [])
if user.is_superuser != is_superuser:
user.is_superuser = is_superuser
user.save()
if user.is_staff != is_staff:
user.is_staff = is_staff
user.save()
|
def fetch_okta_oauth2_permissions(strategy, details, user=None, is_new=False, *args, **kwargs):
org_url = getattr(settings, 'SOCIAL_AUTH_OKTA_OAUTH2_API_URL', '')
admin_group_name = getattr(settings, "OKTA_OAUTH2_ADMIN_GROUP_NAME", "")
if not user or not isinstance(kwargs['backend'], OktaOAuth2):
return
# OktaOpenIdConnect inherits `OktaOAuth2`, so we have to explicitly skip OAuth2 trying
# to fetch permissions when using OIDC backend.
if isinstance(kwargs['backend'], OktaOpenIdConnect):
return
response = requests.post(
url=f"{org_url}/v1/userinfo",
headers={
'Authorization': 'Bearer {}'.format(kwargs['response']['access_token']),
},
)
response.raise_for_status()
response = response.json()
is_superuser = admin_group_name in response.get("groups", [])
is_staff = admin_group_name in response.get("groups", [])
user_changed = False
if user.is_superuser != is_superuser:
user.is_superuser = is_superuser
user_changed = True
if user.is_staff != is_staff:
user.is_staff = is_staff
user_changed = True
if user_changed:
user.save()
admin_group_name = getattr(settings, "OKTA_OPENIDCONNECT_ADMIN_GROUP_NAME", "")
if not user or not isinstance(kwargs['backend'], OktaOpenIdConnect):
return
response = requests.post(
url=f"{org_url}/v1/userinfo",
headers={
'Authorization': 'Bearer {}'.format(kwargs['response']['access_token']),
},
)
response.raise_for_status()
response = response.json()
is_superuser = admin_group_name in response.get("groups", [])
is_staff = admin_group_name in response.get("groups", [])
if user.is_superuser != is_superuser:
user.is_superuser = is_superuser
user.save()
if user.is_staff != is_staff:
user.is_staff = is_staff
user.save()
|
26,691 |
def integrate_dag_plugins() -> None:
"""Integrates operator, sensor, hook, macro plugins."""
# pylint: disable=global-statement
global plugins
global operators_modules
global sensors_modules
global hooks_modules
global macros_modules
# pylint: enable=global-statement
if operators_modules is not None and \
sensors_modules is not None and \
hooks_modules is not None and \
macros_modules is not None:
return
ensure_plugins_loaded()
if plugins is None:
raise AirflowPluginException("Can't load plugins.")
log.debug("Integrate DAG plugins")
operators_modules = []
sensors_modules = []
hooks_modules = []
macros_modules = []
for plugin in plugins:
if plugin.name is None:
raise AirflowPluginException("Invalid plugin name")
plugin_name: str = plugin.name
operators_module = make_module('airflow.operators.' + plugin_name, plugin.operators + plugin.sensors)
sensors_module = make_module('airflow.sensors.' + plugin_name, plugin.sensors)
hooks_module = make_module('airflow.hooks.' + plugin_name, plugin.hooks)
macros_module = make_module('airflow.macros.' + plugin_name, plugin.macros)
operators_modules.append(operators_module)
sensors_modules.append(sensors_module)
hooks_modules.append(hooks_module)
macros_modules.append(macros_module)
sys.modules[operators_module.__name__] = operators_module # pylint: disable=no-member
# noinspection PyProtectedMember
globals()[operators_module._name] = operators_module # pylint: disable=protected-access
sys.modules[sensors_module.__name__] = sensors_module # pylint: disable=no-member
# noinspection PyProtectedMember
globals()[sensors_module._name] = sensors_module # pylint: disable=protected-access
sys.modules[hooks_module.__name__] = hooks_module # pylint: disable=no-member
# noinspection PyProtectedMember
globals()[hooks_module._name] = hooks_module # pylint: disable=protected-access
sys.modules[macros_module.__name__] = macros_module # pylint: disable=no-member
# noinspection PyProtectedMember
globals()[macros_module._name] = macros_module # pylint: disable=protected-access
|
def integrate_dag_plugins() -> None:
"""Integrates operator, sensor, hook, macro plugins."""
# pylint: disable=global-statement
global plugins
global operators_modules
global sensors_modules
global hooks_modules
global macros_modules
# pylint: enable=global-statement
if operators_modules is not None and \
sensors_modules is not None and \
hooks_modules is not None and \
macros_modules is not None:
return
ensure_plugins_loaded()
if plugins is None:
raise AirflowPluginException("Can't load plugins.")
log.debug("Integrate DAG plugins")
operators_modules = []
sensors_modules = []
hooks_modules = []
macros_modules = []
for plugin in plugins:
if plugin.name is None:
raise AirflowPluginException("Invalid plugin name")
plugin_name: str = plugin.name
operators_module = make_module(f'airflow.operators.{plugin_name}', plugin.operators + plugin.sensors)
sensors_module = make_module('airflow.sensors.' + plugin_name, plugin.sensors)
hooks_module = make_module('airflow.hooks.' + plugin_name, plugin.hooks)
macros_module = make_module('airflow.macros.' + plugin_name, plugin.macros)
operators_modules.append(operators_module)
sensors_modules.append(sensors_module)
hooks_modules.append(hooks_module)
macros_modules.append(macros_module)
sys.modules[operators_module.__name__] = operators_module # pylint: disable=no-member
# noinspection PyProtectedMember
globals()[operators_module._name] = operators_module # pylint: disable=protected-access
sys.modules[sensors_module.__name__] = sensors_module # pylint: disable=no-member
# noinspection PyProtectedMember
globals()[sensors_module._name] = sensors_module # pylint: disable=protected-access
sys.modules[hooks_module.__name__] = hooks_module # pylint: disable=no-member
# noinspection PyProtectedMember
globals()[hooks_module._name] = hooks_module # pylint: disable=protected-access
sys.modules[macros_module.__name__] = macros_module # pylint: disable=no-member
# noinspection PyProtectedMember
globals()[macros_module._name] = macros_module # pylint: disable=protected-access
|
48,494 |
def bokeh_dataframe(name, rawtext, text, lineno, inliner, options=None, content=None):
"""Generate an inline visual representations of a single color palette.
If evaluating the dataframe HTML repr fails, then a SphinxError is raised to
terminate the build.
For details on the arguments to this function, consult the Docutils docs:
http://docutils.sourceforge.net/docs/howto/rst-roles.html#define-the-role-function
"""
module_name, df_name = text.rsplit(".", 1)
try:
module = importlib.import_module(module_name)
except ImportError:
raise SphinxError(f"Unable to generate HTML table for {df_name}: couldn't import module {module_name}")
df = getattr(module, df_name, None)
if df is None:
raise SphinxError(f"Unable to generate HTML table for {df_name}: no Dataframe {df_name} in module {module_name}")
if not isinstance(df, pd.DataFrame):
raise SphinxError(f"{text!r} is not a pandas Dataframe")
node = nodes.raw("", df.head().to_html(), format="html")
return [node], []
|
def bokeh_dataframe(name, rawtext, text, lineno, inliner, options=None, content=None):
"""Generate an inline visual representations of a single color palette.
If the HTML representation of the dataframe can not be created, a
SphinxError is raised to terminate the build.
For details on the arguments to this function, consult the Docutils docs:
http://docutils.sourceforge.net/docs/howto/rst-roles.html#define-the-role-function
"""
module_name, df_name = text.rsplit(".", 1)
try:
module = importlib.import_module(module_name)
except ImportError:
raise SphinxError(f"Unable to generate HTML table for {df_name}: couldn't import module {module_name}")
df = getattr(module, df_name, None)
if df is None:
raise SphinxError(f"Unable to generate HTML table for {df_name}: no Dataframe {df_name} in module {module_name}")
if not isinstance(df, pd.DataFrame):
raise SphinxError(f"{text!r} is not a pandas Dataframe")
node = nodes.raw("", df.head().to_html(), format="html")
return [node], []
|
44,713 |
def _make_divisible(v, divisor, min_value=None):
"""
이 함수는 원본 ts repo에서 가져왔습니다.
모든 계층들이 8로 나뉠수 있는 채널 숫자를 가지는 것을 보장합니다.
다음에서 확인할 수 있습니다:
https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet.py
:param v:
:param divisor:
:param min_value:
:return:
"""
if min_value is None:
min_value = divisor
new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
# 내림은 10% 이상으로 내려가지 않는 것을 보장합니다.
if new_v < 0.9 * v:
new_v += divisor
return new_v
|
def _make_divisible(v, divisor, min_value=None):
"""
이 함수는 원본 tensorflow 저장소에서 가져왔습니다.
모든 계층들이 8로 나뉠수 있는 채널 숫자를 가지는 것을 보장합니다.
다음에서 확인할 수 있습니다:
https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet.py
:param v:
:param divisor:
:param min_value:
:return:
"""
if min_value is None:
min_value = divisor
new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
# 내림은 10% 이상으로 내려가지 않는 것을 보장합니다.
if new_v < 0.9 * v:
new_v += divisor
return new_v
|
31,058 |
def main():
try:
if demisto.command() == 'test-module':
# Tests connectivity and credentails on login
# generateStartEndDates(1)
return "ok"
elif demisto.command() == 'ironportQuarantineReleaseEmail':
mesId = demisto.args().get('mid')
ironportQuarantineReleaseEmail(mesId)
elif demisto.command() == 'ironportSpamReleaseEmail':
mesId = demisto.args().get('mid')
ironportSpamReleaseEmail(mesId)
elif demisto.command() == 'ironPortSearchQuarantines':
period = demisto.args().get('periodInDays')
# senderPattern=""
senderPattern = demisto.args().get('senderPattern')
recipientPattern = demisto.args().get('recipientPattern')
subjectPattern = demisto.args().get('subjectPattern')
limit = demisto.args().get('limit')
# print("senderPattern :",senderPattern)
ironPortSearchQuarantines(period, senderPattern, recipientPattern, subjectPattern, limit)
elif demisto.command() == 'ironPortSearchSpam':
period = demisto.args().get('periodInDays')
# senderPattern=""
senderPattern = demisto.args().get('senderPattern')
recipientPattern = demisto.args().get('recipientPattern')
subjectPattern = demisto.args().get('subjectPattern')
limit = demisto.args().get('limit')
# print("senderPattern :",senderPattern)
ironPortSearchSpam(period, senderPattern, recipientPattern, subjectPattern, limit)
elif demisto.command() == 'ironPortSearch':
period = demisto.args().get('periodInDays')
# senderPattern=""
senderPattern = demisto.args().get('senderPattern')
recipientPattern = demisto.args().get('recipientPattern')
subjectPattern = demisto.args().get('subjectPattern')
limit = demisto.args().get('limit')
# print("senderPattern :",senderPattern)
ironPortSearch(period, senderPattern, recipientPattern, subjectPattern, limit)
except Exception as e:
LOG.print_log(e)
#
|
def main():
try:
if demisto.command() == 'test-module':
# Tests connectivity and credentails on login
# generateStartEndDates(1)
return "ok"
elif demisto.command() == 'ironportQuarantineReleaseEmail':
mesId = demisto.args().get('mid')
ironportQuarantineReleaseEmail(mesId)
elif demisto.command() == 'ironportSpamReleaseEmail':
mesId = demisto.args().get('mid')
ironportSpamReleaseEmail(mesId)
elif demisto.command() == 'ironPortSearchQuarantines':
period = demisto.args().get('periodInDays')
# senderPattern=""
senderPattern = demisto.args().get('senderPattern')
recipientPattern = demisto.args().get('recipientPattern')
subjectPattern = demisto.args().get('subjectPattern')
limit = demisto.args().get('limit')
# print("senderPattern :",senderPattern)
ironPortSearchQuarantines(period, senderPattern, recipientPattern, subjectPattern, limit)
elif demisto.command() == 'ironPortSearchSpam':
period = demisto.args().get('periodInDays')
# senderPattern=""
senderPattern = demisto.args().get('senderPattern')
recipientPattern = demisto.args().get('recipientPattern')
subjectPattern = demisto.args().get('subjectPattern')
limit = demisto.args().get('limit')
# print("senderPattern :",senderPattern)
ironPortSearchSpam(period, senderPattern, recipientPattern, subjectPattern, limit)
elif demisto.command() == 'iron-port-search':
period = demisto.args().get('periodInDays')
# senderPattern=""
senderPattern = demisto.args().get('senderPattern')
recipientPattern = demisto.args().get('recipientPattern')
subjectPattern = demisto.args().get('subjectPattern')
limit = demisto.args().get('limit')
# print("senderPattern :",senderPattern)
ironPortSearch(period, senderPattern, recipientPattern, subjectPattern, limit)
except Exception as e:
LOG.print_log(e)
#
|
55,758 |
def test_paint_3d_undo(MouseEvent):
"""Test painting labels with circle brush when scaled."""
data = np.zeros((20, 20, 20), dtype=np.int32)
data[10, :, :] = 1
layer = Labels(data)
layer.brush_size = 5
layer.mode = 'erase'
layer._slice_dims(point=(0, 0, 0), ndisplay=3)
layer.n_edit_dimensions = 3
# Simulate click
event = ReadOnlyWrapper(
MouseEvent(
type='mouse_press',
is_dragging=False,
position=(-1, -1, -1),
view_direction=(1, 0, 0),
dims_displayed=(0, 1, 2),
dims_point=(0, 0, 0),
)
)
mouse_press_callbacks(layer, event)
# Simulate drag. Note: we need to include top left and bottom right in the
# drag or there are no coordinates to interpolate
event = ReadOnlyWrapper(
MouseEvent(
type='mouse_move',
is_dragging=True,
position=(-1, 0.1, 0.1),
view_direction=(1, 0, 0),
dims_displayed=(0, 1, 2),
dims_point=(0, 0, 0),
)
)
mouse_move_callbacks(layer, event)
event = ReadOnlyWrapper(
MouseEvent(
type='mouse_move',
is_dragging=True,
position=(-1, 18.9, 18.9),
view_direction=(1, 0, 0),
dims_displayed=(0, 1, 2),
dims_point=(0, 0, 0),
)
)
mouse_move_callbacks(layer, event)
# Simulate release
event = ReadOnlyWrapper(
MouseEvent(
type='mouse_release',
is_dragging=False,
position=(-1, 21, 21),
view_direction=(1, 0, 0),
dims_displayed=(0, 1, 2),
dims_point=(0, 0, 0),
)
)
mouse_release_callbacks(layer, event)
# Erasing goes from (-1, -1, -1) to (-1, 21, 21), should split the labels
# into two sections. Undoing should work and reunite the labels to one
# square
assert ndi.label(layer.data)[1] == 2
layer.undo()
assert ndi.label(layer.data)[1] == 1
|
def test_erase_3d_undo(MouseEvent):
"""Test erasing labels 3D and then undoing the erase."""
data = np.zeros((20, 20, 20), dtype=np.int32)
data[10, :, :] = 1
layer = Labels(data)
layer.brush_size = 5
layer.mode = 'erase'
layer._slice_dims(point=(0, 0, 0), ndisplay=3)
layer.n_edit_dimensions = 3
# Simulate click
event = ReadOnlyWrapper(
MouseEvent(
type='mouse_press',
is_dragging=False,
position=(-1, -1, -1),
view_direction=(1, 0, 0),
dims_displayed=(0, 1, 2),
dims_point=(0, 0, 0),
)
)
mouse_press_callbacks(layer, event)
# Simulate drag. Note: we need to include top left and bottom right in the
# drag or there are no coordinates to interpolate
event = ReadOnlyWrapper(
MouseEvent(
type='mouse_move',
is_dragging=True,
position=(-1, 0.1, 0.1),
view_direction=(1, 0, 0),
dims_displayed=(0, 1, 2),
dims_point=(0, 0, 0),
)
)
mouse_move_callbacks(layer, event)
event = ReadOnlyWrapper(
MouseEvent(
type='mouse_move',
is_dragging=True,
position=(-1, 18.9, 18.9),
view_direction=(1, 0, 0),
dims_displayed=(0, 1, 2),
dims_point=(0, 0, 0),
)
)
mouse_move_callbacks(layer, event)
# Simulate release
event = ReadOnlyWrapper(
MouseEvent(
type='mouse_release',
is_dragging=False,
position=(-1, 21, 21),
view_direction=(1, 0, 0),
dims_displayed=(0, 1, 2),
dims_point=(0, 0, 0),
)
)
mouse_release_callbacks(layer, event)
# Erasing goes from (-1, -1, -1) to (-1, 21, 21), should split the labels
# into two sections. Undoing should work and reunite the labels to one
# square
assert ndi.label(layer.data)[1] == 2
layer.undo()
assert ndi.label(layer.data)[1] == 1
|
48,437 |
def upgrade(m, mode="yes", force=False, default_release=None,
use_apt_get=False,
dpkg_options=expand_dpkg_options(DPKG_OPTIONS), autoremove=False, no_remove=False,
allow_unauthenticated=False,
):
if autoremove:
autoremove = '--auto-remove'
else:
autoremove = ''
if m.check_mode:
check_arg = '--simulate'
else:
check_arg = ''
apt_cmd = None
prompt_regex = None
if mode == "dist" or (mode == "full" and use_apt_get):
# apt-get dist-upgrade
apt_cmd = APT_GET_CMD
upgrade_command = "dist-upgrade %s" % (autoremove)
elif mode == "full" and not use_apt_get:
# aptitude full-upgrade
apt_cmd = APTITUDE_CMD
upgrade_command = "full-upgrade"
else:
if use_apt_get:
apt_cmd = APT_GET_CMD
upgrade_command = "upgrade --with-new-pkgs %s" % (autoremove)
else:
# aptitude safe-upgrade # mode=yes # default
apt_cmd = APTITUDE_CMD
upgrade_command = "safe-upgrade"
prompt_regex = r"(^Do you want to ignore this warning and proceed anyway\?|^\*\*\*.*\[default=.*\])"
if force:
if apt_cmd == APT_GET_CMD:
force_yes = '--force-yes'
else:
force_yes = '--assume-yes --allow-untrusted'
else:
force_yes = ''
if no_remove:
no_remove = '--no-remove'
else:
no_remove = ''
allow_unauthenticated = '--allow-unauthenticated' if allow_unauthenticated else ''
if apt_cmd is None:
if use_apt_get:
apt_cmd = APT_GET_CMD
else:
m.fail_json(msg="Unable to find APTITUDE in path. Please make sure "
"to have APTITUDE in path or use 'force_apt_get=True'")
apt_cmd_path = m.get_bin_path(apt_cmd, required=True)
cmd = '%s -y %s %s %s %s %s %s' % (apt_cmd_path, dpkg_options, force_yes, no_remove, allow_unauthenticated, check_arg, upgrade_command)
if default_release:
cmd += " -t '%s'" % (default_release,)
with PolicyRcD(m):
rc, out, err = m.run_command(cmd, prompt_regex=prompt_regex)
if m._diff:
diff = parse_diff(out)
else:
diff = {}
if rc:
m.fail_json(msg="'%s %s' failed: %s" % (apt_cmd, upgrade_command, err), stdout=out, rc=rc)
if (apt_cmd == APT_GET_CMD and APT_GET_ZERO in out) or (apt_cmd == APTITUDE_CMD and APTITUDE_ZERO in out):
m.exit_json(changed=False, msg=out, stdout=out, stderr=err)
m.exit_json(changed=True, msg=out, stdout=out, stderr=err, diff=diff)
|
def upgrade(m, mode="yes", force=False, default_release=None,
use_apt_get=False,
dpkg_options=expand_dpkg_options(DPKG_OPTIONS), autoremove=False, no_remove=False,
allow_unauthenticated=False,
):
if autoremove:
autoremove = '--auto-remove'
else:
autoremove = ''
if m.check_mode:
check_arg = '--simulate'
else:
check_arg = ''
apt_cmd = None
prompt_regex = None
if mode == "dist" or (mode == "full" and use_apt_get):
# apt-get dist-upgrade
apt_cmd = APT_GET_CMD
upgrade_command = "dist-upgrade %s" % (autoremove)
elif mode == "full" and not use_apt_get:
# aptitude full-upgrade
apt_cmd = APTITUDE_CMD
upgrade_command = "full-upgrade"
else:
if use_apt_get:
apt_cmd = APT_GET_CMD
upgrade_command = "upgrade --with-new-pkgs %s" % (autoremove)
else:
# aptitude safe-upgrade # mode=yes # default
apt_cmd = APTITUDE_CMD
upgrade_command = "safe-upgrade"
prompt_regex = r"(^Do you want to ignore this warning and proceed anyway\?|^\*\*\*.*\[default=.*\])"
if force:
if apt_cmd == APT_GET_CMD:
force_yes = '--force-yes'
else:
force_yes = '--assume-yes --allow-untrusted'
else:
force_yes = ''
if fail_on_autoremove:
fail_on_autoremove = '--no-remove'
else:
fail_on_autoremove = ''
allow_unauthenticated = '--allow-unauthenticated' if allow_unauthenticated else ''
if apt_cmd is None:
if use_apt_get:
apt_cmd = APT_GET_CMD
else:
m.fail_json(msg="Unable to find APTITUDE in path. Please make sure "
"to have APTITUDE in path or use 'force_apt_get=True'")
apt_cmd_path = m.get_bin_path(apt_cmd, required=True)
cmd = '%s -y %s %s %s %s %s %s' % (apt_cmd_path, dpkg_options, force_yes, no_remove, allow_unauthenticated, check_arg, upgrade_command)
if default_release:
cmd += " -t '%s'" % (default_release,)
with PolicyRcD(m):
rc, out, err = m.run_command(cmd, prompt_regex=prompt_regex)
if m._diff:
diff = parse_diff(out)
else:
diff = {}
if rc:
m.fail_json(msg="'%s %s' failed: %s" % (apt_cmd, upgrade_command, err), stdout=out, rc=rc)
if (apt_cmd == APT_GET_CMD and APT_GET_ZERO in out) or (apt_cmd == APTITUDE_CMD and APTITUDE_ZERO in out):
m.exit_json(changed=False, msg=out, stdout=out, stderr=err)
m.exit_json(changed=True, msg=out, stdout=out, stderr=err, diff=diff)
|
30,745 |
def blocked_lists_get_command(client):
result = client.blocked_lists_get()
rules = result.get('rules')
if not rules:
return "Blocked List is empty"
else:
readable_output = tableToMarkdown('Blocked List:', rules)
return CommandResults(
readable_output=readable_output,
outputs_prefix='TrendMicroCAS.BlockedList',
outputs_key_field='BlockedList',
outputs=rules,
raw_response=result
)
|
def blocked_lists_get_command(client):
result = client.blocked_lists_get()
rules = result.get('rules')
if not rules:
return "Blocked List is empty"
else:
readable_output = tableToMarkdown('Blocked List', rules)
return CommandResults(
readable_output=readable_output,
outputs_prefix='TrendMicroCAS.BlockedList',
outputs_key_field='BlockedList',
outputs=rules,
raw_response=result
)
|
29,118 |
def _save_audio_file(
raw_audio_file, filename, entity_type, entity_id, user_id):
"""Saves the given audio file in file system.
Args:
raw_audio_file: *. The raw audio data.
filename: str. The filename of the audio.
entity_type: str. The type of entity to which the audio belongs.
entity_id: str. The id of the entity to which the audio belongs.
user_id: str. The ID of the user saving the audio.
Raises:
Exception: If audio not supplied.
Exception: If the filename extension is unsupported.
"""
allowed_formats = list(feconf.ACCEPTED_AUDIO_EXTENSIONS.keys())
if not raw_audio_file:
raise Exception('No audio supplied')
dot_index = filename.rfind('.')
extension = filename[dot_index + 1:].lower()
if dot_index == -1 or dot_index == 0:
raise Exception(
'No filename extension: it should have '
'one of the following extensions: %s' % allowed_formats)
if extension not in feconf.ACCEPTED_AUDIO_EXTENSIONS:
raise Exception(
'Invalid filename extension: it should have '
'one of the following extensions: %s' % allowed_formats)
tempbuffer = python_utils.string_io()
tempbuffer.write(raw_audio_file)
tempbuffer.seek(0)
try:
# For every accepted extension, use the mutagen-specific
# constructor for that type. This will catch mismatched audio
# types e.g. uploading a flac file with an MP3 extension.
if extension == 'mp3':
audio = mp3.MP3(tempbuffer)
else:
audio = mutagen.File(tempbuffer)
except mutagen.MutagenError:
# The calls to mp3.MP3() versus mutagen.File() seem to behave
# differently upon not being able to interpret the audio.
# mp3.MP3() raises a MutagenError whereas mutagen.File()
# seems to return None. It's not clear if this is always
# the case. Occasionally, mutagen.File() also seems to
# raise a MutagenError.
raise Exception('Audio not recognized as a %s file' % extension)
tempbuffer.close()
if audio is None:
raise Exception('Audio not recognized as a %s file' % extension)
if audio.info.length > feconf.MAX_AUDIO_FILE_LENGTH_SEC:
raise Exception(
'Audio files must be under %s seconds in length. The uploaded '
'file is %.2f seconds long.' % (
feconf.MAX_AUDIO_FILE_LENGTH_SEC, audio.info.length))
if len(set(audio.mime).intersection(
set(feconf.ACCEPTED_AUDIO_EXTENSIONS[extension]))) == 0:
raise Exception(
'Although the filename extension indicates the file '
'is a %s file, it was not recognized as one. '
'Found mime types: %s' % (extension, audio.mime))
mimetype = audio.mime[0]
# For a strange, unknown reason, the audio variable must be
# deleted before opening cloud storage. If not, cloud storage
# throws a very mysterious error that entails a mutagen
# object being recursively passed around in app engine.
del audio
# Audio files are stored to the datastore in the dev env, and to GCS
# in production.
file_system_class = fs_services.get_entity_file_system_class()
fs = fs_domain.AbstractFileSystem(file_system_class(entity_type, entity_id))
fs.commit(user_id, 'audio/%s' % filename, raw_audio_file, mimetype=mimetype)
|
def _save_audio_file(
raw_audio_file, filename, entity_type, entity_id, user_id):
"""Saves the given audio file in file system.
Args:
raw_audio_file: *. The raw audio data.
filename: str. The filename of the audio.
entity_type: str. The type of entity to which the audio belongs.
entity_id: str. The id of the entity to which the audio belongs.
user_id: str. The ID of the user saving the audio.
Raises:
Exception: If audio not supplied.
Exception: If the filename extension is unsupported.
"""
allowed_formats = list(feconf.ACCEPTED_AUDIO_EXTENSIONS.keys())
if not raw_audio_file:
raise Exception('No audio supplied')
dot_index = filename.rfind('.')
extension = filename[dot_index + 1:].lower()
if dot_index == -1 or dot_index == 0:
raise Exception(
'No filename extension: it should have '
'one of the following extensions: %s' % allowed_formats)
if extension not in feconf.ACCEPTED_AUDIO_EXTENSIONS:
raise Exception(
'Invalid filename extension. It should have '
'one of the following extensions: %s' % allowed_formats)
tempbuffer = python_utils.string_io()
tempbuffer.write(raw_audio_file)
tempbuffer.seek(0)
try:
# For every accepted extension, use the mutagen-specific
# constructor for that type. This will catch mismatched audio
# types e.g. uploading a flac file with an MP3 extension.
if extension == 'mp3':
audio = mp3.MP3(tempbuffer)
else:
audio = mutagen.File(tempbuffer)
except mutagen.MutagenError:
# The calls to mp3.MP3() versus mutagen.File() seem to behave
# differently upon not being able to interpret the audio.
# mp3.MP3() raises a MutagenError whereas mutagen.File()
# seems to return None. It's not clear if this is always
# the case. Occasionally, mutagen.File() also seems to
# raise a MutagenError.
raise Exception('Audio not recognized as a %s file' % extension)
tempbuffer.close()
if audio is None:
raise Exception('Audio not recognized as a %s file' % extension)
if audio.info.length > feconf.MAX_AUDIO_FILE_LENGTH_SEC:
raise Exception(
'Audio files must be under %s seconds in length. The uploaded '
'file is %.2f seconds long.' % (
feconf.MAX_AUDIO_FILE_LENGTH_SEC, audio.info.length))
if len(set(audio.mime).intersection(
set(feconf.ACCEPTED_AUDIO_EXTENSIONS[extension]))) == 0:
raise Exception(
'Although the filename extension indicates the file '
'is a %s file, it was not recognized as one. '
'Found mime types: %s' % (extension, audio.mime))
mimetype = audio.mime[0]
# For a strange, unknown reason, the audio variable must be
# deleted before opening cloud storage. If not, cloud storage
# throws a very mysterious error that entails a mutagen
# object being recursively passed around in app engine.
del audio
# Audio files are stored to the datastore in the dev env, and to GCS
# in production.
file_system_class = fs_services.get_entity_file_system_class()
fs = fs_domain.AbstractFileSystem(file_system_class(entity_type, entity_id))
fs.commit(user_id, 'audio/%s' % filename, raw_audio_file, mimetype=mimetype)
|
30,822 |
def get_user_command(client, args):
scim = verify_and_load_scim_data(args.get('scim'))
scim_flat_data = map_scim(scim)
user_id = scim_flat_data.get('id')
username = scim_flat_data.get('userName')
email = scim_flat_data.get('email')
if not (user_id or username or email):
raise Exception('You must provide either the id,, email or username of the user')
if user_id:
user_term = user_id
else:
user_term = email if email else username
res = client.get_user(user_term)
res_json = res.json()
if res.status_code == 200:
active = True if res_json['status'] == "active" else False
generic_iam_context = OutputContext(success=True, iden=res_json.get('id'), email=res_json.get('email'),
username=username, details=res_json, active=active)
elif res.status_code == 404:
generic_iam_context = OutputContext(success=False, iden=user_id, username=username, errorCode=404,
errorMessage=res_json.get('message'), details=res_json)
else:
generic_iam_context = OutputContext(success=False, iden=user_id, username=username,
errorCode=res_json.get('code'),
errorMessage=res_json.get('message'), details=res_json)
generic_iam_context_dt = f'{generic_iam_context.command}(val.id == obj.id && val.instanceName == obj.instanceName)'
outputs = {
generic_iam_context_dt: generic_iam_context.data
}
readable_output = tableToMarkdown('Get Zoom User:', generic_iam_context.data)
return (
readable_output,
outputs,
generic_iam_context.data
)
|
def get_user_command(client, args):
scim = verify_and_load_scim_data(args.get('scim'))
scim_flat_data = map_scim(scim)
user_id = scim_flat_data.get('id')
username = scim_flat_data.get('userName')
email = scim_flat_data.get('email')
if not (user_id or username or email):
raise Exception('You must provide either the id, email or username of the user')
if user_id:
user_term = user_id
else:
user_term = email if email else username
res = client.get_user(user_term)
res_json = res.json()
if res.status_code == 200:
active = True if res_json['status'] == "active" else False
generic_iam_context = OutputContext(success=True, iden=res_json.get('id'), email=res_json.get('email'),
username=username, details=res_json, active=active)
elif res.status_code == 404:
generic_iam_context = OutputContext(success=False, iden=user_id, username=username, errorCode=404,
errorMessage=res_json.get('message'), details=res_json)
else:
generic_iam_context = OutputContext(success=False, iden=user_id, username=username,
errorCode=res_json.get('code'),
errorMessage=res_json.get('message'), details=res_json)
generic_iam_context_dt = f'{generic_iam_context.command}(val.id == obj.id && val.instanceName == obj.instanceName)'
outputs = {
generic_iam_context_dt: generic_iam_context.data
}
readable_output = tableToMarkdown('Get Zoom User:', generic_iam_context.data)
return (
readable_output,
outputs,
generic_iam_context.data
)
|
13,879 |
def check_inputfile(value):
r"""
Check that the input file is present. Return the full path.
"""
if not os.path.isfile(value):
raise ArgumentTypeError(
"Element {value} is not a file.".format(value=value))
return os.path.abspath(value)
|
def check_inputfile(value):
r"""
Check that the input file is present. Return the full path.
"""
if not os.path.isfile(value):
raise ArgumentTypeError(
"Should be a file that already exists: {value!r}".format(value=value))
return os.path.abspath(value)
|
8,249 |
def carrington_header(date, observer_coordinate, *, shape_out, projection_code="CAR"):
"""
Construct a FITS-WCS header for a Carrington coordinate frame.
The date-time and observer coordinate of the new coordinate frame
are taken from the input map. The resulting WCS covers the full surface
of the Sun, and has a reference coordinate at (0, 0) degrees Carrington
Longitude/Latitude.
Parameters
----------
date :
Date for the output header.
observer_coordinate :
Observer coordinate for the output header.
shape_out : [int, int]
Output map shape, number of pixels in (latitude, longitude).
projection_code : {'CAR', 'CEA'}
Projection to use for the latitude coordinate.
Returns
-------
`~sunpy.util.MetaDict`
"""
valid_codes = {"CAR", "CEA"}
if projection_code not in valid_codes:
raise ValueError(f"projection_code must be one of {valid_codes}")
frame_out = SkyCoord(
0 * u.deg,
0 * u.deg,
frame="heliographic_carrington",
obstime=date,
observer=observer_coordinate,
)
if projection_code == "CAR":
scale = [360 / int(shape_out[0]), 180 / int(shape_out[1])] * u.deg / u.pix
elif projection_code == "CEA":
# Since, this map uses the cylindrical equal-area (CEA) projection,
# the spacing needs to be to 180/pi times the sin(latitude)
# spacing
# Reference: Section 5.5, Thompson 2006
scale = [360 / int(shape_out[0]), 180 / int(shape_out[1]) / (np.pi / 2)] * u.deg / u.pix
# Header helper expects shape to be in [y, x] order, but scale in [x, y]...
header = make_fitswcs_header(shape_out[::-1], frame_out, scale=scale, projection_code=projection_code)
return header
|
def carrington_header(date, observer_coordinate, *, dimensions, projection_code="CAR"):
"""
Construct a FITS-WCS header for a Carrington coordinate frame.
The date-time and observer coordinate of the new coordinate frame
are taken from the input map. The resulting WCS covers the full surface
of the Sun, and has a reference coordinate at (0, 0) degrees Carrington
Longitude/Latitude.
Parameters
----------
date :
Date for the output header.
observer_coordinate :
Observer coordinate for the output header.
shape_out : [int, int]
Output map shape, number of pixels in (latitude, longitude).
projection_code : {'CAR', 'CEA'}
Projection to use for the latitude coordinate.
Returns
-------
`~sunpy.util.MetaDict`
"""
valid_codes = {"CAR", "CEA"}
if projection_code not in valid_codes:
raise ValueError(f"projection_code must be one of {valid_codes}")
frame_out = SkyCoord(
0 * u.deg,
0 * u.deg,
frame="heliographic_carrington",
obstime=date,
observer=observer_coordinate,
)
if projection_code == "CAR":
scale = [360 / int(shape_out[0]), 180 / int(shape_out[1])] * u.deg / u.pix
elif projection_code == "CEA":
# Since, this map uses the cylindrical equal-area (CEA) projection,
# the spacing needs to be to 180/pi times the sin(latitude)
# spacing
# Reference: Section 5.5, Thompson 2006
scale = [360 / int(shape_out[0]), 180 / int(shape_out[1]) / (np.pi / 2)] * u.deg / u.pix
# Header helper expects shape to be in [y, x] order, but scale in [x, y]...
header = make_fitswcs_header(shape_out[::-1], frame_out, scale=scale, projection_code=projection_code)
return header
|
36,055 |
def validate_instructions(instructions, ctx): # pylint: disable=inconsistent-return-statements, unused-argument
"""Check that the instructions dict contains the necessary keywords"""
instructions_dict = instructions.get_dict()
retrieve_files = instructions_dict.get('retrieve_files', None)
if retrieve_files is None:
errmsg = (
'\n\n'
'no indication of what to do in the instruction node:\n > {}\n'
'(to store the files in the repository set retrieve_files=True,\n'
'to copy them to the specified folder on the remote computer,\n'
'set it to False)\n'
)
return errmsg.format(instructions.uuid)
if not isinstance(retrieve_files, bool):
errmsg = (
'entry for retrieve files inside of instruction node {} must be\n'
'either True or False; instead, it is: {}'
)
return errmsg.format(instructions.uuid, retrieve_files)
local_files = instructions_dict.get('local_files', None)
remote_files = instructions_dict.get('remote_files', None)
symlink_files = instructions_dict.get('symlink_files', None)
if not any([local_files, remote_files, symlink_files]):
errmsg = (
'no indication of which files to copy were found in the instruction node {}.\n'
'Please include at least one of `local_files`, `remote_files`, or `symlink_files`.\n'
'These should be lists containing tuples following the pattern:\n'
'[ ... (source_node_key, source_relpath, target_relpath) ... ] \n'
)
return errmsg.format(instructions.uuid)
|
def validate_instructions(instructions, ctx): # pylint: disable=inconsistent-return-statements, unused-argument
"""Check that the instructions dict contains the necessary keywords"""
instructions_dict = instructions.get_dict()
retrieve_files = instructions_dict.get('retrieve_files', None)
if retrieve_files is None:
errmsg = (
'\n\n'
'no indication of what to do in the instruction node:\n > {}\n'
'(to store the files in the repository set retrieve_files=True,\n'
'to copy them to the specified folder on the remote computer,\n'
'set it to False)\n'
)
return errmsg.format(instructions.uuid)
if not isinstance(retrieve_files, bool):
errmsg = (
'entry for retrieve files inside of instruction node {} must be\n'
'either True or False; instead, it is: {}'
)
return errmsg.format(instructions.uuid, retrieve_files)
local_files = instructions_dict.get('local_files', None)
remote_files = instructions_dict.get('remote_files', None)
symlink_files = instructions_dict.get('symlink_files', None)
if not any([local_files, remote_files, symlink_files]):
errmsg = (
'no indication of which files to copy were found in the instruction node {}.\n'
'Please include at least one of `local_files`, `remote_files`, or `symlink_files`.\n'
'These should be lists containing tuples following the pattern:\n'
'[ ... (source_node_key, source_relpath, target_relpath) ... ] \n'
)
return errmsg
|
43,919 |
def primitive_norm(l, alpha):
r"""Compute the normalization constant for a primitive Gaussian function.
A Gaussian function is defined as
.. math::
G = x^l y^m z^n e^{-\alpha r^2},
where :math:`l = (l, m, n)` defines the angular momentum quantum numbers, :math:`\alpha`
is the exponent and :math:`r = (x, y, z)` determines the center of the function. The
normalization constant for this function is computed as
.. math::
N(l, \alpha) = (\frac{2\alpha}{\pi})^{3/4} \frac{(4 \alpha)^{(l_x + l_y + l_z)/2}}
{(2\l_x-1)!! (2\l_y-1)!! (2\l_z-1)!!)^{1/2}}.
Args:
l (tuple[int]): angular momentum quantum numbers of the basis function
alpha (array[float]): exponent of the primitive Gaussian function
Returns:
n (array[float]): normalization coefficient
**Example**
>>> l = (0, 0, 0)
>>> alpha = np.array([3.425250914])
>>> n = gaussian_norm(l, alpha)
>>> print(n)
array([1.79444183])
"""
lx, ly, lz = l
n = (
(2 * alpha / np.pi) ** 0.75
* (4 * alpha) ** (sum(l) / 2)
/ anp.sqrt(fac2(2 * lx - 1) * fac2(2 * ly - 1) * fac2(2 * lz - 1))
)
return n
|
def primitive_norm(l, alpha):
r"""Compute the normalization constant for a primitive Gaussian function.
A Gaussian function centred at the position :math:`r = (x, y, z)` is defined as
.. math::
G = x^l y^m z^n e^{-\alpha r^2},
where :math:`l = (l, m, n)` defines the angular momentum quantum numbers, :math:`\alpha`
is the exponent and :math:`r = (x, y, z)` determines the center of the function. The
normalization constant for this function is computed as
.. math::
N(l, \alpha) = (\frac{2\alpha}{\pi})^{3/4} \frac{(4 \alpha)^{(l_x + l_y + l_z)/2}}
{(2\l_x-1)!! (2\l_y-1)!! (2\l_z-1)!!)^{1/2}}.
Args:
l (tuple[int]): angular momentum quantum numbers of the basis function
alpha (array[float]): exponent of the primitive Gaussian function
Returns:
n (array[float]): normalization coefficient
**Example**
>>> l = (0, 0, 0)
>>> alpha = np.array([3.425250914])
>>> n = gaussian_norm(l, alpha)
>>> print(n)
array([1.79444183])
"""
lx, ly, lz = l
n = (
(2 * alpha / np.pi) ** 0.75
* (4 * alpha) ** (sum(l) / 2)
/ anp.sqrt(fac2(2 * lx - 1) * fac2(2 * ly - 1) * fac2(2 * lz - 1))
)
return n
|
8,901 |
def test_list_parse_new_lines_strip():
option = types.ListAttribute('foo', strip=False)
# strip isn't used for new-line based list attribute
assert option.parse("""
value 1
"# value 2"
value 3
""") == [
'value 1',
'# value 2',
'value 3',
]
|
def test_list_parse_new_lines_no_strip():
option = types.ListAttribute('foo', strip=False)
# strip isn't used for new-line based list attribute
assert option.parse("""
value 1
"# value 2"
value 3
""") == [
'value 1',
'# value 2',
'value 3',
]
|
6,743 |
def openTestLog(path: FilePath) -> TextIO:
"""
Open the given path such that test log messages can be written to it.
"""
# Always use UTF-8 because, considering all platforms, the system default
# encoding can not reliably encode all code points.
return open(path.path, "a", encoding="utf-8", errors="strict")
|
def _openUTF8Append(path: FilePath) -> TextIO:
"""
Open the given path such that test log messages can be written to it.
"""
# Always use UTF-8 because, considering all platforms, the system default
# encoding can not reliably encode all code points.
return open(path.path, "a", encoding="utf-8", errors="strict")
|
49,954 |
def _test_textsize(request, tmp_path, encoding):
tempname = save_font(request, tmp_path, encoding)
font = ImageFont.load(tempname)
for i in range(255):
(dx, dy) = font.getsize(bytearray([i]))
assert dy == 20
assert dx in (0, 10)
message = charsets[encoding]["message"].encode(encoding)
for j in range(len(message)):
msg = message[: j + 1]
assert font.getsize(msg) == (len(msg) * 10, 20)
|
def _test_textsize(request, tmp_path, encoding):
tempname = save_font(request, tmp_path, encoding)
font = ImageFont.load(tempname)
for i in range(255):
(dx, dy) = font.getsize(bytearray([i]))
assert dy == 20
assert dx in (0, 10)
message = charsets[encoding]["message"].encode(encoding)
for i in range(len(message)):
msg = message[: i + 1]
assert font.getsize(msg) == (len(msg) * 10, 20)
|
43,577 |
def random_layers_uniform(n_layers, n_wires, n_rots=None, low=0, high=2 * pi, seed=None):
r"""Creates an initial parameter array for :func:`~.RandomLayers`, drawn from a uniform distribution.
The shape of the parameter array is ``(n_layers, n_rots)`` and each parameter is drawn uniformly at random \
from between ``low`` and ``high``. The parameters define the rotation angles of the randomly \
positioned rotations applied in each layer.
Args:
n_layers (int): number of layers
n_wires (int): number of qubits
Keyword Args:
n_rots (int): number of rotations, if ``None``, ``n_rots=n_wires``
low (float): minimum value of non-angle gate parameters
high (float): maximum value of non-angle gate parameters
seed (int): seed used in sampling the parameters, makes function call deterministic
Returns:
parameter array
"""
if seed is not None:
np.random.seed(seed)
if n_rots is None:
n_rots = n_wires
params = np.random.uniform(low=low, high=high, size=(n_layers, n_rots))
return params
|
def random_layers_uniform(n_layers, n_wires, n_rots=None, low=0, high=2 * pi, seed=None):
r"""Creates an initial parameter array for :func:`~.RandomLayers`, drawn from a uniform distribution.
The shape of the parameter array is ``(n_layers, n_rots)`` and each parameter is drawn uniformly at random \
from between ``low`` and ``high``. The parameters define the rotation angles of the randomly \
positioned rotations applied in each layer.
Args:
n_layers (int): number of layers
n_wires (int): number of qubits
Keyword Args:
n_rots (int): number of rotations, if ``None``, ``n_rots=n_wires``
low (float): minimum value of non-angle gate parameters
high (float): maximum value of non-angle gate parameters
seed (int): seed used in sampling the parameters, makes function call deterministic
Returns:
array: parameter array
"""
if seed is not None:
np.random.seed(seed)
if n_rots is None:
n_rots = n_wires
params = np.random.uniform(low=low, high=high, size=(n_layers, n_rots))
return params
|
50,305 |
def extend_parser(parser):
subparsers = parser.add_subparsers(
title="object", dest="what", help="Object to manipulate."
)
subparsers.required = True
# populate argparse for all Gitlab Object
classes = []
for cls in gitlab.v4.objects.__dict__.values():
if not isinstance(cls, type):
continue
if issubclass(cls, gitlab.base.RESTManager):
if cls._obj_cls is not None:
classes.append(cls._obj_cls)
classes.sort(key=operator.attrgetter("__name__"))
for cls in classes:
arg_name = cli.cls_to_what(cls)
object_group = subparsers.add_parser(arg_name)
object_subparsers = object_group.add_subparsers(
title="action", dest="whaction", help="Action to execute."
)
_populate_sub_parser_by_class(cls, object_subparsers)
object_subparsers.required = True
return parser
|
def extend_parser(parser):
subparsers = parser.add_subparsers(
title="object", dest="what", help="Object to manipulate."
)
subparsers.required = True
# populate argparse for all Gitlab Object
classes = []
for cls in gitlab.v4.objects.__dict__.values():
if not issubclass(cls, gitlab.base.RESTManager):
continue
if cls._obj_cls is not None:
classes.append(cls._obj_cls)
classes.sort(key=operator.attrgetter("__name__"))
for cls in classes:
arg_name = cli.cls_to_what(cls)
object_group = subparsers.add_parser(arg_name)
object_subparsers = object_group.add_subparsers(
title="action", dest="whaction", help="Action to execute."
)
_populate_sub_parser_by_class(cls, object_subparsers)
object_subparsers.required = True
return parser
|
30,229 |
def assert_dirs_empty(parser, args, required):
"""
Assert that all directories exist are empty.
If exists and not empty, and -f used, delete dirs.
Parameters
----------
parser: argparse.ArgumentParser object
Parser.
args: argparse namespace
Argument list.
required: string or list of paths to files
Required paths to be checked.
create_dir: bool
If true, create the directory if it does not exist.
"""
def check(path):
if os.listdir(path):
if not args.overwrite:
parser.error(
'Output directory {} isn\'t empty and some files could be '
'overwritten or even deleted. Use -f option if you want '
'to continue.'.format(path))
else:
for the_file in os.listdir(path):
file_path = os.path.join(path, the_file)
try:
if os.path.isfile(file_path):
os.unlink(file_path)
elif os.path.isdir(file_path):
shutil.rmtree(file_path)
except Exception as e:
print(e)
if isinstance(required, str):
required = [required]
for cur_dir in required:
check(cur_dir)
|
def assert_dirs_empty(parser, args, required):
"""
Assert that all directories exist are empty.
If dirs exist and not empty, and --force is used, delete dirs.
Parameters
----------
parser: argparse.ArgumentParser object
Parser.
args: argparse namespace
Argument list.
required: string or list of paths to files
Required paths to be checked.
create_dir: bool
If true, create the directory if it does not exist.
"""
def check(path):
if os.listdir(path):
if not args.overwrite:
parser.error(
'Output directory {} isn\'t empty and some files could be '
'overwritten or even deleted. Use -f option if you want '
'to continue.'.format(path))
else:
for the_file in os.listdir(path):
file_path = os.path.join(path, the_file)
try:
if os.path.isfile(file_path):
os.unlink(file_path)
elif os.path.isdir(file_path):
shutil.rmtree(file_path)
except Exception as e:
print(e)
if isinstance(required, str):
required = [required]
for cur_dir in required:
check(cur_dir)
|
2,232 |
def fastica(
X,
n_components=None,
*,
algorithm="parallel",
whiten=None,
fun="logcosh",
fun_args=None,
max_iter=200,
tol=1e-04,
w_init=None,
random_state=None,
return_X_mean=False,
compute_sources=True,
return_n_iter=False,
):
"""Perform Fast Independent Component Analysis.
The implementation is based on [1]_.
Read more in the :ref:`User Guide <ICA>`.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training vector, where `n_samples` is the number of samples and
`n_features` is the number of features.
n_components : int, default=None
Number of components to extract. If None no dimension reduction
is performed.
algorithm : {'parallel', 'deflation'}, default='parallel'
Apply a parallel or deflational FASTICA algorithm.
whiten : str or bool, default=None
Specify the whitening strategy to use.
If 'arbitrary-variance', a whitening with variance arbitrary is used.
If 'unit-variance', the whitening variance is adjusted to be unitary.
If False, the data is already considered to be whitened, and no
whitening is performed.
If None (default), 'arbitrary-variance' is used.
.. deprecated:: 1.1
From version 1.3 whiten='unit-variance' will be used by default.
`whiten=True` is deprecated from 1.1 and will be removed in 1.3.
Use `whiten=arbitrary-variance` instead.
fun : {'logcosh', 'exp', 'cube'} or callable, default='logcosh'
The functional form of the G function used in the
approximation to neg-entropy. Could be either 'logcosh', 'exp',
or 'cube'.
You can also provide your own function. It should return a tuple
containing the value of the function, and of its derivative, in the
point. The derivative should be averaged along its last dimension.
Example:
def my_g(x):
return x ** 3, np.mean(3 * x ** 2, axis=-1)
fun_args : dict, default=None
Arguments to send to the functional form.
If empty or None and if fun='logcosh', fun_args will take value
{'alpha' : 1.0}
max_iter : int, default=200
Maximum number of iterations to perform.
tol : float, default=1e-04
A positive scalar giving the tolerance at which the
un-mixing matrix is considered to have converged.
w_init : ndarray of shape (n_components, n_components), default=None
Initial un-mixing array of dimension (n.comp,n.comp).
If None (default) then an array of normal r.v.'s is used.
random_state : int, RandomState instance or None, default=None
Used to initialize ``w_init`` when not specified, with a
normal distribution. Pass an int, for reproducible results
across multiple function calls.
See :term:`Glossary <random_state>`.
return_X_mean : bool, default=False
If True, X_mean is returned too.
compute_sources : bool, default=True
If False, sources are not computed, but only the rotation matrix.
This can save memory when working with big data. Defaults to True.
return_n_iter : bool, default=False
Whether or not to return the number of iterations.
Returns
-------
K : ndarray of shape (n_components, n_features) or None
If whiten is 'True', K is the pre-whitening matrix that projects data
onto the first n_components principal components. If whiten is 'False',
K is 'None'.
W : ndarray of shape (n_components, n_components)
The square matrix that unmixes the data after whitening.
The mixing matrix is the pseudo-inverse of matrix ``W K``
if K is not None, else it is the inverse of W.
S : ndarray of shape (n_samples, n_components) or None
Estimated source matrix
X_mean : ndarray of shape (n_features,)
The mean over features. Returned only if return_X_mean is True.
n_iter : int
If the algorithm is "deflation", n_iter is the
maximum number of iterations run across all components. Else
they are just the number of iterations taken to converge. This is
returned only when return_n_iter is set to `True`.
Notes
-----
The data matrix X is considered to be a linear combination of
non-Gaussian (independent) components i.e. X = AS where columns of S
contain the independent components and A is a linear mixing
matrix. In short ICA attempts to `un-mix' the data by estimating an
un-mixing matrix W where ``S = W K X.``
While FastICA was proposed to estimate as many sources
as features, it is possible to estimate less by setting
n_components < n_features. It this case K is not a square matrix
and the estimated A is the pseudo-inverse of ``W K``.
This implementation was originally made for data of shape
[n_features, n_samples]. Now the input is transposed
before the algorithm is applied. This makes it slightly
faster for Fortran-ordered input.
References
----------
.. [1] A. Hyvarinen and E. Oja, "Fast Independent Component Analysis",
Algorithms and Applications, Neural Networks, 13(4-5), 2000,
pp. 411-430.
"""
est = FastICA(
n_components=n_components,
algorithm=algorithm,
whiten=whiten,
fun=fun,
fun_args=fun_args,
max_iter=max_iter,
tol=tol,
w_init=w_init,
random_state=random_state,
)
S = est._fit(X, compute_sources=compute_sources)
if est.whiten_ in ["unitary-variance", "arbitrary-variance"]:
K = est.whitening_
X_mean = est.mean_
else:
K = None
X_mean = None
returned_values = [K, est._unmixing, S]
if return_X_mean:
returned_values.append(X_mean)
if return_n_iter:
returned_values.append(est.n_iter_)
return returned_values
|
def fastica(
X,
n_components=None,
*,
algorithm="parallel",
whiten=None,
fun="logcosh",
fun_args=None,
max_iter=200,
tol=1e-04,
w_init=None,
random_state=None,
return_X_mean=False,
compute_sources=True,
return_n_iter=False,
):
"""Perform Fast Independent Component Analysis.
The implementation is based on [1]_.
Read more in the :ref:`User Guide <ICA>`.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training vector, where `n_samples` is the number of samples and
`n_features` is the number of features.
n_components : int, default=None
Number of components to extract. If None no dimension reduction
is performed.
algorithm : {'parallel', 'deflation'}, default='parallel'
Apply a parallel or deflational FASTICA algorithm.
whiten : str or bool, default=None
Specify the whitening strategy to use.
If 'arbitrary-variance' (default), a whitening with variance arbitrary is used.
If 'unit-variance', the whitening variance is adjusted to be unitary.
If False, the data is already considered to be whitened, and no
whitening is performed.
If None (default), 'arbitrary-variance' is used.
.. deprecated:: 1.1
From version 1.3 whiten='unit-variance' will be used by default.
`whiten=True` is deprecated from 1.1 and will be removed in 1.3.
Use `whiten=arbitrary-variance` instead.
fun : {'logcosh', 'exp', 'cube'} or callable, default='logcosh'
The functional form of the G function used in the
approximation to neg-entropy. Could be either 'logcosh', 'exp',
or 'cube'.
You can also provide your own function. It should return a tuple
containing the value of the function, and of its derivative, in the
point. The derivative should be averaged along its last dimension.
Example:
def my_g(x):
return x ** 3, np.mean(3 * x ** 2, axis=-1)
fun_args : dict, default=None
Arguments to send to the functional form.
If empty or None and if fun='logcosh', fun_args will take value
{'alpha' : 1.0}
max_iter : int, default=200
Maximum number of iterations to perform.
tol : float, default=1e-04
A positive scalar giving the tolerance at which the
un-mixing matrix is considered to have converged.
w_init : ndarray of shape (n_components, n_components), default=None
Initial un-mixing array of dimension (n.comp,n.comp).
If None (default) then an array of normal r.v.'s is used.
random_state : int, RandomState instance or None, default=None
Used to initialize ``w_init`` when not specified, with a
normal distribution. Pass an int, for reproducible results
across multiple function calls.
See :term:`Glossary <random_state>`.
return_X_mean : bool, default=False
If True, X_mean is returned too.
compute_sources : bool, default=True
If False, sources are not computed, but only the rotation matrix.
This can save memory when working with big data. Defaults to True.
return_n_iter : bool, default=False
Whether or not to return the number of iterations.
Returns
-------
K : ndarray of shape (n_components, n_features) or None
If whiten is 'True', K is the pre-whitening matrix that projects data
onto the first n_components principal components. If whiten is 'False',
K is 'None'.
W : ndarray of shape (n_components, n_components)
The square matrix that unmixes the data after whitening.
The mixing matrix is the pseudo-inverse of matrix ``W K``
if K is not None, else it is the inverse of W.
S : ndarray of shape (n_samples, n_components) or None
Estimated source matrix
X_mean : ndarray of shape (n_features,)
The mean over features. Returned only if return_X_mean is True.
n_iter : int
If the algorithm is "deflation", n_iter is the
maximum number of iterations run across all components. Else
they are just the number of iterations taken to converge. This is
returned only when return_n_iter is set to `True`.
Notes
-----
The data matrix X is considered to be a linear combination of
non-Gaussian (independent) components i.e. X = AS where columns of S
contain the independent components and A is a linear mixing
matrix. In short ICA attempts to `un-mix' the data by estimating an
un-mixing matrix W where ``S = W K X.``
While FastICA was proposed to estimate as many sources
as features, it is possible to estimate less by setting
n_components < n_features. It this case K is not a square matrix
and the estimated A is the pseudo-inverse of ``W K``.
This implementation was originally made for data of shape
[n_features, n_samples]. Now the input is transposed
before the algorithm is applied. This makes it slightly
faster for Fortran-ordered input.
References
----------
.. [1] A. Hyvarinen and E. Oja, "Fast Independent Component Analysis",
Algorithms and Applications, Neural Networks, 13(4-5), 2000,
pp. 411-430.
"""
est = FastICA(
n_components=n_components,
algorithm=algorithm,
whiten=whiten,
fun=fun,
fun_args=fun_args,
max_iter=max_iter,
tol=tol,
w_init=w_init,
random_state=random_state,
)
S = est._fit(X, compute_sources=compute_sources)
if est.whiten_ in ["unitary-variance", "arbitrary-variance"]:
K = est.whitening_
X_mean = est.mean_
else:
K = None
X_mean = None
returned_values = [K, est._unmixing, S]
if return_X_mean:
returned_values.append(X_mean)
if return_n_iter:
returned_values.append(est.n_iter_)
return returned_values
|
34,245 |
def ensure_loaded_agent(app: Sanic, allow_nlu_only: bool = False):
"""Wraps a request handler ensuring there is a loaded and usable agent.
If `allow_nlu_only is `True`, consider the agent ready event if no policy
ensemble is present.
"""
def decorator(f):
@wraps(f)
def decorated(*args, **kwargs):
if not app.agent or not app.agent.is_ready(allow_nlu_only):
raise ErrorResponse(
409,
"Conflict",
"No agent loaded. To continue processing, a "
"model of a trained agent needs to be loaded.",
help_url=_docs("/user-guide/running-the-server/"),
)
return f(*args, **kwargs)
return decorated
return decorator
|
def ensure_loaded_agent(app: Sanic, allow_nlu_only: bool = False):
"""Wraps a request handler ensuring there is a loaded and usable agent.
If `allow_nlu_only` is `True`, consider the agent ready event if no policy
ensemble is present.
"""
def decorator(f):
@wraps(f)
def decorated(*args, **kwargs):
if not app.agent or not app.agent.is_ready(allow_nlu_only):
raise ErrorResponse(
409,
"Conflict",
"No agent loaded. To continue processing, a "
"model of a trained agent needs to be loaded.",
help_url=_docs("/user-guide/running-the-server/"),
)
return f(*args, **kwargs)
return decorated
return decorator
|
7,664 |
def generate_session_component(session, related_to_uid=None):
"""Generates an Event icalendar component from an Indico Session.
:param session: The Indico Session to use
:param related_to_uid: Indico uid used in related_to field
:returns: an icalendar Event
"""
uid = 'indico-session-{}@{}'.format(session.id, url_parse(config.BASE_URL).host)
url = url_for('sessions.display_session', session, _external=True)
component = generate_basic_component(session, uid, url)
if related_to_uid:
component.add('related_to', related_to_uid)
return component
|
def generate_session_component(session, related_to_uid=None):
"""Generates an Event icalendar component from an Indico Session.
:param session: The Indico Session to use
:param related_to_uid: Indico uid used in related_to field
:returns: an icalendar Event
"""
uid = f'indico-session-{session.id}@{url_parse(config.BASE_URL).host}'
url = url_for('sessions.display_session', session, _external=True)
component = generate_basic_component(session, uid, url)
if related_to_uid:
component.add('related_to', related_to_uid)
return component
|
28,159 |
def test_runs_from_different_experiments_raises(two_empty_temp_db_connections,
some_paramspecs):
"""
Test that inserting runs from multiple experiments raises
"""
source_conn, target_conn = two_empty_temp_db_connections
source_path = path_to_dbfile(source_conn)
target_path = path_to_dbfile(target_conn)
source_exp_1 = Experiment(conn=source_conn)
source_exp_2 = Experiment(conn=source_conn)
# make 5 runs in first experiment
exp_1_run_ids = []
for _ in range(5):
source_dataset = DataSet(conn=source_conn, exp_id=source_exp_1.exp_id)
exp_1_run_ids.append(source_dataset.run_id)
for ps in some_paramspecs[2].values():
source_dataset.add_parameter(ps)
for val in range(10):
source_dataset.add_result({ps.name: val
for ps in some_paramspecs[2].values()})
source_dataset.mark_complete()
# make 5 runs in second experiment
exp_2_run_ids = []
for _ in range(5):
source_dataset = DataSet(conn=source_conn, exp_id=source_exp_2.exp_id)
exp_2_run_ids.append(source_dataset.run_id)
for ps in some_paramspecs[2].values():
source_dataset.add_parameter(ps)
for val in range(10):
source_dataset.add_result({ps.name: val
for ps in some_paramspecs[2].values()})
source_dataset.mark_complete()
run_ids = exp_1_run_ids + exp_2_run_ids
source_exp_ids = np.unique([1, 2])
matchstring = ('Did not receive runs from a single experiment\\. '
f'Got runs from experiments {source_exp_ids}')
# make the matchstring safe to use as a regexp
matchstring = matchstring.replace('[', '\\[').replace(']', '\\]')
with pytest.raises(ValueError, match=matchstring):
extract_runs_into_db(source_path, target_path, *run_ids)
|
def test_runs_from_different_experiments_raises(two_empty_temp_db_connections,
some_paramspecs):
"""
Test that inserting runs from multiple experiments raises
"""
source_conn, target_conn = two_empty_temp_db_connections
source_path = path_to_dbfile(source_conn)
target_path = path_to_dbfile(target_conn)
source_exp_1 = Experiment(conn=source_conn)
source_exp_2 = Experiment(conn=source_conn)
# make 5 runs in first experiment
exp_1_run_ids = []
for _ in range(5):
source_dataset = DataSet(conn=source_conn, exp_id=source_exp_1.exp_id)
exp_1_run_ids.append(source_dataset.run_id)
for ps in some_paramspecs[2].values():
source_dataset.add_parameter(ps)
for val in range(10):
source_dataset.add_result({ps.name: val
for ps in some_paramspecs[2].values()})
source_dataset.mark_complete()
# make 5 runs in second experiment
exp_2_run_ids = []
for _ in range(5):
source_dataset = DataSet(conn=source_conn, exp_id=source_exp_2.exp_id)
exp_2_run_ids.append(source_dataset.run_id)
for ps in some_paramspecs[2].values():
source_dataset.add_parameter(ps)
for val in range(10):
source_dataset.add_result({ps.name: val
for ps in some_paramspecs[2].values()})
source_dataset.mark_complete()
run_ids = exp_1_run_ids + exp_2_run_ids
source_exp_ids = [source_exp_1.exp_id, source_exp_2.exp_id]
matchstring = ('Did not receive runs from a single experiment\\. '
f'Got runs from experiments {source_exp_ids}')
# make the matchstring safe to use as a regexp
matchstring = matchstring.replace('[', '\\[').replace(']', '\\]')
with pytest.raises(ValueError, match=matchstring):
extract_runs_into_db(source_path, target_path, *run_ids)
|
7,036 |
def read_and_proc(fpath, template_vars=None, viewcfg=None, asedit=False):
"""
Read a cylc parsec config file (at fpath), inline any include files,
process with Jinja2, and concatenate continuation lines.
Jinja2 processing must be done before concatenation - it could be
used to generate continuation lines.
"""
fdir = os.path.dirname(fpath)
# Allow Python modules in lib/python/ (e.g. for use by Jinja2 filters).
suite_lib_python = os.path.join(fdir, "lib", "python")
if os.path.isdir(suite_lib_python) and suite_lib_python not in sys.path:
sys.path.append(suite_lib_python)
LOG.debug('Reading file %s', fpath)
# read the file into a list, stripping newlines
with open(fpath) as f:
flines = [line.rstrip('\n') for line in f]
do_inline = True
do_empy = True
do_jinja2 = True
do_contin = True
extra_vars = process_plugins(Path(fpath).parent)
if not template_vars:
template_vars = {}
if viewcfg:
if not viewcfg['empy']:
do_empy = False
if not viewcfg['jinja2']:
do_jinja2 = False
if not viewcfg['contin']:
do_contin = False
if not viewcfg['inline']:
do_inline = False
# inline any cylc include-files
if do_inline:
flines = inline(
flines, fdir, fpath, False, viewcfg=viewcfg, for_edit=asedit)
template_vars['CYLC_VERSION'] = __version__
# Push template_vars into extra_vars so that duplicates come from
# template_vars.
if extra_vars['templating_detected'] is not None:
will_be_overwritten = (
template_vars.keys() &
extra_vars['template_variables'].keys()
)
for key in will_be_overwritten:
LOG.warning(
f'Overriding {key}: {extra_vars["template_variables"][key]} ->'
f' {template_vars[key]}'
)
extra_vars['template_variables'].update(template_vars)
template_vars = extra_vars['template_variables']
# process with EmPy
if do_empy:
if (
extra_vars['templating_detected'] == 'empy:suite.rc' and
not re.match(r'^#![Ee]m[Pp]y\s*', flines[0])
):
if not re.match(r'^#!', flines[0]):
flines.insert(0, '#!empy')
else:
raise FileParseError(
"Plugins set templating engine = "
f"{extra_vars['templating_detected']}"
f" which does not match {flines[0]} set in flow.cylc."
)
if flines and re.match(r'^#![Ee]m[Pp]y\s*', flines[0]):
LOG.debug('Processing with EmPy')
try:
from cylc.flow.parsec.empysupport import empyprocess
except (ImportError, ModuleNotFoundError):
raise ParsecError('EmPy Python package must be installed '
'to process file: ' + fpath)
flines = empyprocess(
flines, fdir, template_vars
)
# process with Jinja2
if do_jinja2:
if (
extra_vars['templating_detected'] == 'jinja2:suite.rc' and
not re.match(r'^#![jJ]inja2\s*', flines[0])
):
if not re.match(r'^#!', flines[0]):
flines.insert(0, '#!jinja2')
else:
raise FileParseError(
"Plugins set templating engine = "
f"{extra_vars['templating_detected']}"
f" which does not match {flines[0]} set in flow.cylc."
)
if flines and re.match(r'^#![jJ]inja2\s*', flines[0]):
LOG.debug('Processing with Jinja2')
try:
from cylc.flow.parsec.jinja2support import jinja2process
except (ImportError, ModuleNotFoundError):
raise ParsecError('Jinja2 Python package must be installed '
'to process file: ' + fpath)
flines = jinja2process(
flines, fdir, template_vars
)
# concatenate continuation lines
if do_contin:
flines = _concatenate(flines)
# return rstripped lines
return [fl.rstrip() for fl in flines]
|
def read_and_proc(fpath, template_vars=None, viewcfg=None, asedit=False):
"""
Read a cylc parsec config file (at fpath), inline any include files,
process with Jinja2, and concatenate continuation lines.
Jinja2 processing must be done before concatenation - it could be
used to generate continuation lines.
"""
fdir = os.path.dirname(fpath)
# Allow Python modules in lib/python/ (e.g. for use by Jinja2 filters).
suite_lib_python = os.path.join(fdir, "lib", "python")
if os.path.isdir(suite_lib_python) and suite_lib_python not in sys.path:
sys.path.append(suite_lib_python)
LOG.debug('Reading file %s', fpath)
# read the file into a list, stripping newlines
with open(fpath) as f:
flines = [line.rstrip('\n') for line in f]
do_inline = True
do_empy = True
do_jinja2 = True
do_contin = True
extra_vars = process_plugins(Path(fpath).parent)
if not template_vars:
template_vars = {}
if viewcfg:
if not viewcfg['empy']:
do_empy = False
if not viewcfg['jinja2']:
do_jinja2 = False
if not viewcfg['contin']:
do_contin = False
if not viewcfg['inline']:
do_inline = False
# inline any cylc include-files
if do_inline:
flines = inline(
flines, fdir, fpath, False, viewcfg=viewcfg, for_edit=asedit)
template_vars['CYLC_VERSION'] = __version__
# Push template_vars into extra_vars so that duplicates come from
# template_vars.
if extra_vars['templating_detected'] is not None:
will_be_overwritten = (
template_vars.keys() &
extra_vars['template_variables'].keys()
)
for key in will_be_overwritten:
LOG.warning(
f'Overriding {key}: {extra_vars["template_variables"][key]} ->'
f' {template_vars[key]}'
)
extra_vars['template_variables'].update(template_vars)
template_vars = extra_vars['template_variables']
# process with EmPy
if do_empy:
if (
extra_vars['templating_detected'] == 'empy:flow.cylc' and
not re.match(r'^#![Ee]m[Pp]y\s*', flines[0])
):
if not re.match(r'^#!', flines[0]):
flines.insert(0, '#!empy')
else:
raise FileParseError(
"Plugins set templating engine = "
f"{extra_vars['templating_detected']}"
f" which does not match {flines[0]} set in flow.cylc."
)
if flines and re.match(r'^#![Ee]m[Pp]y\s*', flines[0]):
LOG.debug('Processing with EmPy')
try:
from cylc.flow.parsec.empysupport import empyprocess
except (ImportError, ModuleNotFoundError):
raise ParsecError('EmPy Python package must be installed '
'to process file: ' + fpath)
flines = empyprocess(
flines, fdir, template_vars
)
# process with Jinja2
if do_jinja2:
if (
extra_vars['templating_detected'] == 'jinja2:suite.rc' and
not re.match(r'^#![jJ]inja2\s*', flines[0])
):
if not re.match(r'^#!', flines[0]):
flines.insert(0, '#!jinja2')
else:
raise FileParseError(
"Plugins set templating engine = "
f"{extra_vars['templating_detected']}"
f" which does not match {flines[0]} set in flow.cylc."
)
if flines and re.match(r'^#![jJ]inja2\s*', flines[0]):
LOG.debug('Processing with Jinja2')
try:
from cylc.flow.parsec.jinja2support import jinja2process
except (ImportError, ModuleNotFoundError):
raise ParsecError('Jinja2 Python package must be installed '
'to process file: ' + fpath)
flines = jinja2process(
flines, fdir, template_vars
)
# concatenate continuation lines
if do_contin:
flines = _concatenate(flines)
# return rstripped lines
return [fl.rstrip() for fl in flines]
|
14,569 |
def eye(
timebase,
is_alive_flag,
ipc_pub_url,
ipc_sub_url,
ipc_push_url,
user_dir,
version,
eye_id,
overwrite_cap_settings=None,
hide_ui=False,
hwm=None,
):
"""reads eye video and detects the pupil.
Creates a window, gl context.
Grabs images from a capture.
Streams Pupil coordinates.
Reacts to notifications:
``set_detection_mapping_mode``: Sets detection method
``eye_process.should_stop``: Stops the eye process
``recording.started``: Starts recording eye video
``recording.stopped``: Stops recording eye video
``frame_publishing.started``: Starts frame publishing
``frame_publishing.stopped``: Stops frame publishing
``start_eye_plugin``: Start plugins in eye process
Emits notifications:
``eye_process.started``: Eye process started
``eye_process.stopped``: Eye process stopped
Emits data:
``pupil.<eye id>``: Pupil data for eye with id ``<eye id>``
``frame.eye.<eye id>``: Eye frames with id ``<eye id>``
"""
# We deferr the imports becasue of multiprocessing.
# Otherwise the world process each process also loads the other imports.
import zmq
import zmq_tools
zmq_ctx = zmq.Context()
ipc_socket = zmq_tools.Msg_Dispatcher(zmq_ctx, ipc_push_url)
pupil_socket = zmq_tools.Msg_Streamer(zmq_ctx, ipc_pub_url, hwm)
notify_sub = zmq_tools.Msg_Receiver(zmq_ctx, ipc_sub_url, topics=("notify",))
# logging setup
import logging
logging.getLogger("OpenGL").setLevel(logging.ERROR)
logger = logging.getLogger()
logger.handlers = []
logger.setLevel(logging.NOTSET)
logger.addHandler(zmq_tools.ZMQ_handler(zmq_ctx, ipc_push_url))
# create logger for the context of this function
logger = logging.getLogger(__name__)
if is_alive_flag.value:
# indicates eye process that this is a duplicated startup
logger.warning("Aborting redundant eye process startup")
return
with Is_Alive_Manager(is_alive_flag, ipc_socket, eye_id, logger):
# general imports
import traceback
import numpy as np
import cv2
# display
import glfw
from pyglui import ui, graph, cygl
from pyglui.cygl.utils import draw_points, RGBA, draw_polyline
from pyglui.cygl.utils import Named_Texture
from gl_utils import basic_gl_setup, adjust_gl_view, clear_gl_screen
from gl_utils import make_coord_system_pixel_based
from gl_utils import make_coord_system_norm_based
from gl_utils import is_window_visible, glViewport
# monitoring
import psutil
# Plug-ins
from plugin import Plugin_List
# helpers/utils
from uvc import get_time_monotonic
from file_methods import Persistent_Dict
from version_utils import VersionFormat
from methods import normalize, denormalize, timer
from av_writer import JPEG_Writer, MPEG_Writer, NonMonotonicTimestampError
from ndsi import H264Writer
from video_capture import source_classes, manager_classes
from roi import Roi
from background_helper import IPC_Logging_Task_Proxy
from pupil_detector_plugins import available_detector_plugins
from pupil_detector_plugins.manager import PupilDetectorManager
IPC_Logging_Task_Proxy.push_url = ipc_push_url
def interrupt_handler(sig, frame):
import traceback
trace = traceback.format_stack(f=frame)
logger.debug(f"Caught signal {sig} in:\n" + "".join(trace))
# NOTE: Interrupt is handled in world/service/player which are responsible for
# shutting down the eye process properly
signal.signal(signal.SIGINT, interrupt_handler)
# UI Platform tweaks
if platform.system() == "Linux":
scroll_factor = 10.0
window_position_default = (600, 300 * eye_id + 30)
elif platform.system() == "Windows":
scroll_factor = 10.0
window_position_default = (600, 90 + 300 * eye_id)
else:
scroll_factor = 1.0
window_position_default = (600, 300 * eye_id)
icon_bar_width = 50
window_size = None
hdpi_factor = 1.0
# g_pool holds variables for this process
g_pool = SimpleNamespace()
# make some constants avaiable
g_pool.user_dir = user_dir
g_pool.version = version
g_pool.app = "capture"
g_pool.eye_id = eye_id
g_pool.process = f"eye{eye_id}"
g_pool.timebase = timebase
g_pool.camera_render_size = None
g_pool.ipc_pub = ipc_socket
def get_timestamp():
return get_time_monotonic() - g_pool.timebase.value
g_pool.get_timestamp = get_timestamp
g_pool.get_now = get_time_monotonic
default_detector_cls, available_detectors = available_detector_plugins()
plugins = (
manager_classes
+ source_classes
+ available_detectors
+ [PupilDetectorManager, Roi]
)
g_pool.plugin_by_name = {p.__name__: p for p in plugins}
preferred_names = [
f"Pupil Cam3 ID{eye_id}",
f"Pupil Cam2 ID{eye_id}",
f"Pupil Cam1 ID{eye_id}",
]
if eye_id == 0:
preferred_names += ["HD-6000"]
default_capture_name = "UVC_Source"
default_capture_settings = {
"preferred_names": preferred_names,
"frame_size": (320, 240),
"frame_rate": 120,
}
default_plugins = [
# TODO: extend with plugins
(default_capture_name, default_capture_settings),
("UVC_Manager", {}),
("NDSI_Manager", {}),
("HMD_Streaming_Manager", {}),
("File_Manager", {}),
# Detector needs to be loaded first to set `g_pool.pupil_detector`
(default_detector_cls.__name__, {}),
("PupilDetectorManager", {}),
("Roi", {}),
]
# Callback functions
def on_resize(window, w, h):
nonlocal window_size
nonlocal hdpi_factor
active_window = glfw.glfwGetCurrentContext()
glfw.glfwMakeContextCurrent(window)
hdpi_factor = glfw.getHDPIFactor(window)
g_pool.gui.scale = g_pool.gui_user_scale * hdpi_factor
window_size = w, h
g_pool.camera_render_size = w - int(icon_bar_width * g_pool.gui.scale), h
g_pool.gui.update_window(w, h)
g_pool.gui.collect_menus()
for g in g_pool.graphs:
g.scale = hdpi_factor
g.adjust_window_size(w, h)
adjust_gl_view(w, h)
glfw.glfwMakeContextCurrent(active_window)
def on_window_key(window, key, scancode, action, mods):
g_pool.gui.update_key(key, scancode, action, mods)
def on_window_char(window, char):
g_pool.gui.update_char(char)
def on_iconify(window, iconified):
g_pool.iconified = iconified
def on_window_mouse_button(window, button, action, mods):
g_pool.gui.update_button(button, action, mods)
def on_pos(window, x, y):
x, y = x * hdpi_factor, y * hdpi_factor
g_pool.gui.update_mouse(x, y)
pos = x, y
pos = normalize(pos, g_pool.camera_render_size)
if g_pool.flip:
pos = 1 - pos[0], 1 - pos[1]
# Position in img pixels
pos = denormalize(pos, g_pool.capture.frame_size)
for p in g_pool.plugins:
p.on_pos(pos)
def on_scroll(window, x, y):
g_pool.gui.update_scroll(x, y * scroll_factor)
def on_drop(window, count, paths):
paths = [paths[x].decode("utf-8") for x in range(count)]
for plugin in g_pool.plugins:
if plugin.on_drop(paths):
break
# load session persistent settings
session_settings = Persistent_Dict(
os.path.join(g_pool.user_dir, "user_settings_eye{}".format(eye_id))
)
if VersionFormat(session_settings.get("version", "0.0")) != g_pool.version:
logger.info(
"Session setting are from a different version of this app. I will not use those."
)
session_settings.clear()
g_pool.iconified = False
g_pool.capture = None
g_pool.flip = session_settings.get("flip", False)
g_pool.display_mode = session_settings.get("display_mode", "camera_image")
g_pool.display_mode_info_text = {
"camera_image": "Raw eye camera image. This uses the least amount of CPU power",
"roi": "Click and drag on the blue circles to adjust the region of interest. The region should be as small as possible, but large enough to capture all pupil movements.",
"algorithm": "Algorithm display mode overlays a visualization of the pupil detection parameters on top of the eye video. Adjust parameters within the Pupil Detection menu below.",
}
def set_display_mode_info(val):
g_pool.display_mode = val
g_pool.display_mode_info.text = g_pool.display_mode_info_text[val]
def toggle_general_settings(collapsed):
# this is the menu toggle logic.
# Only one menu can be open.
# If no menu is open the menubar should collapse.
g_pool.menubar.collapsed = collapsed
for m in g_pool.menubar.elements:
m.collapsed = True
general_settings.collapsed = collapsed
# Initialize glfw
glfw.glfwInit()
if hide_ui:
glfw.glfwWindowHint(glfw.GLFW_VISIBLE, 0) # hide window
title = "Pupil Capture - eye {}".format(eye_id)
width, height = session_settings.get("window_size", (640 + icon_bar_width, 480))
main_window = glfw.glfwCreateWindow(width, height, title, None, None)
window_pos = session_settings.get("window_position", window_position_default)
glfw.glfwSetWindowPos(main_window, window_pos[0], window_pos[1])
glfw.glfwMakeContextCurrent(main_window)
cygl.utils.init()
# UI callback functions
def set_scale(new_scale):
g_pool.gui_user_scale = new_scale
on_resize(main_window, *glfw.glfwGetFramebufferSize(main_window))
# gl_state settings
basic_gl_setup()
g_pool.image_tex = Named_Texture()
g_pool.image_tex.update_from_ndarray(np.ones((1, 1), dtype=np.uint8) + 125)
# setup GUI
g_pool.gui = ui.UI()
g_pool.gui_user_scale = session_settings.get("gui_scale", 1.0)
g_pool.menubar = ui.Scrolling_Menu(
"Settings", pos=(-500, 0), size=(-icon_bar_width, 0), header_pos="left"
)
g_pool.iconbar = ui.Scrolling_Menu(
"Icons", pos=(-icon_bar_width, 0), size=(0, 0), header_pos="hidden"
)
g_pool.gui.append(g_pool.menubar)
g_pool.gui.append(g_pool.iconbar)
general_settings = ui.Growing_Menu("General", header_pos="headline")
general_settings.append(
ui.Selector(
"gui_user_scale",
g_pool,
setter=set_scale,
selection=[0.8, 0.9, 1.0, 1.1, 1.2],
label="Interface Size",
)
)
def set_window_size():
f_width, f_height = g_pool.capture.frame_size
f_width *= 2
f_height *= 2
f_width += int(icon_bar_width * g_pool.gui.scale)
glfw.glfwSetWindowSize(main_window, f_width, f_height)
general_settings.append(ui.Button("Reset window size", set_window_size))
g_pool.hwm = pupil_socket.get_hwm()
def update_hwm(new_hwm):
g_pool.hwm = new_hwm
pupil_socket.set_hwm(new_hwm)
general_settings.append(ui.Text_Input("hwm", g_pool, setter=update_hwm, label="ZMQ High Water Mark"))
general_settings.append(ui.Switch("flip", g_pool, label="Flip image display"))
general_settings.append(
ui.Selector(
"display_mode",
g_pool,
setter=set_display_mode_info,
selection=["camera_image", "roi", "algorithm"],
labels=["Camera Image", "ROI", "Algorithm"],
label="Mode",
)
)
g_pool.display_mode_info = ui.Info_Text(
g_pool.display_mode_info_text[g_pool.display_mode]
)
general_settings.append(g_pool.display_mode_info)
g_pool.menubar.append(general_settings)
icon = ui.Icon(
"collapsed",
general_settings,
label=chr(0xE8B8),
on_val=False,
off_val=True,
setter=toggle_general_settings,
label_font="pupil_icons",
)
icon.tooltip = "General Settings"
g_pool.iconbar.append(icon)
toggle_general_settings(False)
plugins_to_load = session_settings.get("loaded_plugins", default_plugins)
if overwrite_cap_settings:
# Ensure that overwrite_cap_settings takes preference over source plugins
# with incorrect settings that were loaded from session settings.
plugins_to_load.append(overwrite_cap_settings)
g_pool.plugins = Plugin_List(g_pool, plugins_to_load)
if not g_pool.capture:
# Make sure we always have a capture running. Important if there was no
# capture stored in session settings.
g_pool.plugins.add(
g_pool.plugin_by_name[default_capture_name], default_capture_settings
)
g_pool.writer = None
# Register callbacks main_window
glfw.glfwSetFramebufferSizeCallback(main_window, on_resize)
glfw.glfwSetWindowIconifyCallback(main_window, on_iconify)
glfw.glfwSetKeyCallback(main_window, on_window_key)
glfw.glfwSetCharCallback(main_window, on_window_char)
glfw.glfwSetMouseButtonCallback(main_window, on_window_mouse_button)
glfw.glfwSetCursorPosCallback(main_window, on_pos)
glfw.glfwSetScrollCallback(main_window, on_scroll)
glfw.glfwSetDropCallback(main_window, on_drop)
# load last gui configuration
g_pool.gui.configuration = session_settings.get("ui_config", {})
# set up performance graphs
pid = os.getpid()
ps = psutil.Process(pid)
ts = g_pool.get_timestamp()
cpu_graph = graph.Bar_Graph()
cpu_graph.pos = (20, 50)
cpu_graph.update_fn = ps.cpu_percent
cpu_graph.update_rate = 5
cpu_graph.label = "CPU %0.1f"
fps_graph = graph.Bar_Graph()
fps_graph.pos = (140, 50)
fps_graph.update_rate = 5
fps_graph.label = "%0.0f FPS"
g_pool.graphs = [cpu_graph, fps_graph]
# set the last saved window size
on_resize(main_window, *glfw.glfwGetFramebufferSize(main_window))
should_publish_frames = False
frame_publish_format = "jpeg"
frame_publish_format_recent_warning = False
# create a timer to control window update frequency
window_update_timer = timer(1 / 60)
def window_should_update():
return next(window_update_timer)
logger.warning("Process started.")
frame = None
# Event loop
while not glfw.glfwWindowShouldClose(main_window):
if notify_sub.new_data:
t, notification = notify_sub.recv()
subject = notification["subject"]
if subject.startswith("eye_process.should_stop"):
if notification["eye_id"] == eye_id:
break
elif subject == "recording.started":
if notification["record_eye"] and g_pool.capture.online:
record_path = notification["rec_path"]
raw_mode = notification["compression"]
start_time_synced = notification["start_time_synced"]
logger.info("Will save eye video to: {}".format(record_path))
video_path = os.path.join(
record_path, "eye{}.mp4".format(eye_id)
)
if raw_mode and frame and g_pool.capture.jpeg_support:
g_pool.writer = JPEG_Writer(video_path, start_time_synced)
elif hasattr(g_pool.capture._recent_frame, "h264_buffer"):
g_pool.writer = H264Writer(
video_path,
g_pool.capture.frame_size[0],
g_pool.capture.frame_size[1],
g_pool.capture.frame_rate,
)
else:
g_pool.writer = MPEG_Writer(video_path, start_time_synced)
elif subject == "recording.stopped":
if g_pool.writer:
logger.info("Done recording.")
try:
g_pool.writer.release()
except RuntimeError:
logger.error("No eye video recorded")
g_pool.writer = None
elif subject.startswith("meta.should_doc"):
ipc_socket.notify(
{
"subject": "meta.doc",
"actor": "eye{}".format(eye_id),
"doc": eye.__doc__,
}
)
elif subject.startswith("frame_publishing.started"):
should_publish_frames = True
frame_publish_format = notification.get("format", "jpeg")
elif subject.startswith("frame_publishing.stopped"):
should_publish_frames = False
frame_publish_format = "jpeg"
elif (
subject.startswith("start_eye_plugin")
and notification["target"] == g_pool.process
):
try:
g_pool.plugins.add(
g_pool.plugin_by_name[notification["name"]],
notification.get("args", {}),
)
except KeyError as err:
logger.error(f"Attempt to load unknown plugin: {err}")
elif subject.startswith("eye_stream.set_zmq_option.hwm"):
if notification["eye_id"] == eye_id:
update_hwm(notification['hwm'])
for plugin in g_pool.plugins:
plugin.on_notify(notification)
event = {}
for plugin in g_pool.plugins:
plugin.recent_events(event)
frame = event.get("frame")
if frame:
if should_publish_frames:
try:
if frame_publish_format == "jpeg":
data = frame.jpeg_buffer
elif frame_publish_format == "yuv":
data = frame.yuv_buffer
elif frame_publish_format == "bgr":
data = frame.bgr
elif frame_publish_format == "gray":
data = frame.gray
assert data is not None
except (AttributeError, AssertionError, NameError):
if not frame_publish_format_recent_warning:
frame_publish_format_recent_warning = True
logger.warning(
'{}s are not compatible with format "{}"'.format(
type(frame), frame_publish_format
)
)
else:
frame_publish_format_recent_warning = False
pupil_socket.send(
{
"topic": "frame.eye.{}".format(eye_id),
"width": frame.width,
"height": frame.height,
"index": frame.index,
"timestamp": frame.timestamp,
"format": frame_publish_format,
"__raw_data__": [data],
}
)
t = frame.timestamp
dt, ts = t - ts, t
try:
fps_graph.add(1.0 / dt)
except ZeroDivisionError:
pass
if g_pool.writer:
try:
g_pool.writer.write_video_frame(frame)
except NonMonotonicTimestampError as e:
logger.error(
"Recorder received non-monotonic timestamp!"
" Stopping the recording!"
)
logger.debug(str(e))
ipc_socket.notify({"subject": "recording.should_stop"})
ipc_socket.notify(
{"subject": "recording.should_stop", "remote_notify": "all"}
)
result = event.get("pupil_detection_result", None)
if result is not None:
pupil_socket.send(result)
cpu_graph.update()
# GL drawing
if window_should_update():
if is_window_visible(main_window):
glfw.glfwMakeContextCurrent(main_window)
clear_gl_screen()
glViewport(0, 0, *g_pool.camera_render_size)
for p in g_pool.plugins:
p.gl_display()
glViewport(0, 0, *window_size)
# render graphs
fps_graph.draw()
cpu_graph.draw()
# render GUI
try:
clipboard = glfw.glfwGetClipboardString(main_window).decode()
except AttributeError: # clipboard is None, might happen on startup
clipboard = ""
g_pool.gui.update_clipboard(clipboard)
user_input = g_pool.gui.update()
if user_input.clipboard != clipboard:
# only write to clipboard if content changed
glfw.glfwSetClipboardString(
main_window, user_input.clipboard.encode()
)
for button, action, mods in user_input.buttons:
x, y = glfw.glfwGetCursorPos(main_window)
pos = x * hdpi_factor, y * hdpi_factor
pos = normalize(pos, g_pool.camera_render_size)
if g_pool.flip:
pos = 1 - pos[0], 1 - pos[1]
# Position in img pixels
pos = denormalize(pos, g_pool.capture.frame_size)
for plugin in g_pool.plugins:
if plugin.on_click(pos, button, action):
break
for key, scancode, action, mods in user_input.keys:
for plugin in g_pool.plugins:
if plugin.on_key(key, scancode, action, mods):
break
for char_ in user_input.chars:
for plugin in g_pool.plugins:
if plugin.on_char(char_):
break
# update screen
glfw.glfwSwapBuffers(main_window)
glfw.glfwPollEvents()
# END while running
# in case eye recording was still runnnig: Save&close
if g_pool.writer:
logger.info("Done recording eye.")
g_pool.writer.release()
g_pool.writer = None
session_settings["loaded_plugins"] = g_pool.plugins.get_initializers()
# save session persistent settings
session_settings["gui_scale"] = g_pool.gui_user_scale
session_settings["flip"] = g_pool.flip
session_settings["display_mode"] = g_pool.display_mode
session_settings["ui_config"] = g_pool.gui.configuration
session_settings["version"] = str(g_pool.version)
if not hide_ui:
glfw.glfwRestoreWindow(main_window) # need to do this for windows os
session_settings["window_position"] = glfw.glfwGetWindowPos(main_window)
session_window_size = glfw.glfwGetWindowSize(main_window)
if 0 not in session_window_size:
session_settings["window_size"] = session_window_size
session_settings.close()
for plugin in g_pool.plugins:
plugin.alive = False
g_pool.plugins.clean()
glfw.glfwDestroyWindow(main_window)
g_pool.gui.terminate()
glfw.glfwTerminate()
logger.info("Process shutting down.")
|
def eye(
timebase,
is_alive_flag,
ipc_pub_url,
ipc_sub_url,
ipc_push_url,
user_dir,
version,
eye_id,
overwrite_cap_settings=None,
hide_ui=False,
pub_socket_hwm=None,
):
"""reads eye video and detects the pupil.
Creates a window, gl context.
Grabs images from a capture.
Streams Pupil coordinates.
Reacts to notifications:
``set_detection_mapping_mode``: Sets detection method
``eye_process.should_stop``: Stops the eye process
``recording.started``: Starts recording eye video
``recording.stopped``: Stops recording eye video
``frame_publishing.started``: Starts frame publishing
``frame_publishing.stopped``: Stops frame publishing
``start_eye_plugin``: Start plugins in eye process
Emits notifications:
``eye_process.started``: Eye process started
``eye_process.stopped``: Eye process stopped
Emits data:
``pupil.<eye id>``: Pupil data for eye with id ``<eye id>``
``frame.eye.<eye id>``: Eye frames with id ``<eye id>``
"""
# We deferr the imports becasue of multiprocessing.
# Otherwise the world process each process also loads the other imports.
import zmq
import zmq_tools
zmq_ctx = zmq.Context()
ipc_socket = zmq_tools.Msg_Dispatcher(zmq_ctx, ipc_push_url)
pupil_socket = zmq_tools.Msg_Streamer(zmq_ctx, ipc_pub_url, hwm)
notify_sub = zmq_tools.Msg_Receiver(zmq_ctx, ipc_sub_url, topics=("notify",))
# logging setup
import logging
logging.getLogger("OpenGL").setLevel(logging.ERROR)
logger = logging.getLogger()
logger.handlers = []
logger.setLevel(logging.NOTSET)
logger.addHandler(zmq_tools.ZMQ_handler(zmq_ctx, ipc_push_url))
# create logger for the context of this function
logger = logging.getLogger(__name__)
if is_alive_flag.value:
# indicates eye process that this is a duplicated startup
logger.warning("Aborting redundant eye process startup")
return
with Is_Alive_Manager(is_alive_flag, ipc_socket, eye_id, logger):
# general imports
import traceback
import numpy as np
import cv2
# display
import glfw
from pyglui import ui, graph, cygl
from pyglui.cygl.utils import draw_points, RGBA, draw_polyline
from pyglui.cygl.utils import Named_Texture
from gl_utils import basic_gl_setup, adjust_gl_view, clear_gl_screen
from gl_utils import make_coord_system_pixel_based
from gl_utils import make_coord_system_norm_based
from gl_utils import is_window_visible, glViewport
# monitoring
import psutil
# Plug-ins
from plugin import Plugin_List
# helpers/utils
from uvc import get_time_monotonic
from file_methods import Persistent_Dict
from version_utils import VersionFormat
from methods import normalize, denormalize, timer
from av_writer import JPEG_Writer, MPEG_Writer, NonMonotonicTimestampError
from ndsi import H264Writer
from video_capture import source_classes, manager_classes
from roi import Roi
from background_helper import IPC_Logging_Task_Proxy
from pupil_detector_plugins import available_detector_plugins
from pupil_detector_plugins.manager import PupilDetectorManager
IPC_Logging_Task_Proxy.push_url = ipc_push_url
def interrupt_handler(sig, frame):
import traceback
trace = traceback.format_stack(f=frame)
logger.debug(f"Caught signal {sig} in:\n" + "".join(trace))
# NOTE: Interrupt is handled in world/service/player which are responsible for
# shutting down the eye process properly
signal.signal(signal.SIGINT, interrupt_handler)
# UI Platform tweaks
if platform.system() == "Linux":
scroll_factor = 10.0
window_position_default = (600, 300 * eye_id + 30)
elif platform.system() == "Windows":
scroll_factor = 10.0
window_position_default = (600, 90 + 300 * eye_id)
else:
scroll_factor = 1.0
window_position_default = (600, 300 * eye_id)
icon_bar_width = 50
window_size = None
hdpi_factor = 1.0
# g_pool holds variables for this process
g_pool = SimpleNamespace()
# make some constants avaiable
g_pool.user_dir = user_dir
g_pool.version = version
g_pool.app = "capture"
g_pool.eye_id = eye_id
g_pool.process = f"eye{eye_id}"
g_pool.timebase = timebase
g_pool.camera_render_size = None
g_pool.ipc_pub = ipc_socket
def get_timestamp():
return get_time_monotonic() - g_pool.timebase.value
g_pool.get_timestamp = get_timestamp
g_pool.get_now = get_time_monotonic
default_detector_cls, available_detectors = available_detector_plugins()
plugins = (
manager_classes
+ source_classes
+ available_detectors
+ [PupilDetectorManager, Roi]
)
g_pool.plugin_by_name = {p.__name__: p for p in plugins}
preferred_names = [
f"Pupil Cam3 ID{eye_id}",
f"Pupil Cam2 ID{eye_id}",
f"Pupil Cam1 ID{eye_id}",
]
if eye_id == 0:
preferred_names += ["HD-6000"]
default_capture_name = "UVC_Source"
default_capture_settings = {
"preferred_names": preferred_names,
"frame_size": (320, 240),
"frame_rate": 120,
}
default_plugins = [
# TODO: extend with plugins
(default_capture_name, default_capture_settings),
("UVC_Manager", {}),
("NDSI_Manager", {}),
("HMD_Streaming_Manager", {}),
("File_Manager", {}),
# Detector needs to be loaded first to set `g_pool.pupil_detector`
(default_detector_cls.__name__, {}),
("PupilDetectorManager", {}),
("Roi", {}),
]
# Callback functions
def on_resize(window, w, h):
nonlocal window_size
nonlocal hdpi_factor
active_window = glfw.glfwGetCurrentContext()
glfw.glfwMakeContextCurrent(window)
hdpi_factor = glfw.getHDPIFactor(window)
g_pool.gui.scale = g_pool.gui_user_scale * hdpi_factor
window_size = w, h
g_pool.camera_render_size = w - int(icon_bar_width * g_pool.gui.scale), h
g_pool.gui.update_window(w, h)
g_pool.gui.collect_menus()
for g in g_pool.graphs:
g.scale = hdpi_factor
g.adjust_window_size(w, h)
adjust_gl_view(w, h)
glfw.glfwMakeContextCurrent(active_window)
def on_window_key(window, key, scancode, action, mods):
g_pool.gui.update_key(key, scancode, action, mods)
def on_window_char(window, char):
g_pool.gui.update_char(char)
def on_iconify(window, iconified):
g_pool.iconified = iconified
def on_window_mouse_button(window, button, action, mods):
g_pool.gui.update_button(button, action, mods)
def on_pos(window, x, y):
x, y = x * hdpi_factor, y * hdpi_factor
g_pool.gui.update_mouse(x, y)
pos = x, y
pos = normalize(pos, g_pool.camera_render_size)
if g_pool.flip:
pos = 1 - pos[0], 1 - pos[1]
# Position in img pixels
pos = denormalize(pos, g_pool.capture.frame_size)
for p in g_pool.plugins:
p.on_pos(pos)
def on_scroll(window, x, y):
g_pool.gui.update_scroll(x, y * scroll_factor)
def on_drop(window, count, paths):
paths = [paths[x].decode("utf-8") for x in range(count)]
for plugin in g_pool.plugins:
if plugin.on_drop(paths):
break
# load session persistent settings
session_settings = Persistent_Dict(
os.path.join(g_pool.user_dir, "user_settings_eye{}".format(eye_id))
)
if VersionFormat(session_settings.get("version", "0.0")) != g_pool.version:
logger.info(
"Session setting are from a different version of this app. I will not use those."
)
session_settings.clear()
g_pool.iconified = False
g_pool.capture = None
g_pool.flip = session_settings.get("flip", False)
g_pool.display_mode = session_settings.get("display_mode", "camera_image")
g_pool.display_mode_info_text = {
"camera_image": "Raw eye camera image. This uses the least amount of CPU power",
"roi": "Click and drag on the blue circles to adjust the region of interest. The region should be as small as possible, but large enough to capture all pupil movements.",
"algorithm": "Algorithm display mode overlays a visualization of the pupil detection parameters on top of the eye video. Adjust parameters within the Pupil Detection menu below.",
}
def set_display_mode_info(val):
g_pool.display_mode = val
g_pool.display_mode_info.text = g_pool.display_mode_info_text[val]
def toggle_general_settings(collapsed):
# this is the menu toggle logic.
# Only one menu can be open.
# If no menu is open the menubar should collapse.
g_pool.menubar.collapsed = collapsed
for m in g_pool.menubar.elements:
m.collapsed = True
general_settings.collapsed = collapsed
# Initialize glfw
glfw.glfwInit()
if hide_ui:
glfw.glfwWindowHint(glfw.GLFW_VISIBLE, 0) # hide window
title = "Pupil Capture - eye {}".format(eye_id)
width, height = session_settings.get("window_size", (640 + icon_bar_width, 480))
main_window = glfw.glfwCreateWindow(width, height, title, None, None)
window_pos = session_settings.get("window_position", window_position_default)
glfw.glfwSetWindowPos(main_window, window_pos[0], window_pos[1])
glfw.glfwMakeContextCurrent(main_window)
cygl.utils.init()
# UI callback functions
def set_scale(new_scale):
g_pool.gui_user_scale = new_scale
on_resize(main_window, *glfw.glfwGetFramebufferSize(main_window))
# gl_state settings
basic_gl_setup()
g_pool.image_tex = Named_Texture()
g_pool.image_tex.update_from_ndarray(np.ones((1, 1), dtype=np.uint8) + 125)
# setup GUI
g_pool.gui = ui.UI()
g_pool.gui_user_scale = session_settings.get("gui_scale", 1.0)
g_pool.menubar = ui.Scrolling_Menu(
"Settings", pos=(-500, 0), size=(-icon_bar_width, 0), header_pos="left"
)
g_pool.iconbar = ui.Scrolling_Menu(
"Icons", pos=(-icon_bar_width, 0), size=(0, 0), header_pos="hidden"
)
g_pool.gui.append(g_pool.menubar)
g_pool.gui.append(g_pool.iconbar)
general_settings = ui.Growing_Menu("General", header_pos="headline")
general_settings.append(
ui.Selector(
"gui_user_scale",
g_pool,
setter=set_scale,
selection=[0.8, 0.9, 1.0, 1.1, 1.2],
label="Interface Size",
)
)
def set_window_size():
f_width, f_height = g_pool.capture.frame_size
f_width *= 2
f_height *= 2
f_width += int(icon_bar_width * g_pool.gui.scale)
glfw.glfwSetWindowSize(main_window, f_width, f_height)
general_settings.append(ui.Button("Reset window size", set_window_size))
g_pool.hwm = pupil_socket.get_hwm()
def update_hwm(new_hwm):
g_pool.hwm = new_hwm
pupil_socket.set_hwm(new_hwm)
general_settings.append(ui.Text_Input("hwm", g_pool, setter=update_hwm, label="ZMQ High Water Mark"))
general_settings.append(ui.Switch("flip", g_pool, label="Flip image display"))
general_settings.append(
ui.Selector(
"display_mode",
g_pool,
setter=set_display_mode_info,
selection=["camera_image", "roi", "algorithm"],
labels=["Camera Image", "ROI", "Algorithm"],
label="Mode",
)
)
g_pool.display_mode_info = ui.Info_Text(
g_pool.display_mode_info_text[g_pool.display_mode]
)
general_settings.append(g_pool.display_mode_info)
g_pool.menubar.append(general_settings)
icon = ui.Icon(
"collapsed",
general_settings,
label=chr(0xE8B8),
on_val=False,
off_val=True,
setter=toggle_general_settings,
label_font="pupil_icons",
)
icon.tooltip = "General Settings"
g_pool.iconbar.append(icon)
toggle_general_settings(False)
plugins_to_load = session_settings.get("loaded_plugins", default_plugins)
if overwrite_cap_settings:
# Ensure that overwrite_cap_settings takes preference over source plugins
# with incorrect settings that were loaded from session settings.
plugins_to_load.append(overwrite_cap_settings)
g_pool.plugins = Plugin_List(g_pool, plugins_to_load)
if not g_pool.capture:
# Make sure we always have a capture running. Important if there was no
# capture stored in session settings.
g_pool.plugins.add(
g_pool.plugin_by_name[default_capture_name], default_capture_settings
)
g_pool.writer = None
# Register callbacks main_window
glfw.glfwSetFramebufferSizeCallback(main_window, on_resize)
glfw.glfwSetWindowIconifyCallback(main_window, on_iconify)
glfw.glfwSetKeyCallback(main_window, on_window_key)
glfw.glfwSetCharCallback(main_window, on_window_char)
glfw.glfwSetMouseButtonCallback(main_window, on_window_mouse_button)
glfw.glfwSetCursorPosCallback(main_window, on_pos)
glfw.glfwSetScrollCallback(main_window, on_scroll)
glfw.glfwSetDropCallback(main_window, on_drop)
# load last gui configuration
g_pool.gui.configuration = session_settings.get("ui_config", {})
# set up performance graphs
pid = os.getpid()
ps = psutil.Process(pid)
ts = g_pool.get_timestamp()
cpu_graph = graph.Bar_Graph()
cpu_graph.pos = (20, 50)
cpu_graph.update_fn = ps.cpu_percent
cpu_graph.update_rate = 5
cpu_graph.label = "CPU %0.1f"
fps_graph = graph.Bar_Graph()
fps_graph.pos = (140, 50)
fps_graph.update_rate = 5
fps_graph.label = "%0.0f FPS"
g_pool.graphs = [cpu_graph, fps_graph]
# set the last saved window size
on_resize(main_window, *glfw.glfwGetFramebufferSize(main_window))
should_publish_frames = False
frame_publish_format = "jpeg"
frame_publish_format_recent_warning = False
# create a timer to control window update frequency
window_update_timer = timer(1 / 60)
def window_should_update():
return next(window_update_timer)
logger.warning("Process started.")
frame = None
# Event loop
while not glfw.glfwWindowShouldClose(main_window):
if notify_sub.new_data:
t, notification = notify_sub.recv()
subject = notification["subject"]
if subject.startswith("eye_process.should_stop"):
if notification["eye_id"] == eye_id:
break
elif subject == "recording.started":
if notification["record_eye"] and g_pool.capture.online:
record_path = notification["rec_path"]
raw_mode = notification["compression"]
start_time_synced = notification["start_time_synced"]
logger.info("Will save eye video to: {}".format(record_path))
video_path = os.path.join(
record_path, "eye{}.mp4".format(eye_id)
)
if raw_mode and frame and g_pool.capture.jpeg_support:
g_pool.writer = JPEG_Writer(video_path, start_time_synced)
elif hasattr(g_pool.capture._recent_frame, "h264_buffer"):
g_pool.writer = H264Writer(
video_path,
g_pool.capture.frame_size[0],
g_pool.capture.frame_size[1],
g_pool.capture.frame_rate,
)
else:
g_pool.writer = MPEG_Writer(video_path, start_time_synced)
elif subject == "recording.stopped":
if g_pool.writer:
logger.info("Done recording.")
try:
g_pool.writer.release()
except RuntimeError:
logger.error("No eye video recorded")
g_pool.writer = None
elif subject.startswith("meta.should_doc"):
ipc_socket.notify(
{
"subject": "meta.doc",
"actor": "eye{}".format(eye_id),
"doc": eye.__doc__,
}
)
elif subject.startswith("frame_publishing.started"):
should_publish_frames = True
frame_publish_format = notification.get("format", "jpeg")
elif subject.startswith("frame_publishing.stopped"):
should_publish_frames = False
frame_publish_format = "jpeg"
elif (
subject.startswith("start_eye_plugin")
and notification["target"] == g_pool.process
):
try:
g_pool.plugins.add(
g_pool.plugin_by_name[notification["name"]],
notification.get("args", {}),
)
except KeyError as err:
logger.error(f"Attempt to load unknown plugin: {err}")
elif subject.startswith("eye_stream.set_zmq_option.hwm"):
if notification["eye_id"] == eye_id:
update_hwm(notification['hwm'])
for plugin in g_pool.plugins:
plugin.on_notify(notification)
event = {}
for plugin in g_pool.plugins:
plugin.recent_events(event)
frame = event.get("frame")
if frame:
if should_publish_frames:
try:
if frame_publish_format == "jpeg":
data = frame.jpeg_buffer
elif frame_publish_format == "yuv":
data = frame.yuv_buffer
elif frame_publish_format == "bgr":
data = frame.bgr
elif frame_publish_format == "gray":
data = frame.gray
assert data is not None
except (AttributeError, AssertionError, NameError):
if not frame_publish_format_recent_warning:
frame_publish_format_recent_warning = True
logger.warning(
'{}s are not compatible with format "{}"'.format(
type(frame), frame_publish_format
)
)
else:
frame_publish_format_recent_warning = False
pupil_socket.send(
{
"topic": "frame.eye.{}".format(eye_id),
"width": frame.width,
"height": frame.height,
"index": frame.index,
"timestamp": frame.timestamp,
"format": frame_publish_format,
"__raw_data__": [data],
}
)
t = frame.timestamp
dt, ts = t - ts, t
try:
fps_graph.add(1.0 / dt)
except ZeroDivisionError:
pass
if g_pool.writer:
try:
g_pool.writer.write_video_frame(frame)
except NonMonotonicTimestampError as e:
logger.error(
"Recorder received non-monotonic timestamp!"
" Stopping the recording!"
)
logger.debug(str(e))
ipc_socket.notify({"subject": "recording.should_stop"})
ipc_socket.notify(
{"subject": "recording.should_stop", "remote_notify": "all"}
)
result = event.get("pupil_detection_result", None)
if result is not None:
pupil_socket.send(result)
cpu_graph.update()
# GL drawing
if window_should_update():
if is_window_visible(main_window):
glfw.glfwMakeContextCurrent(main_window)
clear_gl_screen()
glViewport(0, 0, *g_pool.camera_render_size)
for p in g_pool.plugins:
p.gl_display()
glViewport(0, 0, *window_size)
# render graphs
fps_graph.draw()
cpu_graph.draw()
# render GUI
try:
clipboard = glfw.glfwGetClipboardString(main_window).decode()
except AttributeError: # clipboard is None, might happen on startup
clipboard = ""
g_pool.gui.update_clipboard(clipboard)
user_input = g_pool.gui.update()
if user_input.clipboard != clipboard:
# only write to clipboard if content changed
glfw.glfwSetClipboardString(
main_window, user_input.clipboard.encode()
)
for button, action, mods in user_input.buttons:
x, y = glfw.glfwGetCursorPos(main_window)
pos = x * hdpi_factor, y * hdpi_factor
pos = normalize(pos, g_pool.camera_render_size)
if g_pool.flip:
pos = 1 - pos[0], 1 - pos[1]
# Position in img pixels
pos = denormalize(pos, g_pool.capture.frame_size)
for plugin in g_pool.plugins:
if plugin.on_click(pos, button, action):
break
for key, scancode, action, mods in user_input.keys:
for plugin in g_pool.plugins:
if plugin.on_key(key, scancode, action, mods):
break
for char_ in user_input.chars:
for plugin in g_pool.plugins:
if plugin.on_char(char_):
break
# update screen
glfw.glfwSwapBuffers(main_window)
glfw.glfwPollEvents()
# END while running
# in case eye recording was still runnnig: Save&close
if g_pool.writer:
logger.info("Done recording eye.")
g_pool.writer.release()
g_pool.writer = None
session_settings["loaded_plugins"] = g_pool.plugins.get_initializers()
# save session persistent settings
session_settings["gui_scale"] = g_pool.gui_user_scale
session_settings["flip"] = g_pool.flip
session_settings["display_mode"] = g_pool.display_mode
session_settings["ui_config"] = g_pool.gui.configuration
session_settings["version"] = str(g_pool.version)
if not hide_ui:
glfw.glfwRestoreWindow(main_window) # need to do this for windows os
session_settings["window_position"] = glfw.glfwGetWindowPos(main_window)
session_window_size = glfw.glfwGetWindowSize(main_window)
if 0 not in session_window_size:
session_settings["window_size"] = session_window_size
session_settings.close()
for plugin in g_pool.plugins:
plugin.alive = False
g_pool.plugins.clean()
glfw.glfwDestroyWindow(main_window)
g_pool.gui.terminate()
glfw.glfwTerminate()
logger.info("Process shutting down.")
|
45,534 |
def test_nested_span_sampling_override():
with start_transaction(name="outer", sampled=True) as span:
assert span.sampled is True
with start_transaction(name="inner", sampled=False) as span2:
assert span2.sampled is False
assert span.sampled is True
|
def test_nested_transaction_sampling_override():
with start_transaction(name="outer", sampled=True) as span:
assert span.sampled is True
with start_transaction(name="inner", sampled=False) as span2:
assert span2.sampled is False
assert span.sampled is True
|
26,918 |
def get_excluded_providers() -> List:
"""
Returns packages excluded for the current python version.
Currently the only excluded provider is apache hive for Python 3.9.
Until https://github.com/dropbox/PyHive/issues/380 is fixed.
"""
return ['apache.hive'] if PY39 else []
|
def get_excluded_providers() -> List[str]:
"""
Returns packages excluded for the current python version.
Currently the only excluded provider is apache hive for Python 3.9.
Until https://github.com/dropbox/PyHive/issues/380 is fixed.
"""
return ['apache.hive'] if PY39 else []
|
7,548 |
def test_get_invalid():
"""Test can create a file path to an invalid file."""
path = get_pkg_data_path("kjfrhgjkla", "hgiulrhgiu")
assert not os.path.isfile(path)
assert not os.path.isdir(path)
path = get_pkg_data_path("kjfrhgjkla", "hgiulrhgiu", package="astropy")
assert not os.path.isfile(path)
assert not os.path.isdir(path)
|
def test_get_invalid():
"""Test can create a file path to an invalid file."""
path = get_pkg_data_path(("kjfrhgjkla", "hgiulrhgiu"))
assert not os.path.isfile(path)
assert not os.path.isdir(path)
path = get_pkg_data_path("kjfrhgjkla", "hgiulrhgiu", package="astropy")
assert not os.path.isfile(path)
assert not os.path.isdir(path)
|
41,237 |
def transformer(cls_or_func: Any) -> Any:
"""Decorator to verify API and append logging functionality to transformer functions & classes.
A transformer is a callable that takes as inputs a cirq.AbstractCircuit and
cirq.TransformerContext, and returns another cirq.AbstractCircuit without
modifying the input circuit. A transformer could be a function, for example:
>>> @cirq.transformer
>>> def convert_to_cz(
>>> circuit: cirq.AbstractCircuit, context: cirq.TransformerContext
>>> ) -> cirq.Circuit:
>>> ...
Or it could be a class that implements `__call__` with the same API, for example:
>>> @cirq.transformer
>>> class ConvertToSqrtISwaps:
>>> def __init__(self):
>>> ...
>>> def __call__(
>>> self, circuit: cirq.Circuit, context: cirq.TransformerContext
>>> ) -> cirq.Circuit:
>>> ...
Args:
cls_or_func: The callable class or function to be decorated.
Returns:
Decorated class / function which includes additional logging boilerplate.
"""
if isinstance(cls_or_func, type):
cls = cls_or_func
method = cls.__call__
@functools.wraps(method)
def method_with_logging(self, circuit, context):
return _transform_and_log(
lambda circuit, context: method(self, circuit, context),
cls.__name__,
circuit,
context,
)
setattr(cls, '__call__', method_with_logging)
return cls
else:
assert callable(cls_or_func)
func = cls_or_func
@functools.wraps(func)
def func_with_logging(circuit, context):
return _transform_and_log(func, func.__name__, circuit, context)
return func_with_logging
|
def transformer(cls_or_func: Any) -> Any:
"""Decorator to verify API and append logging functionality to transformer functions & classes.
A transformer is a callable that takes as inputs a `cirq.AbstractCircuit` and
cirq.TransformerContext, and returns another cirq.AbstractCircuit without
modifying the input circuit. A transformer could be a function, for example:
>>> @cirq.transformer
>>> def convert_to_cz(
>>> circuit: cirq.AbstractCircuit, context: cirq.TransformerContext
>>> ) -> cirq.Circuit:
>>> ...
Or it could be a class that implements `__call__` with the same API, for example:
>>> @cirq.transformer
>>> class ConvertToSqrtISwaps:
>>> def __init__(self):
>>> ...
>>> def __call__(
>>> self, circuit: cirq.Circuit, context: cirq.TransformerContext
>>> ) -> cirq.Circuit:
>>> ...
Args:
cls_or_func: The callable class or function to be decorated.
Returns:
Decorated class / function which includes additional logging boilerplate.
"""
if isinstance(cls_or_func, type):
cls = cls_or_func
method = cls.__call__
@functools.wraps(method)
def method_with_logging(self, circuit, context):
return _transform_and_log(
lambda circuit, context: method(self, circuit, context),
cls.__name__,
circuit,
context,
)
setattr(cls, '__call__', method_with_logging)
return cls
else:
assert callable(cls_or_func)
func = cls_or_func
@functools.wraps(func)
def func_with_logging(circuit, context):
return _transform_and_log(func, func.__name__, circuit, context)
return func_with_logging
|
42,088 |
def updates_state(f: Callable[..., Any]) -> Callable[..., Any]:
"""Method decorator to fetch updated trial state from rank 0 after f is run.
This decorator ensures trial properties (params, distributions, etc.) on all distributed
processes are up-to-date with the wrapped trial stored on rank 0.
It should be applied to all Trial methods that update property values.
"""
@functools.wraps(f)
def wrapped(self: Any, *args, **kwargs) -> Any:
def state() -> Sequence:
assert self._delegate is not None
return (
self._delegate.number,
self._delegate.params,
self._delegate.distributions,
self._delegate.user_attrs,
self._delegate.system_attrs,
self._delegate.datetime_start,
)
try:
return f(self, *args, **kwargs)
finally:
(
self._number,
self._params,
self._distributions,
self._user_attrs,
self._system_attrs,
self._datetime_start,
) = self._call_and_communicate_obj(state)
return wrapped
|
def updates_state(f: Callable[..., Any]) -> Callable[..., Any]:
"""Method decorator to fetch updated trial properties from rank 0 after ``f`` is run.
This decorator ensures trial properties (params, distributions, etc.) on all distributed
processes are up-to-date with the wrapped trial stored on rank 0.
It should be applied to all Trial methods that update property values.
"""
@functools.wraps(f)
def wrapped(self: Any, *args, **kwargs) -> Any:
def state() -> Sequence:
assert self._delegate is not None
return (
self._delegate.number,
self._delegate.params,
self._delegate.distributions,
self._delegate.user_attrs,
self._delegate.system_attrs,
self._delegate.datetime_start,
)
try:
return f(self, *args, **kwargs)
finally:
(
self._number,
self._params,
self._distributions,
self._user_attrs,
self._system_attrs,
self._datetime_start,
) = self._call_and_communicate_obj(state)
return wrapped
|
52,931 |
def write_api_entry_usage(gallery_conf, target_dir):
if gallery_conf['backreferences_dir'] is None:
return
backreferences_dir = os.path.join(gallery_conf['src_dir'],
gallery_conf['backreferences_dir'])
example_files = [example for example in os.listdir(backreferences_dir)
if '__' not in example and
((example.endswith('.examples') and
not os.path.isfile(example + '.new')) or
example.endswith('.examples.new'))]
def get_entry(entry):
"""Remove all trailing .examples and .examples.new instances."""
if entry.endswith('.new'):
entry = entry[:-4]
if entry.endswith('.examples'):
entry = entry[:-9]
return entry
def get_entry_type(entry):
"""Infer type from capitalization."""
if any([char.isupper() for char in entry.split('.')[-1]]):
return 'class'
return 'py:obj'
# modules have classes and functions in them, so check if there exists
# classes or functions that have the module as a root and, if so,
# remove the module (it must be lower case, classes have methods
# that include the class name as the prefix but should be kept)
for example in example_files.copy():
if any([char.isupper() for char in example]):
continue # include classes (camel case)
for example2 in example_files:
if example != example2 and \
get_entry(example) in get_entry(example2):
example_files.remove(example)
break
total_count = len(example_files)
if total_count == 0:
return
try:
import graphviz
has_graphviz = True
except ImportError:
logger.info('`graphviz` required to graphical visualization')
has_graphviz = False
target_dir_clean = os.path.relpath(
target_dir, gallery_conf['src_dir']).replace(os.path.sep, '_')
new_ref = 'sphx_glr_%s_sg_api_usage' % target_dir_clean
replace_count = len('sphx_glr_' + os.path.basename(target_dir) + '_')
with codecs.open(os.path.join(target_dir, 'sg_api_usage.rst'), 'w',
encoding='utf-8') as fid:
fid.write(SPHX_GLR_ORPHAN.format(new_ref))
unused_api_entries = list()
used_api_entries = dict()
for example in example_files:
# check if backreferences empty
example_fname = os.path.join(backreferences_dir, example)
entry = get_entry(example)
# TODO: remove after fixing bug in identify_name
if entry == 'numpy.RandomState':
entry = 'numpy.random.RandomState'
if os.path.getsize(example_fname) == 0:
unused_api_entries.append(entry)
else:
used_api_entries[entry] = list()
with open(example_fname, 'r', encoding='utf-8') as fid2:
for line in fid2:
if line.startswith(' :ref:'):
example_name = line.split('`')[1]
used_api_entries[entry].append(example_name)
title = 'Unused API Entries'
fid.write(title + '\n' + '=' * len(title) + '\n\n')
for entry in sorted(unused_api_entries):
fid.write(f'- :{get_entry_type(entry)}:`{entry}`\n')
fid.write('\n\n')
unused_dot_fname = os.path.join(target_dir, 'sg_api_unused.dot')
if has_graphviz and unused_api_entries:
fid.write('.. graphviz:: ./sg_api_unused.dot\n'
' :alt: API used entries graph\n'
' :layout: neato\n\n')
used_count = len(used_api_entries)
used_percentage = used_count / total_count
fid.write('\nAPI entries used: '
f'{round(used_percentage * 100, 2)}% '
f'({used_count}/{total_count})\n\n')
title = 'Used API Entries'
fid.write(title + '\n' + '=' * len(title) + '\n\n')
for entry in sorted(used_api_entries):
fid.write(f'- :{get_entry_type(entry)}:`{entry}`\n\n')
for ref in used_api_entries[entry]:
fid.write(f' - :ref:`{ref}`\n')
fid.write('\n\n')
used_dot_fname = os.path.join(target_dir, 'sg_api_used.dot')
if has_graphviz and used_api_entries:
fid.write('.. graphviz:: ./sg_api_used.dot\n'
' :alt: API usage graph\n'
' :layout: neato\n\n')
# design graph
if has_graphviz and unused_api_entries:
dg = graphviz.Digraph('api_usage', filename=unused_dot_fname,
node_attr={'color': 'lightblue2',
'style': 'filled'})
unused_api_connections = set()
unused_api_struct = [entry.split('.') for entry in unused_api_entries]
n_levels = max([len(struct) for struct in unused_api_struct])
for level in range(n_levels):
for struct in unused_api_struct:
if len(struct) <= level + 1:
continue
if (struct[level], struct[level + 1]) in \
unused_api_connections:
continue
unused_api_connections.add((struct[level], struct[level + 1]))
dg.edge(struct[level], struct[level + 1])
dg.attr(overlap='scale')
dg.attr(fontsize='32')
dg.save(unused_dot_fname)
if has_graphviz and used_api_entries:
dg = graphviz.Digraph('api_usage', filename=used_dot_fname,
node_attr={'color': 'lightblue2',
'style': 'filled'})
for entry, refs in used_api_entries.items():
for ref in refs:
dg.edge(entry, ref[replace_count:])
dg.attr(overlap='scale')
dg.attr(fontsize='32')
dg.save(used_dot_fname)
|
def write_api_entry_usage(gallery_conf, target_dir):
if gallery_conf['backreferences_dir'] is None:
return
backreferences_dir = os.path.join(gallery_conf['src_dir'],
gallery_conf['backreferences_dir'])
example_files = [example for example in os.listdir(backreferences_dir)
if '__' not in example and
((example.endswith('.examples') and
not os.path.isfile(example + '.new')) or
example.endswith('.examples.new'))]
def get_entry(entry):
"""Remove all trailing .examples and .examples.new instances."""
if entry.endswith('.new'):
entry = entry[:-4]
if entry.endswith('.examples'):
entry = entry[:-9]
return entry
def get_entry_type(entry):
"""Infer type from capitalization."""
if any([char.isupper() for char in entry.split('.')[-1]]):
return 'class'
return 'py:obj'
# modules have classes and functions in them, so check if there exists
# classes or functions that have the module as a root and, if so,
# remove the module (it must be lower case, classes have methods
# that include the class name as the prefix but should be kept)
for example in example_files.copy():
if any([char.isupper() for char in example]):
continue # include classes (camel case)
for example2 in example_files:
if example != example2 and \
get_entry(example) in get_entry(example2):
example_files.remove(example)
break
total_count = len(example_files)
if total_count == 0:
return
try:
import graphviz
has_graphviz = True
except ImportError:
logger.info('`graphviz` required to graphical visualization')
has_graphviz = False
target_dir_clean = os.path.relpath(
target_dir, gallery_conf['src_dir']).replace(os.path.sep, '_')
new_ref = 'sphx_glr_%s_sg_api_usage' % target_dir_clean
replace_count = len('sphx_glr_' + os.path.basename(target_dir) + '_')
with codecs.open(os.path.join(target_dir, 'sg_api_usage.rst'), 'w',
encoding='utf-8') as fid:
fid.write(SPHX_GLR_ORPHAN.format(new_ref))
unused_api_entries = list()
used_api_entries = dict()
for example in example_files:
# check if backreferences empty
example_fname = os.path.join(backreferences_dir, example)
entry = get_entry(example)
# TODO: remove after fixing bug in identify_name
if entry == 'numpy.RandomState':
entry = 'numpy.random.RandomState'
if os.path.getsize(example_fname) == 0:
unused_api_entries.append(entry)
else:
used_api_entries[entry] = list()
with open(example_fname, 'r', encoding='utf-8') as fid2:
for line in fid2:
if line.startswith(' :ref:'):
example_name = line.split('`')[1]
used_api_entries[entry].append(example_name)
title = 'Unused API Entries'
fid.write(title + '\n' + '=' * len(title) + '\n\n')
for entry in sorted(unused_api_entries):
fid.write(f'- :{get_entry_type(entry)}:`{entry}`\n')
fid.write('\n\n')
unused_dot_fname = os.path.join(target_dir, 'sg_api_unused.dot')
if has_graphviz and unused_api_entries:
fid.write('.. graphviz:: ./sg_api_unused.dot\n'
' :alt: API used entries graph\n'
' :layout: neato\n\n')
used_count = len(used_api_entries)
used_percentage = used_count / total_count
fid.write('\nAPI entries used: '
f'{round(used_percentage * 100, 2)}% '
f'({used_count}/{total_count})\n\n')
title = 'Used API Entries'
fid.write(title + '\n' + '=' * len(title) + '\n\n')
for entry in sorted(used_api_entries):
fid.write(f'- :{get_entry_type(entry)}:`{entry}`\n\n')
for ref in used_api_entries[entry]:
fid.write(f' - :ref:`{ref}`\n')
fid.write('\n\n')
used_dot_fname = os.path.join(target_dir, 'sg_api_used.dot')
if has_graphviz and used_api_entries:
fid.write('.. graphviz:: ./sg_api_used.dot\n'
' :alt: API usage graph\n'
' :layout: neato\n\n')
# design graph
if has_graphviz and unused_api_entries:
dg = graphviz.Digraph('api_usage', filename=unused_dot_fname,
node_attr={'color': 'lightblue2',
'style': 'filled'})
unused_api_connections = set()
unused_api_struct = [entry.split('.') for entry in unused_api_entries]
n_levels = max([len(struct) for struct in unused_api_struct])
for level in range(n_levels):
for struct in unused_api_struct:
if len(struct) <= level + 1:
continue
if (struct[level], struct[level + 1]) in \
unused_api_connections:
continue
unused_api_connections.add((struct[level], struct[level + 1]))
dg.edge(struct[level], struct[level + 1])
dg.attr(overlap='scale')
dg.attr(fontsize='32')
dg.save(unused_dot_fname)
if has_graphviz and used_api_entries:
dg = graphviz.Digraph('api_usage', filename=used_dot_fname,
node_attr={'color': 'lightblue2',
'style': 'filled'})
for entry, refs in used_api_entries.items():
for ref in refs:
dg.edge(entry, ref[replace_count:])
dg.attr(overlap='scale')
dg.attr(fontsize='32')
dg.save(used_dot_fname)
|
45,845 |
def image_hist2d(
image: torch.Tensor,
min: float = 0.0,
max: float = 255.0,
n_bins: int = 256,
bandwidth: float = -1.0,
centers: torch.Tensor = torch.tensor([]),
return_pdf: bool = False,
kernel: str = "triangular",
):
"""Function that estimates the histogram of the input image(s).
The calculation uses triangular kernel density estimation.
Args:
x: Input tensor to compute the histogram with shape
:math:`(H, W)`, :math:`(C, H, W)` or :math:`(B, C, H, W)`.
min: Lower end of the interval (inclusive).
max: Upper end of the interval (inclusive). Ignored when
:attr:`centers` is specified.
n_bins: The number of histogram bins. Ignored when
:attr:`centers` is specified.
bandwidth: Smoothing factor. If not specified or equal to -1,
bandwidth = (max - min) / n_bins.
centers: Centers of the bins with shape :math:`(n_bins,)`.
If not specified or empty, it is calculated as centers of
equal width bins of [min, max] range.
return_pdf: If True, also return probability densities for
each bin.
kernel: kernel to perform kernel density estimation
(`triangular`, `gaussian`, `uniform`, `epanechnikov`).
Returns:
Computed histogram of shape :math:`(bins)`, :math:`(C, bins)`,
:math:`(B, C, bins)`.
Computed probability densities of shape :math:`(bins)`, :math:`(C, bins)`,
:math:`(B, C, bins)`, if return_pdf is ``True``. Tensor of zeros with shape
of the histogram otherwise.
"""
if not isinstance(image, torch.Tensor):
raise TypeError(f"Input image type is not a torch.Tensor. Got {type(image)}.")
if centers is not None and not isinstance(centers, torch.Tensor):
raise TypeError(f"Bins' centers type is not a torch.Tensor. Got {type(centers)}.")
if centers.numel() > 0 and centers.dim() != 1:
raise ValueError(f"Bins' centers must be a torch.Tensor of the shape (n_bins,). Got {centers.shape}.")
if not isinstance(min, float):
raise TypeError(f'Type of lower end of the range is not a float. Got {type(min)}.')
if not isinstance(max, float):
raise TypeError(f"Type of upper end of the range is not a float. Got {type(min)}.")
if not isinstance(n_bins, int):
raise TypeError(f"Type of number of bins is not an int. Got {type(n_bins)}.")
if bandwidth != -1 and not isinstance(bandwidth, float):
raise TypeError(f"Bandwidth type is not a float. Got {type(bandwidth)}.")
if not isinstance(return_pdf, bool):
raise TypeError(f"Return_pdf type is not a bool. Got {type(return_pdf)}.")
if not isinstance(kernel, str):
raise TypeError(f"Kernel type is not a str. Got {type(kernel)}.")
device = image.device
if image.dim() == 4:
batch_size, n_channels, height, width = image.size()
elif image.dim() == 3:
batch_size = 1
n_channels, height, width = image.size()
elif image.dim() == 2:
height, width = image.size()
batch_size, n_channels = 1, 1
else:
raise ValueError(f"Input values must be a tensor of the shape " f"BxCxHxW, CxHxW or HxW. Got {image.shape}.")
if bandwidth == -1.0:
bandwidth = (max - min) / n_bins
if centers.numel() == 0:
centers = min + bandwidth * (torch.arange(n_bins, device=device).float() + 0.5)
centers = centers.reshape(-1, 1, 1, 1, 1)
u = abs(image.unsqueeze(0) - centers) / bandwidth
if kernel == "triangular":
mask = (u <= 1).float()
kernel_values = (1 - u) * mask
elif kernel == "gaussian":
kernel_values = torch.exp(-0.5 * u ** 2)
elif kernel == "uniform":
mask = (u <= 1).float()
kernel_values = torch.ones_like(u, dtype=u.dtype, device=u.device) * mask
elif kernel == "epanechnikov":
mask = (u <= 1).float()
kernel_values = (1 - u ** 2) * mask
else:
raise ValueError(f"Kernel must be 'triangular', 'gaussian', " f"'uniform' or 'epanechnikov'. Got {kernel}.")
hist = torch.sum(kernel_values, dim=(-2, -1)).permute(1, 2, 0)
if return_pdf:
normalization = torch.sum(hist, dim=-1).unsqueeze(0) + 1e-10
pdf = hist / normalization
return hist, pdf
return hist, torch.zeros_like(hist, dtype=hist.dtype, device=device)
|
def image_hist2d(
image: torch.Tensor,
min: float = 0.0,
max: float = 255.0,
n_bins: int = 256,
bandwidth: float = -1.0,
centers: torch.Tensor = torch.tensor([]),
return_pdf: bool = False,
kernel: str = "triangular",
):
"""Function that estimates the histogram of the input image(s).
The calculation uses triangular kernel density estimation.
Args:
image: Input tensor to compute the histogram with shape
:math:`(H, W)`, :math:`(C, H, W)` or :math:`(B, C, H, W)`.
min: Lower end of the interval (inclusive).
max: Upper end of the interval (inclusive). Ignored when
:attr:`centers` is specified.
n_bins: The number of histogram bins. Ignored when
:attr:`centers` is specified.
bandwidth: Smoothing factor. If not specified or equal to -1,
bandwidth = (max - min) / n_bins.
centers: Centers of the bins with shape :math:`(n_bins,)`.
If not specified or empty, it is calculated as centers of
equal width bins of [min, max] range.
return_pdf: If True, also return probability densities for
each bin.
kernel: kernel to perform kernel density estimation
(`triangular`, `gaussian`, `uniform`, `epanechnikov`).
Returns:
Computed histogram of shape :math:`(bins)`, :math:`(C, bins)`,
:math:`(B, C, bins)`.
Computed probability densities of shape :math:`(bins)`, :math:`(C, bins)`,
:math:`(B, C, bins)`, if return_pdf is ``True``. Tensor of zeros with shape
of the histogram otherwise.
"""
if not isinstance(image, torch.Tensor):
raise TypeError(f"Input image type is not a torch.Tensor. Got {type(image)}.")
if centers is not None and not isinstance(centers, torch.Tensor):
raise TypeError(f"Bins' centers type is not a torch.Tensor. Got {type(centers)}.")
if centers.numel() > 0 and centers.dim() != 1:
raise ValueError(f"Bins' centers must be a torch.Tensor of the shape (n_bins,). Got {centers.shape}.")
if not isinstance(min, float):
raise TypeError(f'Type of lower end of the range is not a float. Got {type(min)}.')
if not isinstance(max, float):
raise TypeError(f"Type of upper end of the range is not a float. Got {type(min)}.")
if not isinstance(n_bins, int):
raise TypeError(f"Type of number of bins is not an int. Got {type(n_bins)}.")
if bandwidth != -1 and not isinstance(bandwidth, float):
raise TypeError(f"Bandwidth type is not a float. Got {type(bandwidth)}.")
if not isinstance(return_pdf, bool):
raise TypeError(f"Return_pdf type is not a bool. Got {type(return_pdf)}.")
if not isinstance(kernel, str):
raise TypeError(f"Kernel type is not a str. Got {type(kernel)}.")
device = image.device
if image.dim() == 4:
batch_size, n_channels, height, width = image.size()
elif image.dim() == 3:
batch_size = 1
n_channels, height, width = image.size()
elif image.dim() == 2:
height, width = image.size()
batch_size, n_channels = 1, 1
else:
raise ValueError(f"Input values must be a tensor of the shape " f"BxCxHxW, CxHxW or HxW. Got {image.shape}.")
if bandwidth == -1.0:
bandwidth = (max - min) / n_bins
if centers.numel() == 0:
centers = min + bandwidth * (torch.arange(n_bins, device=device).float() + 0.5)
centers = centers.reshape(-1, 1, 1, 1, 1)
u = abs(image.unsqueeze(0) - centers) / bandwidth
if kernel == "triangular":
mask = (u <= 1).float()
kernel_values = (1 - u) * mask
elif kernel == "gaussian":
kernel_values = torch.exp(-0.5 * u ** 2)
elif kernel == "uniform":
mask = (u <= 1).float()
kernel_values = torch.ones_like(u, dtype=u.dtype, device=u.device) * mask
elif kernel == "epanechnikov":
mask = (u <= 1).float()
kernel_values = (1 - u ** 2) * mask
else:
raise ValueError(f"Kernel must be 'triangular', 'gaussian', " f"'uniform' or 'epanechnikov'. Got {kernel}.")
hist = torch.sum(kernel_values, dim=(-2, -1)).permute(1, 2, 0)
if return_pdf:
normalization = torch.sum(hist, dim=-1).unsqueeze(0) + 1e-10
pdf = hist / normalization
return hist, pdf
return hist, torch.zeros_like(hist, dtype=hist.dtype, device=device)
|
49,552 |
def concat(
dfs,
axis=0,
join="outer",
interleave_partitions=False,
ignore_unknown_divisions=False,
):
""" Concatenate DataFrames along rows.
- When axis=0 (default), concatenate DataFrames row-wise:
- If all divisions are known and ordered, concatenate DataFrames keeping
divisions. When divisions are not ordered, specifying
interleave_partition=True allows concatenate divisions each by each.
- If any of division is unknown, concatenate DataFrames resetting its
division to unknown (None)
- When axis=1, concatenate DataFrames column-wise:
- Allowed if all divisions are known.
- If any of division is unknown, it raises ValueError.
Parameters
----------
dfs : list
List of dask.DataFrames to be concatenated
axis : {0, 1, 'index', 'columns'}, default 0
The axis to concatenate along
join : {'inner', 'outer'}, default 'outer'
How to handle indexes on other axis
interleave_partitions : bool, default False
Whether to concatenate DataFrames ignoring its order. If True, every
divisions are concatenated each by each.
ignore_unknown_divisions: boolean, default False
Whether to warn when concatenating dask series/dataframes
with unknown divisions. If True the warning won't be emitted.
Notes
-----
This differs in from ``pd.concat`` in the when concatenating Categoricals
with different categories. Pandas currently coerces those to objects
before concatenating. Coercing to objects is very expensive for large
arrays, so dask preserves the Categoricals by taking the union of
the categories.
Examples
--------
If all divisions are known and ordered, divisions are kept.
>>> a # doctest: +SKIP
dd.DataFrame<x, divisions=(1, 3, 5)>
>>> b # doctest: +SKIP
dd.DataFrame<y, divisions=(6, 8, 10)>
>>> dd.concat([a, b]) # doctest: +SKIP
dd.DataFrame<concat-..., divisions=(1, 3, 6, 8, 10)>
Unable to concatenate if divisions are not ordered.
>>> a # doctest: +SKIP
dd.DataFrame<x, divisions=(1, 3, 5)>
>>> b # doctest: +SKIP
dd.DataFrame<y, divisions=(2, 3, 6)>
>>> dd.concat([a, b]) # doctest: +SKIP
ValueError: All inputs have known divisions which cannot be concatenated
in order. Specify interleave_partitions=True to ignore order
Specify interleave_partitions=True to ignore the division order.
>>> dd.concat([a, b], interleave_partitions=True) # doctest: +SKIP
dd.DataFrame<concat-..., divisions=(1, 2, 3, 5, 6)>
If any of division is unknown, the result division will be unknown
>>> a # doctest: +SKIP
dd.DataFrame<x, divisions=(None, None)>
>>> b # doctest: +SKIP
dd.DataFrame<y, divisions=(1, 4, 10)>
>>> dd.concat([a, b]) # doctest: +SKIP
dd.DataFrame<concat-..., divisions=(None, None, None, None)>
If we set ignore_unknown_divisions=True, the warning won't be emitted
>>> a # doctest: +SKIP
dd.DataFrame<x, divisions=(None, None)>
>>> b # doctest: +SKIP
dd.DataFrame<y, divisions=(1, 4, 10)>
>>> dd.concat([a, b], ignore_unknown_divisions=True)# doctest: +SKIP
dd.DataFrame<concat-..., divisions=(None, None, None, None)>
Different categoricals are unioned
>> dd.concat([ # doctest: +SKIP
... dd.from_pandas(pd.Series(['a', 'b'], dtype='category'), 1),
... dd.from_pandas(pd.Series(['a', 'c'], dtype='category'), 1),
... ], interleave_partitions=True).dtype
CategoricalDtype(categories=['a', 'b', 'c'], ordered=False)
"""
if not isinstance(dfs, list):
raise TypeError("dfs must be a list of DataFrames/Series objects")
if len(dfs) == 0:
raise ValueError("No objects to concatenate")
if len(dfs) == 1:
if axis == 1 and isinstance(dfs[0], Series):
return dfs[0].to_frame()
else:
return dfs[0]
if join not in ("inner", "outer"):
raise ValueError("'join' must be 'inner' or 'outer'")
axis = DataFrame._validate_axis(axis)
dasks = [df for df in dfs if isinstance(df, _Frame)]
dfs = _maybe_from_pandas(dfs)
if axis == 1:
if all(df.known_divisions for df in dasks):
return concat_indexed_dataframes(dfs, axis=axis, join=join)
elif (
len(dasks) == len(dfs)
and all(not df.known_divisions for df in dfs)
and len({df.npartitions for df in dasks}) == 1
):
if not ignore_unknown_divisions:
warnings.warn(
"Concatenating dataframes with unknown divisions.\n"
"We're assuming that the indexes of each dataframes"
" are \n aligned. This assumption is not generally "
"safe."
)
return concat_unindexed_dataframes(dfs)
else:
raise ValueError(
"Unable to concatenate DataFrame with unknown "
"division specifying axis=1"
)
else:
if all(df.known_divisions for df in dasks):
# each DataFrame's division must be greater than previous one
if all(
dfs[i].divisions[-1] < dfs[i + 1].divisions[0]
for i in range(len(dfs) - 1)
):
divisions = []
for df in dfs[:-1]:
# remove last to concatenate with next
divisions += df.divisions[:-1]
divisions += dfs[-1].divisions
return stack_partitions(dfs, divisions, join=join)
elif interleave_partitions:
return concat_indexed_dataframes(dfs, join=join)
else:
divisions = [None] * (sum([df.npartitions for df in dfs]) + 1)
return stack_partitions(dfs, divisions, join=join)
else:
divisions = [None] * (sum([df.npartitions for df in dfs]) + 1)
return stack_partitions(dfs, divisions, join=join)
|
def concat(
dfs,
axis=0,
join="outer",
interleave_partitions=False,
ignore_unknown_divisions=False,
):
""" Concatenate DataFrames along rows.
- When axis=0 (default), concatenate DataFrames row-wise:
- If all divisions are known and ordered, concatenate DataFrames keeping
divisions. When divisions are not ordered, specifying
interleave_partition=True allows concatenate divisions each by each.
- If any of division is unknown, concatenate DataFrames resetting its
division to unknown (None)
- When axis=1, concatenate DataFrames column-wise:
- Allowed if all divisions are known.
- If any of division is unknown, it raises ValueError.
Parameters
----------
dfs : list
List of dask.DataFrames to be concatenated
axis : {0, 1, 'index', 'columns'}, default 0
The axis to concatenate along
join : {'inner', 'outer'}, default 'outer'
How to handle indexes on other axis
interleave_partitions : bool, default False
Whether to concatenate DataFrames ignoring its order. If True, every
divisions are concatenated each by each.
ignore_unknown_divisions: boolean, default False
Whether to warn when concatenating dask series/dataframes
with unknown divisions. If True the warning won't be emitted.
Notes
-----
This differs in from ``pd.concat`` in the when concatenating Categoricals
with different categories. Pandas currently coerces those to objects
before concatenating. Coercing to objects is very expensive for large
arrays, so dask preserves the Categoricals by taking the union of
the categories.
Examples
--------
If all divisions are known and ordered, divisions are kept.
>>> a # doctest: +SKIP
dd.DataFrame<x, divisions=(1, 3, 5)>
>>> b # doctest: +SKIP
dd.DataFrame<y, divisions=(6, 8, 10)>
>>> dd.concat([a, b]) # doctest: +SKIP
dd.DataFrame<concat-..., divisions=(1, 3, 6, 8, 10)>
Unable to concatenate if divisions are not ordered.
>>> a # doctest: +SKIP
dd.DataFrame<x, divisions=(1, 3, 5)>
>>> b # doctest: +SKIP
dd.DataFrame<y, divisions=(2, 3, 6)>
>>> dd.concat([a, b]) # doctest: +SKIP
ValueError: All inputs have known divisions which cannot be concatenated
in order. Specify interleave_partitions=True to ignore order
Specify interleave_partitions=True to ignore the division order.
>>> dd.concat([a, b], interleave_partitions=True) # doctest: +SKIP
dd.DataFrame<concat-..., divisions=(1, 2, 3, 5, 6)>
If any of division is unknown, the result division will be unknown
>>> a # doctest: +SKIP
dd.DataFrame<x, divisions=(None, None)>
>>> b # doctest: +SKIP
dd.DataFrame<y, divisions=(1, 4, 10)>
>>> dd.concat([a, b]) # doctest: +SKIP
dd.DataFrame<concat-..., divisions=(None, None, None, None)>
By default concatenating with unknown divisions will raise a warning.
Set ``ignore_unknown_division=True`` to disable this:
>>> dd.concat([a, b], ignore_unknown_divisions=True)# doctest: +SKIP
dd.DataFrame<concat-..., divisions=(None, None, None, None)>
Different categoricals are unioned
>> dd.concat([ # doctest: +SKIP
... dd.from_pandas(pd.Series(['a', 'b'], dtype='category'), 1),
... dd.from_pandas(pd.Series(['a', 'c'], dtype='category'), 1),
... ], interleave_partitions=True).dtype
CategoricalDtype(categories=['a', 'b', 'c'], ordered=False)
"""
if not isinstance(dfs, list):
raise TypeError("dfs must be a list of DataFrames/Series objects")
if len(dfs) == 0:
raise ValueError("No objects to concatenate")
if len(dfs) == 1:
if axis == 1 and isinstance(dfs[0], Series):
return dfs[0].to_frame()
else:
return dfs[0]
if join not in ("inner", "outer"):
raise ValueError("'join' must be 'inner' or 'outer'")
axis = DataFrame._validate_axis(axis)
dasks = [df for df in dfs if isinstance(df, _Frame)]
dfs = _maybe_from_pandas(dfs)
if axis == 1:
if all(df.known_divisions for df in dasks):
return concat_indexed_dataframes(dfs, axis=axis, join=join)
elif (
len(dasks) == len(dfs)
and all(not df.known_divisions for df in dfs)
and len({df.npartitions for df in dasks}) == 1
):
if not ignore_unknown_divisions:
warnings.warn(
"Concatenating dataframes with unknown divisions.\n"
"We're assuming that the indexes of each dataframes"
" are \n aligned. This assumption is not generally "
"safe."
)
return concat_unindexed_dataframes(dfs)
else:
raise ValueError(
"Unable to concatenate DataFrame with unknown "
"division specifying axis=1"
)
else:
if all(df.known_divisions for df in dasks):
# each DataFrame's division must be greater than previous one
if all(
dfs[i].divisions[-1] < dfs[i + 1].divisions[0]
for i in range(len(dfs) - 1)
):
divisions = []
for df in dfs[:-1]:
# remove last to concatenate with next
divisions += df.divisions[:-1]
divisions += dfs[-1].divisions
return stack_partitions(dfs, divisions, join=join)
elif interleave_partitions:
return concat_indexed_dataframes(dfs, join=join)
else:
divisions = [None] * (sum([df.npartitions for df in dfs]) + 1)
return stack_partitions(dfs, divisions, join=join)
else:
divisions = [None] * (sum([df.npartitions for df in dfs]) + 1)
return stack_partitions(dfs, divisions, join=join)
|
9,014 |
def ascii_lower(text: str) -> str:
"""Lower ```text``` according to the ASCII CASEMAPPING"""
return text.translate(ASCII_TABLE)
|
def ascii_lower(text: str) -> str:
"""Lower ``text`` according to the ASCII CASEMAPPING"""
return text.translate(ASCII_TABLE)
|
964 |
def build_filter_orderby(orderby):
"""Builds filters using the filter options passed into the CLI.
Only support fot create filter option orderBy, default value is DESC.
"""
_filters = {}
aux = list(reversed(str(orderby).split('.')))
for split in aux:
_aux_filter = {}
if str(split).__contains__('='):
_aux_filter[str(split).split('=')[0]] = query_filter_orderby(str(split).split('=')[1])
_filters = _aux_filter
elif split == list(aux)[0]:
_aux_filter[split] = query_filter_orderby('DESC')
else:
_aux_filter[split] = _filters
_filters = _aux_filter
return _filters
|
def build_filter_orderby(orderby):
"""Builds filters using the filter options passed into the CLI.
It only supports the orderBy option, the default value is DESC.
"""
_filters = {}
aux = list(reversed(str(orderby).split('.')))
for split in aux:
_aux_filter = {}
if str(split).__contains__('='):
_aux_filter[str(split).split('=')[0]] = query_filter_orderby(str(split).split('=')[1])
_filters = _aux_filter
elif split == list(aux)[0]:
_aux_filter[split] = query_filter_orderby('DESC')
else:
_aux_filter[split] = _filters
_filters = _aux_filter
return _filters
|
3,143 |
def duplicated(
values: np.ndarray | ExtensionArray | Series,
keep: Literal["first", "last", False] = "first",
) -> np.ndarray:
"""
Return boolean ndarray denoting duplicate values.
Parameters
----------
values : nd.array, ExtensionArray or Series
Array over which to check for duplicate values.
keep : {'first', 'last', False}, default 'first'
- ``first`` : Mark duplicates as ``True`` except for the first
occurrence.
- ``last`` : Mark duplicates as ``True`` except for the last
occurrence.
- False : Mark all duplicates as ``True``.
Returns
-------
duplicated : ndarray[bool]
"""
values, _ = _ensure_data(values)
return htable.duplicated(values, keep=keep)
|
def duplicated(
values: ArrayLike | Series,
keep: Literal["first", "last", False] = "first",
) -> np.ndarray:
"""
Return boolean ndarray denoting duplicate values.
Parameters
----------
values : nd.array, ExtensionArray or Series
Array over which to check for duplicate values.
keep : {'first', 'last', False}, default 'first'
- ``first`` : Mark duplicates as ``True`` except for the first
occurrence.
- ``last`` : Mark duplicates as ``True`` except for the last
occurrence.
- False : Mark all duplicates as ``True``.
Returns
-------
duplicated : ndarray[bool]
"""
values, _ = _ensure_data(values)
return htable.duplicated(values, keep=keep)
|
31,836 |
def get_used_dockers_images(export_to_context: bool = True) -> CommandResults:
md = None
active_docker_list_integration = {}
active_docker_list_automation = {}
''' Examples for output: { 'demisto/python3:3.9.7.24076' : ['ListUsedDockerImage', 'VirusTotal',...]}'''
result_dict: Dict[str, List[str]] = {}
active_integration_instances = demisto.internalHttpRequest(POST_COMMAND, '/settings/integration/search',
REQUEST_INTEGRATION_SEARCH_BODY)
demisto.debug(f'response code = {0}', active_integration_instances['statusCode'])
if active_integration_instances and active_integration_instances['statusCode'] == 200:
active_docker_list_integration = extract_dockers_from_integration_search_result(
active_integration_instances['body'])
active_automation = demisto.internalHttpRequest(POST_COMMAND, '/automation/search',
REQUEST_INTEGRATION_SEARCH_BODY)
demisto.debug(f'response code = {0}', active_automation['statusCode'])
if active_automation and active_automation['statusCode'] == 200:
active_docker_list_automation = extract_dockers_from_automation_search_result(
active_automation['body'])
result_dict = merge_result(active_docker_list_integration, result_dict, MAX_PER_DOCKER)
result_dict = merge_result(active_docker_list_automation, result_dict, MAX_PER_DOCKER)
''' format the result for Markdown view'''
result_output = []
result_output = format_result_for_markdown(result_dict)
md = tableToMarkdown('Dockers Images In use:', result_output)
if export_to_context:
return CommandResults(
outputs_prefix='Docker Image',
outputs_key_field='Docker Image',
outputs=result_output,
readable_output=md)
else:
return CommandResults(readable_output=md)
|
def get_used_dockers_images(export_to_context: bool = True) -> CommandResults:
md = None
active_docker_list_integration = {}
active_docker_list_automation = {}
''' Examples for output: { 'demisto/python3:3.9.7.24076' : ['ListUsedDockerImage', 'VirusTotal',...]}'''
result_dict: Dict[str, List[str]] = {}
active_integration_instances = demisto.internalHttpRequest(POST_COMMAND, '/settings/integration/search',
REQUEST_INTEGRATION_SEARCH_BODY)
demisto.debug(f'response code = {0}', active_integration_instances['statusCode'])
if active_integration_instances and active_integration_instances['statusCode'] == 200:
active_docker_list_integration = extract_dockers_from_integration_search_result(
active_integration_instances['body'])
active_automation = demisto.internalHttpRequest(POST_COMMAND, '/automation/search',
REQUEST_INTEGRATION_SEARCH_BODY)
demisto.debug(f'response code = {0}', active_automation['statusCode'])
if active_automation and active_automation['statusCode'] == 200:
active_docker_list_automation = extract_dockers_from_automation_search_result(
active_automation['body'])
result_dict = merge_result(active_docker_list_integration, result_dict, MAX_PER_DOCKER)
result_dict = merge_result(active_docker_list_automation, result_dict, MAX_PER_DOCKER)
''' format the result for Markdown view'''
result_output = []
result_output = format_result_for_markdown(result_dict)
md = tableToMarkdown('Dockers Images In use:', result_output)
if export_to_context:
return CommandResults(
outputs_prefix='UsedDockerImages',
outputs_key_field='Docker Image',
outputs=result_output,
readable_output=md)
else:
return CommandResults(readable_output=md)
|
25,649 |
def tabulate_module_summary(module: tf.Module, tablefmt: str = None) -> str:
column_names = ['name', 'class', 'transform', 'trainable', 'shape', 'dtype', 'value']
def get_name(v):
return v.__class__.__name__
def get_transform(v):
if hasattr(v, "transform") and v.transform is not None:
return v.transform.__class__.__name__
return None
merged_leaf_components = _merge_leaf_components(leaf_components(module))
column_values = [[
path,
get_name(variable),
get_transform(variable),
variable.trainable,
variable.shape,
variable.dtype.name,
_str_tensor_value(variable.numpy())
] for path, variable in merged_leaf_components.items()]
return tabulate(column_values, headers=column_names, tablefmt=tablefmt)
|
def tabulate_module_summary(module: tf.Module, tablefmt: Optional[str] = None) -> str:
column_names = ['name', 'class', 'transform', 'trainable', 'shape', 'dtype', 'value']
def get_name(v):
return v.__class__.__name__
def get_transform(v):
if hasattr(v, "transform") and v.transform is not None:
return v.transform.__class__.__name__
return None
merged_leaf_components = _merge_leaf_components(leaf_components(module))
column_values = [[
path,
get_name(variable),
get_transform(variable),
variable.trainable,
variable.shape,
variable.dtype.name,
_str_tensor_value(variable.numpy())
] for path, variable in merged_leaf_components.items()]
return tabulate(column_values, headers=column_names, tablefmt=tablefmt)
|
58,222 |
def add_span_arg_tags(span, endpoint_name, args, args_names, args_traced):
if endpoint_name not in BLACKLIST_ENDPOINT:
blacklisted = BLACKLIST_ENDPOINT_TAGS.get(endpoint_name, [])
tags = dict((name, value) for (name, value) in zip(args_names, args) if name in args_traced)
flat_tags = _flatten_dict(tags, exclude=blacklisted)
span.set_tags({k: truncate_arg_value(v) for k, v in flat_tags.items()})
|
def add_span_arg_tags(span, endpoint_name, args, args_names, args_traced):
if endpoint_name not in EXCLUDED_ENDPOINT:
exclude = EXCLUDED_ENDPOINT_TAGS.get(endpoint_name, [])
tags = dict((name, value) for (name, value) in zip(args_names, args) if name in args_traced)
flat_tags = _flatten_dict(tags, exclude=blacklisted)
span.set_tags({k: truncate_arg_value(v) for k, v in flat_tags.items()})
|
44,610 |
def parse_tracking_example(example, dataset_ndims,
dtype=tf.float32):
X_names = ['app', 'cent', 'morph', 'adj']
y_names = ['temp_adj']
sparse_names = ['adj', 'temp_adj']
full_name_dict = {'app': 'appearances',
'cent': 'centroids',
'morph': 'morphologies',
'adj': 'adj_matrices',
'temp_adj': 'temporal_adj_matrices'}
# Recreate the example structure
data = {}
shape_strings_dict = {}
shapes_dict = {}
for key in dataset_ndims:
if 'shape' in key:
new_key = '_'.join(key.split('_')[0:-1])
shapes_dict[new_key] = dataset_ndims[key]
for key in shapes_dict:
dataset_ndims.pop(key + '_shape')
for key in dataset_ndims:
if key in sparse_names:
data[key] = tf.io.SparseFeature(value_key=key + '_val',
index_key=[key + '_ind_' + str(i)
for i in range(dataset_ndims[key])],
size=shapes_dict[key],
dtype=tf.float32)
else:
data[key] = tf.io.FixedLenFeature([], tf.string)
shape_strings = [key + '_shape_' + str(i)
for i in range(dataset_ndims[key])]
shape_strings_dict[key] = shape_strings
for ss in shape_strings:
data[ss] = tf.io.FixedLenFeature([], tf.int64)
# Get data
content = tf.io.parse_single_example(example, data)
X_dict = {}
y_dict = {}
for key in dataset_ndims:
# Get the feature and reshape
if key in sparse_names:
value = content[key]
else:
shape = [content[ss] for ss in shape_strings_dict[key]]
value = content[key]
value = tf.io.parse_tensor(value, out_type=dtype)
value = tf.reshape(value, shape=shape)
if key in X_names:
X_dict[full_name_dict[key]] = value
else:
y_dict[full_name_dict[key]] = value
return X_dict, y_dict
|
def parse_tracking_example(example, dataset_ndims,
dtype=tf.float32):
X_names = ['app', 'cent', 'morph', 'adj']
y_names = ['temp_adj']
sparse_names = ['adj', 'temp_adj']
full_name_dict = {'app': 'appearances',
'cent': 'centroids',
'morph': 'morphologies',
'adj': 'adj_matrices',
'temp_adj': 'temporal_adj_matrices'}
# Recreate the example structure
data = {}
shape_strings_dict = {}
shapes_dict = {}
for key in dataset_ndims:
if 'shape' in key:
new_key = '_'.join(key.split('_')[0:-1])
shapes_dict[new_key] = dataset_ndims[key]
for key in shapes_dict:
dataset_ndims.pop('{}_shape'.format(key))
for key in dataset_ndims:
if key in sparse_names:
data[key] = tf.io.SparseFeature(value_key=key + '_val',
index_key=[key + '_ind_' + str(i)
for i in range(dataset_ndims[key])],
size=shapes_dict[key],
dtype=tf.float32)
else:
data[key] = tf.io.FixedLenFeature([], tf.string)
shape_strings = [key + '_shape_' + str(i)
for i in range(dataset_ndims[key])]
shape_strings_dict[key] = shape_strings
for ss in shape_strings:
data[ss] = tf.io.FixedLenFeature([], tf.int64)
# Get data
content = tf.io.parse_single_example(example, data)
X_dict = {}
y_dict = {}
for key in dataset_ndims:
# Get the feature and reshape
if key in sparse_names:
value = content[key]
else:
shape = [content[ss] for ss in shape_strings_dict[key]]
value = content[key]
value = tf.io.parse_tensor(value, out_type=dtype)
value = tf.reshape(value, shape=shape)
if key in X_names:
X_dict[full_name_dict[key]] = value
else:
y_dict[full_name_dict[key]] = value
return X_dict, y_dict
|
3,839 |
def test_all_simple_paths_with_two_targets_inside_cycle():
G = nx.cycle_graph(3, create_using=nx.DiGraph())
G.add_edge(1, 3)
paths = nx.all_simple_paths(G, 0, [2, 3])
assert_equal(set(tuple(p) for p in paths), {(0, 1, 2), (0, 1, 3)})
|
def test_all_simple_paths_with_two_targets_inside_cycle_emits_two_paths():
G = nx.cycle_graph(3, create_using=nx.DiGraph())
G.add_edge(1, 3)
paths = nx.all_simple_paths(G, 0, [2, 3])
assert_equal(set(tuple(p) for p in paths), {(0, 1, 2), (0, 1, 3)})
|
36,467 |
def aix_platform():
# type: () -> str
lpp, vrmf, bd = _aix_bosmp64()
assert lpp == "bos.mp64", "%s != %s" % (lpp, "bos.mp64")
return _aix_tag(_aix_vrtl(vrmf), int(bd))
|
def aix_platform():
# type: () -> str
lpp, vrmf, bd = _aix_bosmp64()
assert lpp == "bos.mp64", f"{lpp} != bos.mp64"
return _aix_tag(_aix_vrtl(vrmf), int(bd))
|
9,069 |
def _receive_cap_ack(bot: SopelWrapper, trigger: Trigger) -> None:
was_completed = bot.cap_requests.is_complete
cap_ack: Tuple[str, ...] = bot.capabilities.handle_ack(bot, trigger)
try:
result: Optional[
List[Tuple[bool, Optional[plugin.CapabilityNegotiation]]]
] = bot.cap_requests.acknowledge(bot, cap_ack)
except config.ConfigurationError as error:
LOGGER.error(
'Configuration error on ACK capability "%s": %s',
', '.join(cap_ack),
error,
)
bot.write(('CAP', 'END')) # close negotiation now
bot.quit('Wrong configuration.')
return None
except Exception as error:
LOGGER.exception(
'Error on ACK capability "%s": %s',
', '.join(cap_ack),
error,
)
bot.write(('CAP', 'END')) # close negotiation now
bot.quit('Error negotiating capabilities.')
return None
if result is None:
# a plugin may have request the capability without using the proper
# interface: ignore
return None
_handle_cap_acknowledgement(bot, cap_ack, result, was_completed)
|
def _receive_cap_ack(bot: SopelWrapper, trigger: Trigger) -> None:
was_completed = bot.cap_requests.is_complete
cap_ack: Tuple[str, ...] = bot.capabilities.handle_ack(bot, trigger)
try:
result: Optional[
List[Tuple[bool, Optional[plugin.CapabilityNegotiation]]]
] = bot.cap_requests.acknowledge(bot, cap_ack)
except config.ConfigurationError as error:
LOGGER.error(
'Configuration error on ACK capability "%s": %s',
', '.join(cap_ack),
error,
)
bot.write(('CAP', 'END')) # close negotiation now
bot.quit('Configuration error.')
return None
except Exception as error:
LOGGER.exception(
'Error on ACK capability "%s": %s',
', '.join(cap_ack),
error,
)
bot.write(('CAP', 'END')) # close negotiation now
bot.quit('Error negotiating capabilities.')
return None
if result is None:
# a plugin may have request the capability without using the proper
# interface: ignore
return None
_handle_cap_acknowledgement(bot, cap_ack, result, was_completed)
|
44,836 |
def _load_pyfunc(path):
"""
Load PyFunc implementation. Called by ``pyfunc.load_model``.
:param path: Local filesystem path to the MLflow Model with the ``spark`` flavor.
"""
# NOTE: The getOrCreate() call below may change settings of the active session which we do not
# intend to do here. In particular, setting master to local[1] can break distributed clusters.
# To avoid this problem, we explicitly check for an active session. This is not ideal but there
# is no good workaround at the moment.
import pyspark
spark = pyspark.sql.SparkSession._instantiatedSession
if spark is None:
# NB: If there is no existing Spark context, create a new local one.
# NB: We're disabling caching on the new context since we do not need it and we want to
# avoid overwriting cache of underlying Spark cluster when executed on a Spark Worker
# (e.g. as part of spark_udf).
spark = (
pyspark.sql.SparkSession.builder.config("spark.python.worker.reuse", True)
.config("spark.databricks.io.cache.enabled", False)
# In Spark 3.1 and above, we need to set this conf explicitly to enable creating
# a SparkSession on the workers
.config("spark.executor.allowSparkContext", "true")
# Bind "spark.driver.bindAddress" to 127.0.0.1 helps avoiding some local hostname
# related issues.
.config("spark.driver.bindAddress", "127.0.0.1")
.master("local[1]")
.getOrCreate()
)
return _PyFuncModelWrapper(spark, _load_model(model_uri=path))
|
def _load_pyfunc(path):
"""
Load PyFunc implementation. Called by ``pyfunc.load_model``.
:param path: Local filesystem path to the MLflow Model with the ``spark`` flavor.
"""
# NOTE: The getOrCreate() call below may change settings of the active session which we do not
# intend to do here. In particular, setting master to local[1] can break distributed clusters.
# To avoid this problem, we explicitly check for an active session. This is not ideal but there
# is no good workaround at the moment.
import pyspark
spark = pyspark.sql.SparkSession._instantiatedSession
if spark is None:
# NB: If there is no existing Spark context, create a new local one.
# NB: We're disabling caching on the new context since we do not need it and we want to
# avoid overwriting cache of underlying Spark cluster when executed on a Spark Worker
# (e.g. as part of spark_udf).
spark = (
pyspark.sql.SparkSession.builder.config("spark.python.worker.reuse", True)
.config("spark.databricks.io.cache.enabled", False)
# In Spark 3.1 and above, we need to set this conf explicitly to enable creating
# a SparkSession on the workers
.config("spark.executor.allowSparkContext", "true")
# Binding "spark.driver.bindAddress" to 127.0.0.1 helps avoid some local hostname
# related issues.
.config("spark.driver.bindAddress", "127.0.0.1")
.master("local[1]")
.getOrCreate()
)
return _PyFuncModelWrapper(spark, _load_model(model_uri=path))
|
5,954 |
def _get_index_content(link: Link, session: PipSession) -> Optional["IndexContent"]:
url = link.url.split("#", 1)[0]
# Check for VCS schemes that do not support lookup as web pages.
vcs_scheme = _match_vcs_scheme(url)
if vcs_scheme:
logger.warning(
"Cannot look at %s URL %s because it does not support lookup as web pages.",
vcs_scheme,
link,
)
return None
# Tack index.html onto file:// URLs that point to directories
scheme, _, path, _, _, _ = urllib.parse.urlparse(url)
if scheme == "file" and os.path.isdir(urllib.request.url2pathname(path)):
# add trailing slash if not present so urljoin doesn't trim
# final segment
if not url.endswith("/"):
url += "/"
# TODO: In the future, it would be nice if pip supported PEP 691
# style respones in the file:// URLs, however there's no
# standard file extension for application/vnd.pypi.simple.v1+json
# so we'll need to come up with something on our own.
url = urllib.parse.urljoin(url, "index.html")
logger.debug(" file: URL is directory, getting %s", url)
try:
resp = _get_simple_response(url, session=session)
except _NotHTTP:
logger.warning(
"Skipping page %s because it looks like an archive, and cannot "
"be checked by a HTTP HEAD request.",
link,
)
except _NotAPIContent as exc:
logger.warning(
"Skipping page %s because the %s request got Content-Type: %s. "
"The only supported Content-Types are application/vnd.pypi.simple.v1+json, "
"application/vnd.pypi.simple.v1+html, and text/html",
link,
exc.request_desc,
exc.content_type,
)
except NetworkConnectionError as exc:
_handle_get_simple_fail(link, exc)
except RetryError as exc:
_handle_get_simple_fail(link, exc)
except SSLError as exc:
reason = "There was a problem confirming the ssl certificate: "
reason += str(exc)
_handle_get_simple_fail(link, reason, meth=logger.info)
except requests.ConnectionError as exc:
_handle_get_simple_fail(link, f"connection error: {exc}")
except requests.Timeout:
_handle_get_simple_fail(link, "timed out")
else:
return _make_index_content(resp, cache_link_parsing=link.cache_link_parsing)
return None
|
def _get_index_content(link: Link, *, session: PipSession) -> Optional["IndexContent"]:
url = link.url.split("#", 1)[0]
# Check for VCS schemes that do not support lookup as web pages.
vcs_scheme = _match_vcs_scheme(url)
if vcs_scheme:
logger.warning(
"Cannot look at %s URL %s because it does not support lookup as web pages.",
vcs_scheme,
link,
)
return None
# Tack index.html onto file:// URLs that point to directories
scheme, _, path, _, _, _ = urllib.parse.urlparse(url)
if scheme == "file" and os.path.isdir(urllib.request.url2pathname(path)):
# add trailing slash if not present so urljoin doesn't trim
# final segment
if not url.endswith("/"):
url += "/"
# TODO: In the future, it would be nice if pip supported PEP 691
# style respones in the file:// URLs, however there's no
# standard file extension for application/vnd.pypi.simple.v1+json
# so we'll need to come up with something on our own.
url = urllib.parse.urljoin(url, "index.html")
logger.debug(" file: URL is directory, getting %s", url)
try:
resp = _get_simple_response(url, session=session)
except _NotHTTP:
logger.warning(
"Skipping page %s because it looks like an archive, and cannot "
"be checked by a HTTP HEAD request.",
link,
)
except _NotAPIContent as exc:
logger.warning(
"Skipping page %s because the %s request got Content-Type: %s. "
"The only supported Content-Types are application/vnd.pypi.simple.v1+json, "
"application/vnd.pypi.simple.v1+html, and text/html",
link,
exc.request_desc,
exc.content_type,
)
except NetworkConnectionError as exc:
_handle_get_simple_fail(link, exc)
except RetryError as exc:
_handle_get_simple_fail(link, exc)
except SSLError as exc:
reason = "There was a problem confirming the ssl certificate: "
reason += str(exc)
_handle_get_simple_fail(link, reason, meth=logger.info)
except requests.ConnectionError as exc:
_handle_get_simple_fail(link, f"connection error: {exc}")
except requests.Timeout:
_handle_get_simple_fail(link, "timed out")
else:
return _make_index_content(resp, cache_link_parsing=link.cache_link_parsing)
return None
|
4,424 |
def _temp_proj(ref_2, ref_1, raw_data, n_proj=6):
"""Remove common signal subspace of ref_2 and ref_1 from raw_data.
Parameters
----------
ref_2 : np.ndarray of float, shape (n_sensors_2, n_times)
The magnetometer data for CSS. Can use either all magnetometer data or
a few selected sensors close to a region to be suppressed.
ref_1 : np.ndarray of float, shape (n_sensors_1, n_times)
The gradiometer data for CSS. Can use either all gradiometer data or
a few selected sensors close to a region to be suppressed.
raw_data : np.ndarray of float, shape (n_sensors_raw, n_times)
The data to be filtered, typically the EEG data.
n_proj : int
The number of projection vectors.
Notes
-----
This temporal projection procedure removes the common signal subspace
between ref_2 and ref_1 from raw_data using n_proj number of
projection vectors. Normally used for cortical signal suppression, where
ref_1 is gradiometer data, ref_2 is magnetometer data and
raw_data is EEG data.
"""
# Orthonormalize gradiometer and magnetometer data by a QR decomposition
ref_1_orth = np.linalg.qr(ref_1.T)[0]
ref_2_orth = np.linalg.qr(ref_2.T)[0]
# Calculate cross-correlation
cross_corr = np.dot(ref_1_orth.T, ref_2_orth)
# Channel weights for common temporal subspace by SVD of cross-correlation
ref_1_ch_weights, _, _ = np.linalg.svd(cross_corr)
# Get temporal signals from channel weights
proj_mat = ref_1_orth @ ref_1_ch_weights
# Project out common subspace
filtered_data = raw_data
proj_vec = proj_mat[:, :n_proj]
weights = filtered_data @ proj_vec
filtered_data -= weights @ proj_vec.T
|
def _temp_proj(ref_2, ref_1, raw_data, n_proj=6):
"""Remove common signal subspace of ref_2 and ref_1 from raw_data.
Parameters
----------
ref_2 : np.ndarray of float, shape (n_sensors_2, n_times)
The magnetometer data for CSS. Can use either all magnetometer data or
a few selected sensors close to a region to be suppressed.
ref_1 : array of float, shape (n_sensors_1, n_times)
The gradiometer data for CSS. Can use either all gradiometer data or
a few selected sensors close to a region to be suppressed.
raw_data : np.ndarray of float, shape (n_sensors_raw, n_times)
The data to be filtered, typically the EEG data.
n_proj : int
The number of projection vectors.
Notes
-----
This temporal projection procedure removes the common signal subspace
between ref_2 and ref_1 from raw_data using n_proj number of
projection vectors. Normally used for cortical signal suppression, where
ref_1 is gradiometer data, ref_2 is magnetometer data and
raw_data is EEG data.
"""
# Orthonormalize gradiometer and magnetometer data by a QR decomposition
ref_1_orth = np.linalg.qr(ref_1.T)[0]
ref_2_orth = np.linalg.qr(ref_2.T)[0]
# Calculate cross-correlation
cross_corr = np.dot(ref_1_orth.T, ref_2_orth)
# Channel weights for common temporal subspace by SVD of cross-correlation
ref_1_ch_weights, _, _ = np.linalg.svd(cross_corr)
# Get temporal signals from channel weights
proj_mat = ref_1_orth @ ref_1_ch_weights
# Project out common subspace
filtered_data = raw_data
proj_vec = proj_mat[:, :n_proj]
weights = filtered_data @ proj_vec
filtered_data -= weights @ proj_vec.T
|
12,146 |
def _prepare_stream_response(
payload, content_type, content_disposition, preview_file=False
):
"""Prepare the streaming response to return to the caller.
It iterates over the response data (payload) to avoid reading the conten at
once into memory.
:param payload: The response object (requests.Response).
:param content_type: Content-Type header value.
:param content_disposition: Content-Disposition header value.
:param preview_file: Display contents inside the web page.
:return: The response object (requests.Response) sent to the browser.
"""
if preview_file:
content_disposition = "inline"
chunk_size = 1024 * 1024
response = StreamingHttpResponse(payload.iter_content(chunk_size=chunk_size))
response["Content-Type"] = content_type
response["Content-Disposition"] = content_disposition
return response
|
def _prepare_stream_response(
payload, content_type, content_disposition, preview_file=False
):
"""Prepare the streaming response to return to the caller.
It iterates over the response data (payload) to avoid reading the content at
once into memory.
:param payload: The response object (requests.Response).
:param content_type: Content-Type header value.
:param content_disposition: Content-Disposition header value.
:param preview_file: Display contents inside the web page.
:return: The response object (requests.Response) sent to the browser.
"""
if preview_file:
content_disposition = "inline"
chunk_size = 1024 * 1024
response = StreamingHttpResponse(payload.iter_content(chunk_size=chunk_size))
response["Content-Type"] = content_type
response["Content-Disposition"] = content_disposition
return response
|
28,857 |
def oauth_url(
client_id: Union[int, str],
*,
permissions: Permissions = MISSING,
guild: Snowflake = MISSING,
redirect_uri: str = MISSING,
scopes: Iterable[str] = MISSING,
disable_guild_select: bool = MISSING,
):
"""A helper function that returns the OAuth2 URL for inviting the bot
into guilds.
Parameters
-----------
client_id: Union[:class:`int`, :class:`str`]
The client ID for your bot.
permissions: :class:`~discord.Permissions`
The permissions you're requesting. If not given then you won't be requesting any
permissions.
guild: :class:`~discord.abc.Snowflake`
The guild to pre-select in the authorization screen, if available.
redirect_uri: :class:`str`
An optional valid redirect URI.
scopes: Iterable[:class:`str`]
An optional valid list of scopes. Defaults to ``('bot',)``.
.. versionadded:: 1.7
disable_guild_select: :class:`bool`
Whether to disallow the user from changing the guild dropdown.
.. versionadded:: 2.0
Returns
--------
:class:`str`
The OAuth2 URL for inviting the bot into guilds.
"""
url = f'https://discord.com/oauth2/authorize?client_id={client_id}'
url += '&scope=' + '+'.join(scopes if scopes is not MISSING else ('bot',))
if permissions is not MISSING:
url += f'&permissions={permissions.value}'
if guild is not MISSING:
url += f'&guild_id={guild.id}'
if redirect_uri is not MISSING:
from urllib.parse import urlencode
url += '&response_type=code&' + urlencode({'redirect_uri': redirect_uri})
if disable_guild_select is not MISSING:
url += '&disable_guild_select='
url += 'true' if disable_guild_select else 'false'
return url
|
def oauth_url(
client_id: Union[int, str],
*,
permissions: Permissions = MISSING,
guild: Snowflake = MISSING,
redirect_uri: str = MISSING,
scopes: Iterable[str] = MISSING,
disable_guild_select: bool = False,
):
"""A helper function that returns the OAuth2 URL for inviting the bot
into guilds.
Parameters
-----------
client_id: Union[:class:`int`, :class:`str`]
The client ID for your bot.
permissions: :class:`~discord.Permissions`
The permissions you're requesting. If not given then you won't be requesting any
permissions.
guild: :class:`~discord.abc.Snowflake`
The guild to pre-select in the authorization screen, if available.
redirect_uri: :class:`str`
An optional valid redirect URI.
scopes: Iterable[:class:`str`]
An optional valid list of scopes. Defaults to ``('bot',)``.
.. versionadded:: 1.7
disable_guild_select: :class:`bool`
Whether to disallow the user from changing the guild dropdown.
.. versionadded:: 2.0
Returns
--------
:class:`str`
The OAuth2 URL for inviting the bot into guilds.
"""
url = f'https://discord.com/oauth2/authorize?client_id={client_id}'
url += '&scope=' + '+'.join(scopes if scopes is not MISSING else ('bot',))
if permissions is not MISSING:
url += f'&permissions={permissions.value}'
if guild is not MISSING:
url += f'&guild_id={guild.id}'
if redirect_uri is not MISSING:
from urllib.parse import urlencode
url += '&response_type=code&' + urlencode({'redirect_uri': redirect_uri})
if disable_guild_select is not MISSING:
url += '&disable_guild_select='
url += 'true' if disable_guild_select else 'false'
return url
|
45,416 |
def memory_thread(logger, sleep_time):
"""
Configure Modin logging system memory profiling thread.
Parameters
----------
logger : logging.Logger
The logger object.
sleep_time : int
The interval at which to profile system memory.
"""
while True:
rss_mem = bytes_int_to_str(psutil.Process().memory_info().rss)
svmem = psutil.virtual_memory()
logger.info(f"CPU Utilization: {svmem.percent}%")
logger.info(f"RSS Memory: {rss_mem}")
time.sleep(sleep_time)
|
def memory_thread(logger, sleep_time):
"""
Configure Modin logging system memory profiling thread.
Parameters
----------
logger : logging.Logger
The logger object.
sleep_time : int
The interval at which to profile system memory.
"""
while True:
rss_mem = bytes_int_to_str(psutil.Process().memory_info().rss)
svmem = psutil.virtual_memory()
logger.info(f"Memory Percentage: {svmem.percent}%")
logger.info(f"RSS Memory: {rss_mem}")
time.sleep(sleep_time)
|
41,203 |
def _Sdg(q, args, operations, qubits):
# Apply the tableau with S^+, so the inverse of operation is S
args.axes = [q]
protocols.act_on(ops.ZPowGate() ** 1.5, args, allow_decompose=False)
operations.append(ops.S(qubits[q]))
|
def _Sdg(q, args, operations, qubits):
# Apply the tableau with S^\{dagger}
args.axes = [q]
protocols.act_on(ops.ZPowGate() ** 1.5, args, allow_decompose=False)
operations.append(ops.S(qubits[q]))
|
43,693 |
def bit_driver(wires, n):
r"""Returns the bit-driver cost Hamiltonian component.
This Hamiltonian is defined as:
.. math:: H \ = \ (-1)^{n + 1} \displaystyle\sum_{i} Z_i
where :math:`Z_i` is the Pauli-Z operator acting on the
:math:`i`-th wire and :math:`n \ \in \ \{0, \ 1\}`. This Hamiltonian is often used as a term when
constructing larger QAOA cost Hamiltonians.
Args:
wires (Iterable or Wires): The wires on which the returned Hamiltonian acts
n (int): Either :math:`0` or :math:`1`. Determines whether the Hamiltonian assigns
lower energies to bitstrings with more :math:`0`s or :math:`1`s, respectively.
Returns:
.Hamiltonian
**Example**
>>> wires = range(3)
>>> hamiltonian = qaoa.pauli_driver(wires, 1)
>>> print(hamiltonian)
(1.0) [Z0] + (1.0) [Z1] + (1.0) [Z2]
"""
if n == 0:
coeffs = [-1 for _ in wires]
elif n == 1:
coeffs = [1 for _ in wires]
else:
raise ValueError("'state' argument must be either 0 or 1, got {}".format(n))
ops = [qml.PauliZ(w) for w in wires]
return qml.Hamiltonian(coeffs, ops)
|
def bit_driver(wires, n):
r"""Returns the bit-driver cost Hamiltonian component.
This Hamiltonian is defined as:
.. math:: H \ = \ (-1)^{n + 1} \displaystyle\sum_{i} Z_i
where :math:`Z_i` is the Pauli-Z operator acting on the
:math:`i`-th wire and :math:`n \ \in \ \{0, \ 1\}`. This Hamiltonian is often used as a term when
constructing larger QAOA cost Hamiltonians.
Args:
wires (Iterable or Wires): The wires on which the Hamiltonian acts
n (int): Either :math:`0` or :math:`1`. Determines whether the Hamiltonian assigns
lower energies to bitstrings with more :math:`0`s or :math:`1`s, respectively.
Returns:
.Hamiltonian
**Example**
>>> wires = range(3)
>>> hamiltonian = qaoa.pauli_driver(wires, 1)
>>> print(hamiltonian)
(1.0) [Z0] + (1.0) [Z1] + (1.0) [Z2]
"""
if n == 0:
coeffs = [-1 for _ in wires]
elif n == 1:
coeffs = [1 for _ in wires]
else:
raise ValueError("'state' argument must be either 0 or 1, got {}".format(n))
ops = [qml.PauliZ(w) for w in wires]
return qml.Hamiltonian(coeffs, ops)
|
6,510 |
def get_slideshow(slideshow):
values = {
'show_indicators': 1,
'show_controls': 1,
'rounded': 1,
'slider_name': "Categories"
}
slideshow = frappe.get_doc("Website Slideshow", slideshow)
slides = slideshow.get({"doctype": "Website Slideshow Item"})
for index, slide in enumerate(slides):
values[f"slide_{index + 1}_image"] = slide.image
values[f"slide_{index + 1}_title"] = slide.heading
values[f"slide_{index + 1}_subtitle"] = slide.description
values[f"slide_{index + 1}_theme"] = slide.get("theme") or "Light"
values[f"slide_{index + 1}_content_align"] = slide.get("content_align") or "Centre"
values[f"slide_{index + 1}_primary_action"] = slide.url
return values
|
def get_slideshow(slideshow):
values = {
'show_indicators': 1,
'show_controls': 1,
'rounded': 1,
'slider_name': "Categories"
}
slideshow = frappe.get_cached_doc("Website Slideshow", slideshow)
slides = slideshow.get({"doctype": "Website Slideshow Item"})
for index, slide in enumerate(slides):
values[f"slide_{index + 1}_image"] = slide.image
values[f"slide_{index + 1}_title"] = slide.heading
values[f"slide_{index + 1}_subtitle"] = slide.description
values[f"slide_{index + 1}_theme"] = slide.get("theme") or "Light"
values[f"slide_{index + 1}_content_align"] = slide.get("content_align") or "Centre"
values[f"slide_{index + 1}_primary_action"] = slide.url
return values
|
41,518 |
def all_pois_floating(pdf, fixed_params):
"""
Checks whether all POI(s) are floatig (i.e. not within the fixed set).
Args:
pdf (:obj:`pyhf.Model`): The model
fixed_params: array of bools
Returns:
bool: The result whether all POIs are floating.
"""
poi_fixed = fixed_params[pdf.config.poi_index]
return not poi_fixed
|
def all_pois_floating(pdf, fixed_params):
"""
Checks whether all POI(s) are floatig (i.e. not within the fixed set).
Args:
pdf (:obj:`pyhf.Model`): The model
fixed_params (:obj:`list` or `tensor` of :obj:`bool`): Array of :obj:`bool`s indicating if model parameters are fixed.
Returns:
bool: The result whether all POIs are floating.
"""
poi_fixed = fixed_params[pdf.config.poi_index]
return not poi_fixed
|
8,587 |
def harvest_get_notifications_recipients(context, data_dict):
""" get all recipients for a harvest source
Return a list of dicts like {'name': 'Jhon', 'email': jhon@source.com'} """
check_access('harvest_get_notifications_recipients', context, data_dict)
source_id = data_dict['source_id']
source = p.toolkit.get_action('harvest_source_show')(context, {'id': source_id})
recipients = []
# gather sysadmins
model = context['model']
sysadmins = model.Session.query(model.User).filter(
model.User.sysadmin == True # noqa: E712
).all()
# Send mail to all sysadmins with a non-empty email address
for sysadmin in sysadmins:
email_address = sysadmin.email
if email_address and email_address.strip():
recipients.append({
'name': sysadmin.name,
'email': sysadmin.email
})
# gather organization-admins
if source.get('organization'):
members = p.toolkit.get_action('member_list')(context, {
'id': source['organization']['id'],
'object_type': 'user',
'capacity': 'admin'
})
# Get access to email address by running action as admin user
context['user'] = 'admin'
for member in members:
member_details = p.toolkit.get_action('user_show')(context, {
'id': member[0],
'include_plugin_extras': True
})
email_address = member_details.get('email', None)
if email_address and email_address.strip():
recipients.append({
'name': member_details['name'],
'email': email_address
})
return recipients
|
def harvest_get_notifications_recipients(context, data_dict):
""" get all recipients for a harvest source
Return a list of dicts like {'name': 'Jhon', 'email': jhon@source.com'} """
check_access('harvest_get_notifications_recipients', context, data_dict)
source_id = data_dict['source_id']
source = p.toolkit.get_action('harvest_source_show')(context, {'id': source_id})
recipients = []
# gather sysadmins
model = context['model']
sysadmins = model.Session.query(model.User).filter(
model.User.sysadmin == True # noqa: E712
).all()
# Send mail to all sysadmins with a non-empty email address
for sysadmin in sysadmins:
email_address = sysadmin.email
if email_address and email_address.strip():
recipients.append({
'name': sysadmin.name,
'email': sysadmin.email
})
# gather organization-admins
if source.get('organization'):
members = p.toolkit.get_action('member_list')(context, {
'id': source['organization']['id'],
'object_type': 'user',
'capacity': 'admin'
})
# Get access to email address by running action as admin user
context['user'] = p.toolkit.get_action('get_site_user')({'ignore_auth': True})['name']
for member in members:
member_details = p.toolkit.get_action('user_show')(context, {
'id': member[0],
'include_plugin_extras': True
})
email_address = member_details.get('email', None)
if email_address and email_address.strip():
recipients.append({
'name': member_details['name'],
'email': email_address
})
return recipients
|
27,273 |
def _compare_tuples(a, b):
if len(a) != len(b):
return False
return all(_compare_items(x, y) for x, y in zip(a, b))
|
def _compare_tuples(a, b):
if len(a) != len(b):
return False
return all(map(_compare_items, a, b))
|
1,300 |
def test_huber_bool():
# Test that it does not crash with bool data
X, y = make_regression(n_samples=200, n_features=2, noise=4.0, random_state=0)
X_bool = X > 0
huber = HuberRegressor().fit(X_bool, y)
|
def test_huber_bool():
# Test that it does not crash with bool data
X, y = make_regression(n_samples=200, n_features=2, noise=4.0, random_state=0)
X_bool = X > 0
HuberRegressor().fit(X_bool, y)
|
57,956 |
def main():
file_type = ''
entry_id = demisto.args()['entryid']
max_depth = int(demisto.args().get('max_depth', '3'))
if max_depth < 1:
return_error('Minimum max_depth is 1, the script will parse just the top email')
parse_only_headers = demisto.args().get('parse_only_headers', 'false').lower() == 'true'
try:
result = demisto.executeCommand('getFilePath', {'id': entry_id})
if is_error(result):
return_error(get_error(result))
file_path = result[0]['Contents']['path']
file_name = result[0]['Contents']['name']
result = demisto.executeCommand('getEntry', {'id': entry_id})
if is_error(result):
return_error(get_error(result))
file_metadata = result[0]['FileMetadata']
file_type = file_metadata.get('info', '') or file_metadata.get('type', '')
except Exception as ex:
return_error(
"Failed to load file entry with entry id: {}. Error: {}".format(
entry_id, str(ex) + "\n\nTrace:\n" + traceback.format_exc()))
try:
email_parser = EmailParser(file_path=file_path, max_depth=max_depth, parse_only_headers=parse_only_headers,
file_info=file_type)
output = email_parser.parse()
resultss = []
if isinstance(output, dict):
output = [output]
for email in output:
if email.get('AttachmentsData'):
for attachment in email.get('AttachmentsData'):
if attachment.get('Name') and attachment.get('FileData'):
content = attachment.get('FileData')
del attachment['FileData']
name = attachment.get('Name')
attachment['FilePath'] = save_file(name, content)
resultss.append(CommandResults(outputs_prefix='Email',
outputs=email,
readable_output=data_to_md(email, file_name, email.get('ParentFileName', None),
print_only_headers=parse_only_headers),
raw_response=email,
entry_type=EntryType.NOTE))
return_results(resultss)
except Exception as e:
return_error(str(e) + "\n\nTrace:\n" + traceback.format_exc())
|
def main():
file_type = ''
args = demisto.args()
entry_id = args.get('entryid')
max_depth = arg_to_number(args.get('max_depth', '3'))
if max_depth < 1:
return_error('Minimum max_depth is 1, the script will parse just the top email')
parse_only_headers = arg_to_boolean(args.get('parse_only_headers', 'false'))
try:
result = demisto.executeCommand('getFilePath', {'id': entry_id})
if is_error(result):
return_error(get_error(result))
file_path = result[0]['Contents']['path']
file_name = result[0]['Contents']['name']
result = demisto.executeCommand('getEntry', {'id': entry_id})
if is_error(result):
return_error(get_error(result))
file_metadata = result[0]['FileMetadata']
file_type = file_metadata.get('info', '') or file_metadata.get('type', '')
except Exception as ex:
return_error(
"Failed to load file entry with entry id: {}. Error: {}".format(
entry_id, str(ex) + "\n\nTrace:\n" + traceback.format_exc()))
try:
email_parser = EmailParser(file_path=file_path, max_depth=max_depth, parse_only_headers=parse_only_headers,
file_info=file_type)
output = email_parser.parse()
resultss = []
if isinstance(output, dict):
output = [output]
for email in output:
if email.get('AttachmentsData'):
for attachment in email.get('AttachmentsData'):
if attachment.get('Name') and attachment.get('FileData'):
content = attachment.get('FileData')
del attachment['FileData']
name = attachment.get('Name')
attachment['FilePath'] = save_file(name, content)
resultss.append(CommandResults(outputs_prefix='Email',
outputs=email,
readable_output=data_to_md(email, file_name, email.get('ParentFileName', None),
print_only_headers=parse_only_headers),
raw_response=email,
entry_type=EntryType.NOTE))
return_results(resultss)
except Exception as e:
return_error(str(e) + "\n\nTrace:\n" + traceback.format_exc())
|
26,034 |
def load_arguments(self, _):
# Model imports
DiskStorageAccountTypes = self.get_models('DiskStorageAccountTypes', operation_group='disks')
SnapshotStorageAccountTypes = self.get_models('SnapshotStorageAccountTypes', operation_group='snapshots')
UpgradeMode, CachingTypes, OperatingSystemTypes = self.get_models('UpgradeMode', 'CachingTypes', 'OperatingSystemTypes')
HyperVGenerationTypes = self.get_models('HyperVGenerationTypes')
DedicatedHostLicenseTypes = self.get_models('DedicatedHostLicenseTypes')
OrchestrationServiceNames, OrchestrationServiceStateAction = self.get_models('OrchestrationServiceNames', 'OrchestrationServiceStateAction', operation_group='virtual_machine_scale_sets')
RebootSetting, VMGuestPatchClassificationWindows, VMGuestPatchClassificationLinux = self.get_models('VMGuestPatchRebootSetting', 'VMGuestPatchClassificationWindows', 'VMGuestPatchClassificationLinux')
GallerySharingPermissionTypes = self.get_models('GallerySharingPermissionTypes', operation_group='shared_galleries')
ReplicationMode = self.get_models('ReplicationMode', operation_group='gallery_image_versions')
# REUSABLE ARGUMENT DEFINITIONS
name_arg_type = CLIArgumentType(options_list=['--name', '-n'], metavar='NAME')
multi_ids_type = CLIArgumentType(nargs='+')
existing_vm_name = CLIArgumentType(overrides=name_arg_type,
configured_default='vm',
help="The name of the Virtual Machine. You can configure the default using `az configure --defaults vm=<name>`",
completer=get_resource_name_completion_list('Microsoft.Compute/virtualMachines'), id_part='name')
existing_disk_name = CLIArgumentType(overrides=name_arg_type, help='The name of the managed disk', completer=get_resource_name_completion_list('Microsoft.Compute/disks'), id_part='name')
existing_snapshot_name = CLIArgumentType(overrides=name_arg_type, help='The name of the snapshot', completer=get_resource_name_completion_list('Microsoft.Compute/snapshots'), id_part='name')
vmss_name_type = CLIArgumentType(name_arg_type,
configured_default='vmss',
completer=get_resource_name_completion_list('Microsoft.Compute/virtualMachineScaleSets'),
help="Scale set name. You can configure the default using `az configure --defaults vmss=<name>`",
id_part='name')
extension_instance_name_type = CLIArgumentType(help="Name of extension instance, which can be customized. Default: name of the extension.")
image_template_name_type = CLIArgumentType(overrides=name_arg_type, id_part='name')
disk_encryption_set_name = CLIArgumentType(overrides=name_arg_type, help='Name of disk encryption set.', id_part='name')
ephemeral_placement_type = CLIArgumentType(options_list=['--ephemeral-os-disk-placement', '--ephemeral-placement'], arg_type=get_enum_type(['ResourceDisk', 'CacheDisk']), min_api='2019-12-01')
license_type = CLIArgumentType(
help="Specifies that the Windows image or disk was licensed on-premises. To enable Azure Hybrid Benefit for "
"Windows Server, use 'Windows_Server'. To enable Multi-tenant Hosting Rights for Windows 10, "
"use 'Windows_Client'. For more information see the Azure Windows VM online docs.",
arg_type=get_enum_type(['Windows_Server', 'Windows_Client', 'RHEL_BYOS', 'SLES_BYOS', 'RHEL_BASE',
'RHEL_SAPAPPS', 'RHEL_SAPHA', 'RHEL_EUS', 'RHEL_BASESAPAPPS', 'RHEL_BASESAPHA', 'SLES_STANDARD', 'SLES', 'SLES_SAP', 'SLES_HPC',
'None', 'RHEL_ELS_6']))
# StorageAccountTypes renamed to DiskStorageAccountTypes in 2018_06_01 of azure-mgmt-compute
DiskStorageAccountTypes = DiskStorageAccountTypes or self.get_models('StorageAccountTypes')
if DiskStorageAccountTypes:
disk_sku = CLIArgumentType(arg_type=get_enum_type(DiskStorageAccountTypes))
else:
# StorageAccountTypes introduced in api version 2016_04_30_preview of Resource.MGMT.Compute package..
# However, 2017-03-09-profile targets version 2016-03-30 of compute package.
disk_sku = CLIArgumentType(arg_type=get_enum_type(['Premium_LRS', 'Standard_LRS']))
if SnapshotStorageAccountTypes:
snapshot_sku = CLIArgumentType(arg_type=get_enum_type(SnapshotStorageAccountTypes))
else:
# SnapshotStorageAccountTypes introduced in api version 2018_04_01 of Resource.MGMT.Compute package..
# However, 2017-03-09-profile targets version 2016-03-30 of compute package.
snapshot_sku = CLIArgumentType(arg_type=get_enum_type(['Premium_LRS', 'Standard_LRS']))
# special case for `network nic scale-set list` command alias
with self.argument_context('network nic scale-set list') as c:
c.argument('virtual_machine_scale_set_name', options_list=['--vmss-name'], completer=get_resource_name_completion_list('Microsoft.Compute/virtualMachineScaleSets'), id_part='name')
HyperVGenerationTypes = HyperVGenerationTypes or self.get_models('HyperVGeneration', operation_group='disks')
if HyperVGenerationTypes:
hyper_v_gen_sku = CLIArgumentType(arg_type=get_enum_type(HyperVGenerationTypes, default="V1"))
else:
hyper_v_gen_sku = CLIArgumentType(arg_type=get_enum_type(["V1", "V2"], default="V1"))
ultra_ssd_enabled_type = CLIArgumentType(
arg_type=get_three_state_flag(), min_api='2018-06-01',
help='Enables or disables the capability to have 1 or more managed data disks with UltraSSD_LRS storage account')
scale_in_policy_type = CLIArgumentType(
nargs='+', arg_type=get_enum_type(self.get_models('VirtualMachineScaleSetScaleInRules')),
help='Specify the scale-in policy (space delimited) that decides which virtual machines are chosen for removal when a Virtual Machine Scale Set is scaled-in.'
)
edge_zone_type = CLIArgumentType(
help='The name of edge zone.',
min_api='2020-12-01',
is_preview=True
)
t_shared_to = self.get_models('SharedToValues', operation_group='shared_galleries')
shared_to_type = CLIArgumentType(
arg_type=get_enum_type(t_shared_to),
help='The query parameter to decide what shared galleries to fetch when doing listing operations. '
'If not specified, list by subscription id.'
)
marker_type = CLIArgumentType(
help='A string value that identifies the portion of the list of containers to be '
'returned with the next listing operation. The operation returns the NextMarker value within '
'the response body if the listing operation did not return all containers remaining to be listed '
'with the current page. If specified, this generator will begin returning results from the point '
'where the previous generator stopped.')
enable_vtpm_type = CLIArgumentType(arg_type=get_three_state_flag(), min_api='2020-12-01', help='Enable vTPM.')
enable_secure_boot_type = CLIArgumentType(arg_type=get_three_state_flag(), min_api='2020-12-01', help='Enable secure boot.')
security_type = CLIArgumentType(arg_type=get_enum_type(self.get_models('SecurityTypes')), min_api='2020-12-01', help='Specify the security type of the virtual machine.')
gallery_image_name_type = CLIArgumentType(options_list=['--gallery-image-definition', '-i'], help='The name of the community gallery image definition from which the image versions are to be listed.', id_part='child_name_2')
gallery_image_name_version_type = CLIArgumentType(options_list=['--gallery-image-version', '-e'], help='The name of the gallery image version to be created. Needs to follow semantic version name pattern: The allowed characters are digit and period. Digits must be within the range of a 32-bit integer. Format: <MajorVersion>.<MinorVersion>.<Patch>', id_part='child_name_3')
public_gallery_name_type = CLIArgumentType(help='The public name of community gallery.', id_part='child_name_1')
# region MixedScopes
for scope in ['vm', 'disk', 'snapshot', 'image', 'sig']:
with self.argument_context(scope) as c:
c.argument('tags', tags_type)
for scope in ['disk', 'snapshot']:
with self.argument_context(scope) as c:
c.ignore('source_blob_uri', 'source_disk', 'source_snapshot', 'source_restore_point')
c.argument('source_storage_account_id', help='used when source blob is in a different subscription')
c.argument('size_gb', options_list=['--size-gb', '-z'], help='size in GB. Max size: 4095 GB (certain preview disks can be larger).', type=int)
c.argument('duration_in_seconds', help='Time duration in seconds until the SAS access expires', type=int)
if self.supported_api_version(min_api='2018-09-30', operation_group='disks'):
c.argument('access_level', arg_type=get_enum_type(['Read', 'Write']), default='Read', help='access level')
c.argument('hyper_v_generation', arg_type=hyper_v_gen_sku, help='The hypervisor generation of the Virtual Machine. Applicable to OS disks only.')
else:
c.ignore('access_level', 'for_upload', 'hyper_v_generation')
c.argument('encryption_type', min_api='2019-07-01', arg_type=get_enum_type(self.get_models('EncryptionType', operation_group='disks')),
help='Encryption type. EncryptionAtRestWithPlatformKey: Disk is encrypted with XStore managed key at rest. It is the default encryption type. EncryptionAtRestWithCustomerKey: Disk is encrypted with Customer managed key at rest.')
c.argument('disk_encryption_set', min_api='2019-07-01', help='Name or ID of disk encryption set that is used to encrypt the disk.')
c.argument('location', help='Location. Values from: `az account list-locations`. You can configure the default location using `az configure --defaults location=<location>`. If location is not specified and no default location specified, location will be automatically set as same as the resource group.')
operation_group = 'disks' if scope == 'disk' else 'snapshots'
c.argument('network_access_policy', min_api='2020-05-01', help='Policy for accessing the disk via network.', arg_type=get_enum_type(self.get_models('NetworkAccessPolicy', operation_group=operation_group)))
c.argument('disk_access', min_api='2020-05-01', help='Name or ID of the disk access resource for using private endpoints on disks.')
c.argument('enable_bursting', arg_type=get_three_state_flag(), help='Enable on-demand bursting beyond the provisioned performance target of the disk. On-demand bursting is disabled by default, and it does not apply to Ultra disks.')
c.argument('public_network_access', arg_type=get_enum_type(['Disabled', 'Enabled']), min_api='2021-04-01', is_preview=True, help='Customers can set on Managed Disks or Snapshots to control the export policy on the disk.')
c.argument('accelerated_network', arg_type=get_three_state_flag(), min_api='2021-04-01', is_preview=True, help='Customers can set on Managed Disks or Snapshots to enable the accelerated networking if the OS disk image support.')
for scope in ['disk create', 'snapshot create']:
with self.argument_context(scope) as c:
c.argument('source', help='source to create the disk/snapshot from, including unmanaged blob uri, managed disk id or name, or snapshot id or name')
c.argument('secure_vm_disk_encryption_set', min_api='2021-08-01', help='Name or ID of disk encryption set created with ConfidentialVmEncryptedWithCustomerKey encryption type.')
# endregion
# region Disks
with self.argument_context('disk grant-access', resource_type=ResourceType.MGMT_COMPUTE, operation_group='disks') as c:
c.argument('secure_vm_guest_state_sas', options_list=['--secure-vm-guest-state-sas', '-s'], min_api='2022-03-02',
action='store_true', validator=validate_secure_vm_guest_state_sas,
help="Get SAS on managed disk with VM guest state. It will be used by default when the create option of disk is 'secureOSUpload'")
# endregion
# region Disks
with self.argument_context('disk', resource_type=ResourceType.MGMT_COMPUTE, operation_group='disks') as c:
c.argument('zone', zone_type, min_api='2017-03-30', options_list=['--zone']) # TODO: --size-gb currently has claimed -z. We can do a breaking change later if we want to.
c.argument('disk_name', existing_disk_name, completer=get_resource_name_completion_list('Microsoft.Compute/disks'))
c.argument('name', arg_type=name_arg_type)
c.argument('sku', arg_type=disk_sku, help='Underlying storage SKU')
c.argument('os_type', arg_type=get_enum_type(OperatingSystemTypes), help='The Operating System type of the Disk.')
c.argument('disk_iops_read_write', type=int, min_api='2018-06-01', help='The number of IOPS allowed for this disk. Only settable for UltraSSD disks. One operation can transfer between 4k and 256k bytes')
c.argument('disk_mbps_read_write', type=int, min_api='2018-06-01', help="The bandwidth allowed for this disk. Only settable for UltraSSD disks. MBps means millions of bytes per second with ISO notation of powers of 10")
c.argument('upload_size_bytes', type=int, min_api='2019-03-01',
help='The size (in bytes) of the contents of the upload including the VHD footer. Min value: 20972032. Max value: 35183298347520')
c.argument('max_shares', type=int, help='The maximum number of VMs that can attach to the disk at the same time. Value greater than one indicates a disk that can be mounted on multiple VMs at the same time')
c.argument('disk_iops_read_only', type=int, help='The total number of IOPS that will be allowed across all VMs mounting the shared disk as ReadOnly. One operation can transfer between 4k and 256k bytes')
c.argument('disk_mbps_read_only', type=int, help='The total throughput (MBps) that will be allowed across all VMs mounting the shared disk as ReadOnly. MBps means millions of bytes per second - MB here uses the ISO notation, of powers of 10')
c.argument('image_reference', help='ID or URN (publisher:offer:sku:version) of the image from which to create a disk')
c.argument('image_reference_lun', type=int, help='If the disk is created from an image\'s data disk, this is an index that indicates which of the data disks in the image to use. For OS disks, this field is null')
c.argument('gallery_image_reference', help='ID of the Compute, Shared or Community Gallery image version from which to create a disk. For details about valid format, please refer to the help sample')
c.ignore('gallery_image_reference_type')
c.argument('gallery_image_reference_lun', type=int, help='If the disk is created from an image\'s data disk, this is an index that indicates which of the data disks in the image to use. For OS disks, this field is null')
c.argument('logical_sector_size', type=int, help='Logical sector size in bytes for Ultra disks. Supported values are 512 ad 4096. 4096 is the default.')
c.argument('tier', help='Performance tier of the disk (e.g, P4, S10) as described here: https://azure.microsoft.com/pricing/details/managed-disks/. Does not apply to Ultra disks.')
c.argument('edge_zone', edge_zone_type)
c.argument('security_type', arg_type=get_enum_type(self.get_models('DiskSecurityTypes', operation_group='disks')), help='The security type of the VM. Applicable for OS disks only.', min_api='2020-12-01')
c.argument('support_hibernation', arg_type=get_three_state_flag(), help='Indicate the OS on a disk supports hibernation.', min_api='2020-12-01')
c.argument('architecture', arg_type=get_enum_type(self.get_models('Architecture', operation_group='disks')), min_api='2021-12-01', help='CPU architecture.')
c.argument('data_access_auth_mode', arg_type=get_enum_type(['AzureActiveDirectory', 'None']), min_api='2021-12-01', help='Specify the auth mode when exporting or uploading to a disk or snapshot.')
# endregion
# region Disks
with self.argument_context('disk create', resource_type=ResourceType.MGMT_COMPUTE, operation_group='disks') as c:
c.argument('security_data_uri', min_api='2022-03-02', help='Please specify the blob URI of VHD to be imported into VM guest state')
c.argument('for_upload', arg_type=get_three_state_flag(), min_api='2018-09-30',
deprecate_info=c.deprecate(target='--for-upload', redirect='--upload-type Upload', hide=True),
help='Create the disk for uploading blobs. Replaced by "--upload-type Upload"')
c.argument('upload_type', arg_type=get_enum_type(['Upload', 'UploadWithSecurityData']), min_api='2018-09-30',
help="Create the disk for upload scenario. 'Upload' is for Standard disk only upload. 'UploadWithSecurityData' is for OS Disk upload along with VM Guest State. Please note the 'UploadWithSecurityData' is not valid for data disk upload, it only to be used for OS Disk upload at present.")
# endregion
# region Snapshots
with self.argument_context('snapshot', resource_type=ResourceType.MGMT_COMPUTE, operation_group='snapshots') as c:
c.argument('snapshot_name', existing_snapshot_name, id_part='name', completer=get_resource_name_completion_list('Microsoft.Compute/snapshots'))
c.argument('name', arg_type=name_arg_type)
c.argument('sku', arg_type=snapshot_sku)
c.argument('incremental', arg_type=get_three_state_flag(), min_api='2019-03-01',
help='Whether a snapshot is incremental. Incremental snapshots on the same disk occupy less space than full snapshots and can be diffed')
c.argument('edge_zone', edge_zone_type)
c.argument('copy_start', arg_type=get_three_state_flag(), min_api='2021-04-01',
help='Create snapshot by using a deep copy process, where the resource creation is considered complete only after all data has been copied from the source.')
c.argument('architecture', arg_type=get_enum_type(self.get_models('Architecture', operation_group='snapshots')), min_api='2021-12-01', help='CPU architecture.')
c.argument('for_upload', arg_type=get_three_state_flag(), min_api='2018-09-30',
help='Create the snapshot for uploading blobs later on through storage commands. Run "az snapshot grant-access --access-level Write" to retrieve the snapshot\'s SAS token.')
# endregion
# region Images
with self.argument_context('image') as c:
c.argument('os_type', arg_type=get_enum_type(['Windows', 'Linux']))
c.argument('image_name', arg_type=name_arg_type, id_part='name', completer=get_resource_name_completion_list('Microsoft.Compute/images'))
c.argument('tags', tags_type)
with self.argument_context('image create') as c:
# here we collpase all difference image sources to under 2 common arguments --os-disk-source --data-disk-sources
c.argument('name', arg_type=name_arg_type, help='new image name')
c.argument('source', help='OS disk source from the same region, including a virtual machine ID or name, OS disk blob URI, managed OS disk ID or name, or OS snapshot ID or name')
c.argument('data_disk_sources', nargs='+', help='Space-separated list of data disk sources, including unmanaged blob URI, managed disk ID or name, or snapshot ID or name')
c.argument('zone_resilient', min_api='2017-12-01', arg_type=get_three_state_flag(), help='Specifies whether an image is zone resilient or not. '
'Default is false. Zone resilient images can be created only in regions that provide Zone Redundant Storage')
c.argument('storage_sku', arg_type=disk_sku, help='The SKU of the storage account with which to create the VM image. Unused if source VM is specified.')
c.argument('os_disk_caching', arg_type=get_enum_type(CachingTypes), help="Storage caching type for the image's OS disk.")
c.argument('data_disk_caching', arg_type=get_enum_type(CachingTypes),
help="Storage caching type for the image's data disk.")
c.argument('hyper_v_generation', arg_type=hyper_v_gen_sku, min_api="2019-03-01", help='The hypervisor generation of the Virtual Machine created from the image.')
c.ignore('source_virtual_machine', 'os_blob_uri', 'os_disk', 'os_snapshot', 'data_blob_uris', 'data_disks', 'data_snapshots')
c.argument('edge_zone', edge_zone_type, )
# endregion
# region Image Templates
with self.argument_context('image builder') as c:
ib_output_name_help = "Name of the image builder run output."
c.argument('location', get_location_type(self.cli_ctx))
c.argument('scripts', nargs='+', help="Space-separated list of shell or powershell scripts to customize the image with. Each script must be a publicly accessible URL."
" Infers type of script from file extension ('.sh' or'.ps1') or from source type. More more customizer options and flexibility, see: 'az image template customizer add'")
c.argument('source', options_list=["--image-source", "-i"], help="The base image to customize. Must be a valid platform image URN, platform image alias, Red Hat ISO image URI, managed image name/ID, or shared image version ID.")
c.argument('image_template_name', image_template_name_type, help="The name of the image template.")
c.argument('checksum', help="The SHA256 checksum of the Red Hat ISO image")
c.argument('managed_image_destinations', nargs='+', help='Managed image output distributor information. Space-separated list of key-value pairs. E.g "image_1=westus2 image_2=westus". Each key is the name or resource ID of the managed image to be created. Each value is the location of the image.')
c.argument('shared_image_destinations', nargs='+', help='Shared image gallery (sig) output distributor information. Space-separated list of key-value pairs. E.g "my_gallery_1/image_def_1=eastus,westus my_gallery_2/image_def_2=uksouth,canadaeast,francesouth." '
'Each key is the sig image definition ID or sig gallery name and sig image definition delimited by a "/". Each value is a comma-delimited list of replica locations.')
c.argument('output_name', help=ib_output_name_help)
c.ignore('destinations_lists', 'scripts_list', 'source_dict')
with self.argument_context('image builder create') as c:
ib_source_type = CLIArgumentType(arg_group="Image Source")
ib_customizer_type = CLIArgumentType(arg_group="Customizer")
ib_cutput_type = CLIArgumentType(arg_group="Output")
c.argument('build_timeout', type=int, help="The Maximum duration to wait while building the image template, in minutes. Default is 60.")
c.argument('image_template', help='Local path or URL to an image template file. When using --image-template, all other parameters are ignored except -g and -n. Reference: https://docs.microsoft.com/azure/virtual-machines/linux/image-builder-json')
c.argument('identity', nargs='+', help='List of user assigned identities (name or ID, space delimited) of the image template.')
# VM profile
c.argument('vm_size', help='Size of the virtual machine used to build, customize and capture images. Omit or specify empty string to use the default (Standard_D1_v2)')
c.argument('os_disk_size', type=int, help='Size of the OS disk in GB. Omit or specify 0 to use Azure\'s default OS disk size')
c.argument('vnet', help='Name of VNET to deploy the build virtual machine. You should only specify it when subnet is a name')
c.argument('subnet', help='Name or ID of subnet to deploy the build virtual machine')
c.argument('proxy_vm_size', help='Size of the virtual machine used to build, customize and capture images (Standard_D1_v2 for Gen1 images and Standard_D2ds_v4 for Gen2 images).')
c.argument('build_vm_identities', nargs='+', help='Optional configuration of the virtual network to use to deploy the build virtual machine in. Omit if no specific virtual network needs to be used.')
# Image Source Arguments
c.argument('source', arg_type=ib_source_type)
c.argument('checksum', arg_type=ib_source_type)
c.argument('', arg_type=ib_source_type)
# Image Customizer Arguments
c.argument('scripts', arg_type=ib_customizer_type)
c.argument('', arg_type=ib_customizer_type)
c.argument('', arg_type=ib_customizer_type)
# Image Output Arguments
c.argument('managed_image_destinations', arg_type=ib_cutput_type)
c.argument('shared_image_destinations', arg_type=ib_cutput_type)
c.argument('output_name', arg_type=ib_cutput_type)
with self.argument_context('image builder output') as c:
ib_sig_regions_help = "Space-separated list of regions to replicate the image version into."
ib_img_location_help = "Location where the customized image will be created."
c.argument('gallery_image_definition', arg_group="Shared Image Gallery", help="Name or ID of the existing SIG image definition to create the customized image version with.")
c.argument('gallery_name', arg_group="Shared Image Gallery", help="Shared image gallery name, if image definition name and not ID was provided.")
c.argument('gallery_replication_regions', arg_group="Shared Image Gallery", nargs='+', help=ib_sig_regions_help)
c.argument('managed_image', arg_group="Managed Image", help="Name or ID of the customized managed image to be created.")
c.argument('managed_image_location', arg_group="Managed Image", help=ib_img_location_help)
with self.argument_context('image builder output add') as c:
ib_artifact_tags_help = "Tags that will be applied to the output artifact once it has been created by the distributor. " + tags_type.settings['help']
ib_artifact_tags_type = CLIArgumentType(overrides=tags_type, help=ib_artifact_tags_help, options_list=["--artifact-tags"])
ib_default_loc_help = " Defaults to resource group's location."
c.argument('output_name', help=ib_output_name_help + " Defaults to the name of the managed image or sig image definition.")
c.argument('gallery_replication_regions', arg_group="Shared Image Gallery", nargs='+', help=ib_sig_regions_help + ib_default_loc_help)
c.argument('managed_image_location', arg_group="Managed Image", help=ib_img_location_help + ib_default_loc_help)
c.argument('is_vhd', arg_group="VHD", help="The output is a VHD distributor.", action='store_true')
c.argument('tags', arg_type=ib_artifact_tags_type)
c.ignore('location')
with self.argument_context('image builder customizer') as c:
ib_win_restart_type = CLIArgumentType(arg_group="Windows Restart")
ib_win_update_type = CLIArgumentType(arg_group="Windows Update")
ib_script_type = CLIArgumentType(arg_group="Shell and Powershell")
ib_powershell_type = CLIArgumentType(arg_group="Powershell")
ib_file_customizer_type = CLIArgumentType(arg_group="File")
c.argument('customizer_name', help="Name of the customizer.")
c.argument('customizer_type', options_list=['--type', '-t'], help="Type of customizer to be added to the image template.", arg_type=get_enum_type(ScriptType))
# Script Args
c.argument('script_url', arg_type=ib_script_type, help="URL of script to customize the image with. The URL must be publicly accessible.")
c.argument('inline_script', arg_type=ib_script_type, nargs='+', help="Space-separated list of inline script lines to customize the image with.")
# Powershell Specific Args
c.argument('valid_exit_codes', options_list=['--exit-codes', '-e'], arg_type=ib_powershell_type, nargs='+', help="Space-separated list of valid exit codes, as integers")
# Windows Restart Specific Args
c.argument('restart_command', arg_type=ib_win_restart_type, help="Command to execute the restart operation.")
c.argument('restart_check_command', arg_type=ib_win_restart_type, help="Command to verify that restart succeeded.")
c.argument('restart_timeout', arg_type=ib_win_restart_type, help="Restart timeout specified as a string consisting of a magnitude and unit, e.g. '5m' (5 minutes) or '2h' (2 hours)", default="5m")
# Windows Update Specific Args
c.argument('search_criteria', arg_type=ib_win_update_type, help='Criteria to search updates. Omit or specify empty string to use the default (search all). Refer to above link for examples and detailed description of this field.')
c.argument('filters', arg_type=ib_win_update_type, nargs='+', help='Space delimited filters to select updates to apply. Omit or specify empty array to use the default (no filter)')
c.argument('update_limit', arg_type=ib_win_update_type, help='Maximum number of updates to apply at a time. Omit or specify 0 to use the default (1000)')
# File Args
c.argument('file_source', arg_type=ib_file_customizer_type, help="The URI of the file to be downloaded into the image. It can be a github link, SAS URI for Azure Storage, etc.")
c.argument('dest_path', arg_type=ib_file_customizer_type, help="The absolute destination path where the file specified in --file-source will be downloaded to in the image")
# endregion
# region AvailabilitySets
with self.argument_context('vm availability-set') as c:
c.argument('availability_set_name', name_arg_type, id_part='name', completer=get_resource_name_completion_list('Microsoft.Compute/availabilitySets'), help='Name of the availability set')
with self.argument_context('vm availability-set create') as c:
c.argument('availability_set_name', name_arg_type, validator=get_default_location_from_resource_group, help='Name of the availability set')
c.argument('platform_update_domain_count', type=int, help='Update Domain count. If unspecified, the server will pick the most optimal number like 5.')
c.argument('platform_fault_domain_count', type=int, help='Fault Domain count.')
c.argument('validate', help='Generate and validate the ARM template without creating any resources.', action='store_true')
c.argument('unmanaged', action='store_true', min_api='2016-04-30-preview', help='contained VMs should use unmanaged disks')
with self.argument_context('vm availability-set update') as c:
if self.supported_api_version(max_api='2016-04-30-preview', operation_group='virtual_machines'):
c.argument('name', name_arg_type, id_part='name', completer=get_resource_name_completion_list('Microsoft.Compute/availabilitySets'), help='Name of the availability set')
c.argument('availability_set_name', options_list=['--availability-set-name'])
# endregion
# region VirtualMachines
with self.argument_context('vm') as c:
c.argument('vm_name', existing_vm_name)
c.argument('size', completer=get_vm_size_completion_list)
c.argument('name', arg_type=name_arg_type)
c.argument('zone', zone_type, min_api='2017-03-30')
c.argument('caching', help='Disk caching policy', arg_type=get_enum_type(CachingTypes))
c.argument('nsg', help='The name to use when creating a new Network Security Group (default) or referencing an existing one. Can also reference an existing NSG by ID or specify "" for none.', arg_group='Network')
c.argument('nsg_rule', help='NSG rule to create when creating a new NSG. Defaults to open ports for allowing RDP on Windows and allowing SSH on Linux.', arg_group='Network', arg_type=get_enum_type(['RDP', 'SSH']))
c.argument('application_security_groups', min_api='2017-09-01', nargs='+', options_list=['--asgs'], help='Space-separated list of existing application security groups to associate with the VM.', arg_group='Network')
c.argument('workspace', is_preview=True, arg_group='Monitor', help='Name or ID of Log Analytics Workspace. If you specify the workspace through its name, the workspace should be in the same resource group with the vm, otherwise a new workspace will be created.')
with self.argument_context('vm capture') as c:
c.argument('overwrite', action='store_true')
with self.argument_context('vm update') as c:
c.argument('os_disk', min_api='2017-12-01', help="Managed OS disk ID or name to swap to")
c.argument('write_accelerator', nargs='*', min_api='2017-12-01',
help="enable/disable disk write accelerator. Use singular value 'true/false' to apply across, or specify individual disks, e.g.'os=true 1=true 2=true' for os disk and data disks with lun of 1 & 2")
c.argument('disk_caching', nargs='*', help="Use singular value to apply across, or specify individual disks, e.g. 'os=ReadWrite 0=None 1=ReadOnly' should enable update os disk and 2 data disks")
c.argument('ultra_ssd_enabled', ultra_ssd_enabled_type)
c.argument('enable_secure_boot', enable_secure_boot_type)
c.argument('enable_vtpm', enable_vtpm_type)
c.argument('size', help='The new size of the virtual machine. See https://azure.microsoft.com/pricing/details/virtual-machines/ for size info.', is_preview=True)
c.argument('ephemeral_os_disk_placement', arg_type=ephemeral_placement_type,
help='Only applicable when used with `--size`. Allows you to choose the Ephemeral OS disk provisioning location.', is_preview=True)
c.argument('enable_hibernation', arg_type=get_three_state_flag(), min_api='2021-03-01', help='The flag that enable or disable hibernation capability on the VM.')
with self.argument_context('vm create') as c:
c.argument('name', name_arg_type, validator=_resource_not_exists(self.cli_ctx, 'Microsoft.Compute/virtualMachines'))
c.argument('vm_name', name_arg_type, id_part=None, help='Name of the virtual machine.', completer=None)
c.argument('os_disk_size_gb', type=int, help='the size of the os disk in GB', arg_group='Storage')
c.argument('availability_set', help='Name or ID of an existing availability set to add the VM to. None by default.')
c.argument('vmss', help='Name or ID of an existing virtual machine scale set that the virtual machine should be assigned to. None by default.')
c.argument('nsg', help='The name to use when creating a new Network Security Group (default) or referencing an existing one. Can also reference an existing NSG by ID or specify "" for none (\'""\' in Azure CLI using PowerShell or --% operator).', arg_group='Network')
c.argument('nsg_rule', help='NSG rule to create when creating a new NSG. Defaults to open ports for allowing RDP on Windows and allowing SSH on Linux. NONE represents no NSG rule', arg_group='Network', arg_type=get_enum_type(['RDP', 'SSH', 'NONE']))
c.argument('application_security_groups', resource_type=ResourceType.MGMT_NETWORK, min_api='2017-09-01', nargs='+', options_list=['--asgs'], help='Space-separated list of existing application security groups to associate with the VM.', arg_group='Network', validator=validate_asg_names_or_ids)
c.argument('boot_diagnostics_storage',
help='pre-existing storage account name or its blob uri to capture boot diagnostics. Its sku should be one of Standard_GRS, Standard_LRS and Standard_RAGRS')
c.argument('accelerated_networking', resource_type=ResourceType.MGMT_NETWORK, min_api='2016-09-01', arg_type=get_three_state_flag(), arg_group='Network',
help="enable accelerated networking. Unless specified, CLI will enable it based on machine image and size")
if self.supported_api_version(min_api='2019-03-01', resource_type=ResourceType.MGMT_COMPUTE):
VirtualMachineEvictionPolicyTypes = self.get_models('VirtualMachineEvictionPolicyTypes', resource_type=ResourceType.MGMT_COMPUTE)
c.argument('eviction_policy', resource_type=ResourceType.MGMT_COMPUTE, min_api='2019-03-01',
arg_type=get_enum_type(VirtualMachineEvictionPolicyTypes, default=None),
help="The eviction policy for the Spot priority virtual machine. Default eviction policy is Deallocate for a Spot priority virtual machine")
c.argument('enable_agent', arg_type=get_three_state_flag(), min_api='2018-06-01',
help='Indicates whether virtual machine agent should be provisioned on the virtual machine. When this property is not specified, default behavior is to set it to true. This will ensure that VM Agent is installed on the VM so that extensions can be added to the VM later')
c.argument('enable_auto_update', arg_type=get_three_state_flag(), min_api='2020-06-01',
help='Indicate whether Automatic Updates is enabled for the Windows virtual machine')
c.argument('patch_mode', arg_type=get_enum_type(['AutomaticByOS', 'AutomaticByPlatform', 'Manual', 'ImageDefault']), min_api='2020-12-01',
help='Mode of in-guest patching to IaaS virtual machine. Allowed values for Windows VM: AutomaticByOS, AutomaticByPlatform, Manual. Allowed values for Linux VM: AutomaticByPlatform, ImageDefault. Manual - You control the application of patches to a virtual machine. You do this by applying patches manually inside the VM. In this mode, automatic updates are disabled; the paramater --enable-auto-update must be false. AutomaticByOS - The virtual machine will automatically be updated by the OS. The parameter --enable-auto-update must be true. AutomaticByPlatform - the virtual machine will automatically updated by the OS. ImageDefault - The virtual machine\'s default patching configuration is used. The parameter --enable-agent and --enable-auto-update must be true')
c.argument('ssh_key_name', help='Use it as public key in virtual machine. It should be an existing SSH key resource in Azure.')
c.argument('enable_hotpatching', arg_type=get_three_state_flag(), help='Patch VMs without requiring a reboot. --enable-agent must be set and --patch-mode must be set to AutomaticByPlatform', min_api='2020-12-01')
c.argument('platform_fault_domain', min_api='2020-06-01',
help='Specify the scale set logical fault domain into which the virtual machine will be created. By default, the virtual machine will be automatically assigned to a fault domain that best maintains balance across available fault domains. This is applicable only if the virtualMachineScaleSet property of this virtual machine is set. The virtual machine scale set that is referenced, must have platform fault domain count. This property cannot be updated once the virtual machine is created. Fault domain assignment can be viewed in the virtual machine instance view')
c.argument('count', type=int, is_preview=True,
help='Number of virtual machines to create. Value range is [2, 250], inclusive. Don\'t specify this parameter if you want to create a normal single VM. The VMs are created in parallel. The output of this command is an array of VMs instead of one single VM. Each VM has its own public IP, NIC. VNET and NSG are shared. It is recommended that no existing public IP, NIC, VNET and NSG are in resource group. When --count is specified, --attach-data-disks, --attach-os-disk, --boot-diagnostics-storage, --computer-name, --host, --host-group, --nics, --os-disk-name, --private-ip-address, --public-ip-address, --public-ip-address-dns-name, --storage-account, --storage-container-name, --subnet, --use-unmanaged-disk, --vnet-name are not allowed.')
c.argument('security_type', security_type)
c.argument('enable_secure_boot', enable_secure_boot_type)
c.argument('enable_vtpm', enable_vtpm_type)
c.argument('user_data', help='UserData for the VM. It can be passed in as file or string.', completer=FilesCompleter(), type=file_type, min_api='2021-03-01')
c.argument('enable_hibernation', arg_type=get_three_state_flag(), min_api='2021-03-01', help='The flag that enable or disable hibernation capability on the VM.')
with self.argument_context('vm create', arg_group='Storage') as c:
c.argument('attach_os_disk', help='Attach an existing OS disk to the VM. Can use the name or ID of a managed disk or the URI to an unmanaged disk VHD.')
c.argument('attach_data_disks', nargs='+', help='Attach existing data disks to the VM. Can use the name or ID of a managed disk or the URI to an unmanaged disk VHD.')
with self.argument_context('vm create', arg_group='Dedicated Host', min_api='2019-03-01') as c:
c.argument('dedicated_host_group', options_list=['--host-group'], is_preview=True, help="Name or resource ID of the dedicated host group that the VM will reside in. --host and --host-group can't be used together.")
c.argument('dedicated_host', options_list=['--host'], is_preview=True, help="Resource ID of the dedicated host that the VM will reside in. --host and --host-group can't be used together.")
with self.argument_context('vm update', arg_group='Dedicated Host', min_api='2019-03-01') as c:
c.argument('dedicated_host_group', options_list=['--host-group'], is_preview=True, help="Name or resource ID of the dedicated host group that the VM will reside in. --host and --host-group can't be used together. You should deallocate the VM before update, and start the VM after update. Please check out help for more examples.")
c.argument('dedicated_host', options_list=['--host'], is_preview=True, help="Resource ID of the dedicated host that the VM will reside in. --host and --host-group can't be used together. You should deallocate the VM before update, and start the VM after update. Please check out help for more examples.")
with self.argument_context('vm open-port') as c:
c.argument('vm_name', name_arg_type, help='The name of the virtual machine to open inbound traffic on.')
c.argument('network_security_group_name', options_list=('--nsg-name',), help='The name of the network security group to create if one does not exist. Ignored if an NSG already exists.', validator=validate_nsg_name)
c.argument('apply_to_subnet', help='Allow inbound traffic on the subnet instead of the NIC', action='store_true')
c.argument('port', help="The port or port range (ex: 80-100) to open inbound traffic to. Use '*' to allow traffic to all ports. Use comma separated values to specify more than one port or port range.")
c.argument('priority', help='Rule priority, between 100 (highest priority) and 4096 (lowest priority). Must be unique for each rule in the collection.', type=int)
for scope in ['vm show', 'vm list']:
with self.argument_context(scope) as c:
c.argument('show_details', action='store_true', options_list=['--show-details', '-d'], help='show public ip address, FQDN, and power states. command will run slow')
for scope in ['vm show', 'vmss show']:
with self.argument_context(scope) as c:
c.argument('include_user_data', action='store_true', options_list=['--include-user-data', '-u'], help='Include the user data properties in the query result.', min_api='2021-03-01')
for scope in ['vm get-instance-view', 'vm wait', 'vmss wait']:
with self.argument_context(scope) as c:
c.ignore('include_user_data')
with self.argument_context('vm diagnostics') as c:
c.argument('vm_name', arg_type=existing_vm_name, options_list=['--vm-name'])
with self.argument_context('vm diagnostics set') as c:
c.argument('storage_account', completer=get_resource_name_completion_list('Microsoft.Storage/storageAccounts'))
with self.argument_context('vm install-patches') as c:
c.argument('maximum_duration', type=str, help='Specify the maximum amount of time that the operation will run. It must be an ISO 8601-compliant duration string such as PT4H (4 hours)')
c.argument('reboot_setting', arg_type=get_enum_type(RebootSetting), help='Define when it is acceptable to reboot a VM during a software update operation.')
c.argument('classifications_to_include_win', nargs='+', arg_type=get_enum_type(VMGuestPatchClassificationWindows), help='Space-separated list of classifications to include for Windows VM.')
c.argument('classifications_to_include_linux', nargs='+', arg_type=get_enum_type(VMGuestPatchClassificationLinux), help='Space-separated list of classifications to include for Linux VM.')
c.argument('kb_numbers_to_include', nargs='+', help='Space-separated list of KBs to include in the patch operation. Applicable to Windows VM only')
c.argument('kb_numbers_to_exclude', nargs='+', help='Space-separated list of KBs to exclude in the patch operation. Applicable to Windows VM only')
c.argument('exclude_kbs_requiring_reboot', arg_type=get_three_state_flag(), help="Filter out KBs that don't have a reboot behavior of 'NeverReboots' when this is set. Applicable to Windows VM only")
c.argument('package_name_masks_to_include', nargs='+', help='Space-separated list of packages to include in the patch operation. Format: packageName_packageVersion. Applicable to Linux VM only')
c.argument('package_name_masks_to_exclude', nargs='+', help='Space-separated list of packages to exclude in the patch operation. Format: packageName_packageVersion. Applicable to Linux VM only')
with self.argument_context('vm disk') as c:
c.argument('vm_name', options_list=['--vm-name'], id_part=None, completer=get_resource_name_completion_list('Microsoft.Compute/virtualMachines'))
c.argument('new', action='store_true', help='create a new disk')
c.argument('sku', arg_type=disk_sku, help='Underlying storage SKU')
c.argument('size_gb', options_list=['--size-gb', '-z'], help='size in GB. Max size: 4095 GB (certain preview disks can be larger).', type=int)
c.argument('lun', type=int, help='0-based logical unit number (LUN). Max value depends on the Virtual Machine size.')
with self.argument_context('vm disk attach') as c:
c.argument('enable_write_accelerator', min_api='2017-12-01', action='store_true', help='enable write accelerator')
c.argument('disk', options_list=['--name', '-n', c.deprecate(target='--disk', redirect='--name', hide=True)],
help="The name or ID of the managed disk", id_part='name',
completer=get_resource_name_completion_list('Microsoft.Compute/disks'))
c.argument('disks', nargs='*', help="One or more names or IDs of the managed disk (space-delimited).",
completer=get_resource_name_completion_list('Microsoft.Compute/disks'))
c.argument('ids', deprecate_info=c.deprecate(target='--ids', redirect='--disks', hide=True))
with self.argument_context('vm disk detach') as c:
c.argument('disk_name', arg_type=name_arg_type, help='The data disk name.')
with self.argument_context('vm encryption enable') as c:
c.argument('encrypt_format_all', action='store_true', help='Encrypts-formats data disks instead of encrypting them. Encrypt-formatting is a lot faster than in-place encryption but wipes out the partition getting encrypt-formatted. (Only supported for Linux virtual machines.)')
# Place aad arguments in their own group
aad_arguments = 'Azure Active Directory'
c.argument('aad_client_id', arg_group=aad_arguments)
c.argument('aad_client_secret', arg_group=aad_arguments)
c.argument('aad_client_cert_thumbprint', arg_group=aad_arguments)
with self.argument_context('vm extension') as c:
c.argument('vm_extension_name', name_arg_type, completer=get_resource_name_completion_list('Microsoft.Compute/virtualMachines/extensions'), help='Name of the extension.', id_part='child_name_1')
c.argument('vm_name', arg_type=existing_vm_name, options_list=['--vm-name'], id_part='name')
c.argument('expand', help='The expand expression to apply on the operation.', deprecate_info=c.deprecate(expiration='3.0.0', hide=True))
with self.argument_context('vm extension list') as c:
c.argument('vm_name', arg_type=existing_vm_name, options_list=['--vm-name'], id_part=None)
with self.argument_context('vm extension show') as c:
c.argument('instance_view', action='store_true', help='The instance view of a virtual machine extension.')
with self.argument_context('vm secret') as c:
c.argument('secrets', multi_ids_type, options_list=['--secrets', '-s'], help='Space-separated list of key vault secret URIs. Perhaps, produced by \'az keyvault secret list-versions --vault-name vaultname -n cert1 --query "[?attributes.enabled].id" -o tsv\'')
c.argument('keyvault', help='Name or ID of the key vault.', validator=validate_keyvault)
c.argument('certificate', help='key vault certificate name or its full secret URL')
c.argument('certificate_store', help='Windows certificate store names. Default: My')
with self.argument_context('vm secret list') as c:
c.argument('vm_name', arg_type=existing_vm_name, id_part=None)
with self.argument_context('vm image') as c:
c.argument('publisher_name', options_list=['--publisher', '-p'], help='image publisher')
c.argument('publisher', options_list=['--publisher', '-p'], help='image publisher')
c.argument('offer', options_list=['--offer', '-f'], help='image offer')
c.argument('plan', help='image billing plan')
c.argument('sku', options_list=['--sku', '-s'], help='image sku')
c.argument('version', help="image sku's version")
c.argument('urn', help="URN, in format of 'publisher:offer:sku:version' or 'publisher:offer:sku:edge_zone:version'. If specified, other argument values can be omitted")
with self.argument_context('vm image list') as c:
c.argument('image_location', get_location_type(self.cli_ctx))
c.argument('edge_zone', edge_zone_type)
c.argument('architecture', help='The name of architecture. ', arg_type=get_enum_type(["x64", "Arm64"]))
with self.argument_context('vm image list-offers') as c:
c.argument('edge_zone', edge_zone_type)
with self.argument_context('vm image list-skus') as c:
c.argument('edge_zone', edge_zone_type)
with self.argument_context('vm image list-publishers') as c:
c.argument('edge_zone', edge_zone_type)
with self.argument_context('vm image show') as c:
c.argument('skus', options_list=['--sku', '-s'])
c.argument('edge_zone', edge_zone_type)
with self.argument_context('vm image terms') as c:
c.argument('urn', help='URN, in the format of \'publisher:offer:sku:version\'. If specified, other argument values can be omitted')
c.argument('publisher', help='Image publisher')
c.argument('offer', help='Image offer')
c.argument('plan', help='Image billing plan')
with self.argument_context('vm nic') as c:
c.argument('vm_name', existing_vm_name, options_list=['--vm-name'], id_part=None)
c.argument('nics', nargs='+', help='Names or IDs of NICs.', validator=validate_vm_nics)
c.argument('primary_nic', help='Name or ID of the primary NIC. If missing, the first NIC in the list will be the primary.')
with self.argument_context('vm nic show') as c:
c.argument('nic', help='NIC name or ID.', validator=validate_vm_nic)
with self.argument_context('vm unmanaged-disk') as c:
c.argument('new', action='store_true', help='Create a new disk.')
c.argument('lun', type=int, help='0-based logical unit number (LUN). Max value depends on the Virtual Machine size.')
c.argument('vhd_uri', help="Virtual hard disk URI. For example: https://mystorage.blob.core.windows.net/vhds/d1.vhd")
with self.argument_context('vm unmanaged-disk attach') as c:
c.argument('disk_name', options_list=['--name', '-n'], help='The data disk name.')
c.argument('size_gb', options_list=['--size-gb', '-z'], help='size in GB. Max size: 4095 GB (certain preview disks can be larger).', type=int)
with self.argument_context('vm unmanaged-disk detach') as c:
c.argument('disk_name', options_list=['--name', '-n'], help='The data disk name.')
for scope in ['vm unmanaged-disk attach', 'vm unmanaged-disk detach']:
with self.argument_context(scope) as c:
c.argument('vm_name', arg_type=existing_vm_name, options_list=['--vm-name'], id_part=None)
with self.argument_context('vm unmanaged-disk list') as c:
c.argument('vm_name', options_list=['--vm-name', '--name', '-n'], arg_type=existing_vm_name, id_part=None)
with self.argument_context('vm user') as c:
c.argument('username', options_list=['--username', '-u'], help='The user name')
c.argument('password', options_list=['--password', '-p'], help='The user password')
with self.argument_context('vm list-skus') as c:
c.argument('size', options_list=['--size', '-s'], help="size name, partial name is accepted")
c.argument('zone', options_list=['--zone', '-z'], arg_type=get_three_state_flag(), help="show skus supporting availability zones")
c.argument('show_all', options_list=['--all'], arg_type=get_three_state_flag(),
help="show all information including vm sizes not available under the current subscription")
c.argument('resource_type', options_list=['--resource-type', '-r'], help='resource types e.g. "availabilitySets", "snapshots", "disks", etc')
with self.argument_context('vm restart') as c:
c.argument('force', action='store_true', help='Force the VM to restart by redeploying it. Use if the VM is unresponsive.')
with self.argument_context('vm host') as c:
c.argument('host_group_name', options_list=['--host-group'], id_part='name', help="Name of the Dedicated Host Group")
c.argument('host_name', name_arg_type, id_part='child_name_1', help="Name of the Dedicated Host")
c.ignore('expand')
with self.argument_context('vm host create') as c:
c.argument('platform_fault_domain', options_list=['--platform-fault-domain', '-d'], type=int,
help="Fault domain of the host within a group. Allowed values: 0, 1, 2")
c.argument('auto_replace_on_failure', options_list=['--auto-replace'], arg_type=get_three_state_flag(),
help="Replace the host automatically if a failure occurs")
c.argument('license_type', arg_type=get_enum_type(DedicatedHostLicenseTypes),
help="The software license type that will be applied to the VMs deployed on the dedicated host.")
c.argument('sku', help="SKU of the dedicated host. Available SKUs: https://azure.microsoft.com/pricing/details/virtual-machines/dedicated-host/")
with self.argument_context('vm host list') as c:
c.argument('host_group_name', id_part=None)
with self.argument_context('vm host group') as c:
c.argument('host_group_name', name_arg_type, id_part='name', help="Name of the Dedicated Host Group")
c.argument('automatic_placement', arg_type=get_three_state_flag(), min_api='2020-06-01',
help='Specify whether virtual machines or virtual machine scale sets can be placed automatically '
'on the dedicated host group. Automatic placement means resources are allocated on dedicated '
'hosts, that are chosen by Azure, under the dedicated host group. The value is defaulted to '
'false when not provided.')
with self.argument_context('vm host group create') as c:
c.argument('platform_fault_domain_count', options_list=["--platform-fault-domain-count", "-c"], type=int,
help="Number of fault domains that the host group can span.")
c.argument('zones', zone_type)
c.argument('ultra_ssd_enabled', arg_type=get_three_state_flag(), min_api='2022-03-01', help='Enable a capability to have UltraSSD Enabled Virtual Machines on Dedicated Hosts of the Dedicated Host Group.')
for scope in ["vm host", "vm host group"]:
with self.argument_context("{} create".format(scope)) as c:
location_type = get_location_type(self.cli_ctx)
custom_location_msg = " Otherwise, location will default to the resource group's location"
custom_location_type = CLIArgumentType(overrides=location_type,
help=location_type.settings["help"] + custom_location_msg)
c.argument('location', arg_type=custom_location_type)
# endregion
# region VMSS
scaleset_name_aliases = ['vm_scale_set_name', 'virtual_machine_scale_set_name', 'name']
with self.argument_context('vmss') as c:
c.argument('zones', zones_type, min_api='2017-03-30')
c.argument('instance_id', id_part='child_name_1')
c.argument('instance_ids', multi_ids_type, help='Space-separated list of IDs (ex: 1 2 3 ...) or * for all instances. If not provided, the action will be applied on the scaleset itself')
c.argument('tags', tags_type)
c.argument('caching', help='Disk caching policy', arg_type=get_enum_type(CachingTypes))
for dest in scaleset_name_aliases:
c.argument(dest, vmss_name_type)
c.argument('host_group', min_api='2020-06-01',
help='Name or ID of dedicated host group that the virtual machine scale set resides in')
for scope in ['vmss deallocate', 'vmss delete-instances', 'vmss restart', 'vmss start', 'vmss stop', 'vmss show', 'vmss update-instances', 'vmss simulate-eviction']:
with self.argument_context(scope) as c:
for dest in scaleset_name_aliases:
c.argument(dest, vmss_name_type, id_part=None) # due to instance-ids parameter
with self.argument_context('vmss create', operation_group='virtual_machine_scale_sets') as c:
VirtualMachineEvictionPolicyTypes = self.get_models('VirtualMachineEvictionPolicyTypes', resource_type=ResourceType.MGMT_COMPUTE)
c.argument('name', name_arg_type)
c.argument('nat_backend_port', default=None, help='Backend port to open with NAT rules. Defaults to 22 on Linux and 3389 on Windows.')
c.argument('single_placement_group', arg_type=get_three_state_flag(), help="Limit the scale set to a single placement group."
" See https://docs.microsoft.com/azure/virtual-machine-scale-sets/virtual-machine-scale-sets-placement-groups for details.")
c.argument('platform_fault_domain_count', type=int, help='Fault Domain count for each placement group in the availability zone', min_api='2017-12-01')
c.argument('vmss_name', name_arg_type, id_part=None, help='Name of the virtual machine scale set.')
c.argument('instance_count', help='Number of VMs in the scale set.', type=int)
c.argument('disable_overprovision', help='Overprovision option (see https://azure.microsoft.com/documentation/articles/virtual-machine-scale-sets-overview/ for details).', action='store_true')
c.argument('upgrade_policy_mode', help=None, arg_type=get_enum_type(UpgradeMode))
c.argument('health_probe', help='Probe name from the existing load balancer, mainly used for rolling upgrade or automatic repairs')
c.argument('vm_sku', help='Size of VMs in the scale set. Default to "Standard_DS1_v2". See https://azure.microsoft.com/pricing/details/virtual-machines/ for size info.')
c.argument('nsg', help='Name or ID of an existing Network Security Group.', arg_group='Network')
c.argument('eviction_policy', resource_type=ResourceType.MGMT_COMPUTE, min_api='2017-12-01', arg_type=get_enum_type(VirtualMachineEvictionPolicyTypes, default=None),
help="The eviction policy for virtual machines in a Spot priority scale set. Default eviction policy is Deallocate for a Spot priority scale set")
c.argument('application_security_groups', resource_type=ResourceType.MGMT_COMPUTE, min_api='2018-06-01', nargs='+', options_list=['--asgs'], help='Space-separated list of existing application security groups to associate with the VM.', arg_group='Network', validator=validate_asg_names_or_ids)
c.argument('computer_name_prefix', help='Computer name prefix for all of the virtual machines in the scale set. Computer name prefixes must be 1 to 15 characters long')
c.argument('orchestration_mode', help='Choose how virtual machines are managed by the scale set. In Uniform mode, you define a virtual machine model and Azure will generate identical instances based on that model. In Flexible mode, you manually create and add a virtual machine of any configuration to the scale set or generate identical instances based on virtual machine model defined for the scale set.',
arg_type=get_enum_type(['Uniform', 'Flexible']))
c.argument('scale_in_policy', scale_in_policy_type)
c.argument('automatic_repairs_grace_period', min_api='2018-10-01',
help='The amount of time (in minutes, between 30 and 90) for which automatic repairs are suspended due to a state change on VM.')
c.argument('automatic_repairs_action', arg_type=get_enum_type(['Replace', 'Restart', 'Reimage']), min_api='2021-11-01', help='Type of repair action that will be used for repairing unhealthy virtual machines in the scale set.')
c.argument('user_data', help='UserData for the virtual machines in the scale set. It can be passed in as file or string.', completer=FilesCompleter(), type=file_type, min_api='2021-03-01')
c.argument('network_api_version', min_api='2021-03-01',
help="Specify the Microsoft.Network API version used when creating networking resources in the Network "
"Interface Configurations for Virtual Machine Scale Set with orchestration mode 'Flexible'. Default "
"value is 2020-11-01.")
c.argument('enable_spot_restore', arg_type=get_three_state_flag(), min_api='2021-04-01', help='Enable the Spot-Try-Restore feature where evicted VMSS SPOT instances will be tried to be restored opportunistically based on capacity availability and pricing constraints')
c.argument('spot_restore_timeout', min_api='2021-04-01', help='Timeout value expressed as an ISO 8601 time duration after which the platform will not try to restore the VMSS SPOT instances')
c.argument('enable_agent', arg_type=get_three_state_flag(), min_api='2018-06-01',
help='Indicate whether virtual machine agent should be provisioned on the virtual machine. When this property is not specified, default behavior is to set it to true. This will ensure that VM Agent is installed on the VM so that extensions can be added to the VM later')
c.argument('enable_auto_update', arg_type=get_three_state_flag(), min_api='2020-06-01',
help='Indicate whether Automatic Updates is enabled for the Windows virtual machine')
c.argument('patch_mode', arg_type=get_enum_type(['AutomaticByOS', 'AutomaticByPlatform', 'Manual', 'ImageDefault']), min_api='2020-12-01',
help='Mode of in-guest patching to IaaS virtual machine. Allowed values for Windows VM: AutomaticByOS, AutomaticByPlatform, Manual. Allowed values for Linux VM: AutomaticByPlatform, ImageDefault. Manual - You control the application of patches to a virtual machine. You do this by applying patches manually inside the VM. In this mode, automatic updates are disabled; the paramater --enable-auto-update must be false. AutomaticByOS - The virtual machine will automatically be updated by the OS. The parameter --enable-auto-update must be true. AutomaticByPlatform - the virtual machine will automatically updated by the OS. ImageDefault - The virtual machine\'s default patching configuration is used. The parameter --enable-agent and --enable-auto-update must be true')
c.argument('security_type', security_type)
c.argument('enable_secure_boot', enable_secure_boot_type)
c.argument('enable_vtpm', enable_vtpm_type)
with self.argument_context('vmss create', arg_group='Network Balancer') as c:
LoadBalancerSkuName = self.get_models('LoadBalancerSkuName', resource_type=ResourceType.MGMT_NETWORK)
c.argument('application_gateway', help='Name to use when creating a new application gateway (default) or referencing an existing one. Can also reference an existing application gateway by ID or specify "" for none.', options_list=['--app-gateway'])
c.argument('app_gateway_capacity', help='The number of instances to use when creating a new application gateway.')
c.argument('app_gateway_sku', help='SKU when creating a new application gateway.')
c.argument('app_gateway_subnet_address_prefix', help='The subnet IP address prefix to use when creating a new application gateway in CIDR format.')
c.argument('backend_pool_name', help='Name to use for the backend pool when creating a new load balancer or application gateway.')
c.argument('backend_port', help='When creating a new load balancer, backend port to open with NAT rules (Defaults to 22 on Linux and 3389 on Windows). When creating an application gateway, the backend port to use for the backend HTTP settings.', type=int)
c.argument('load_balancer', help='Name to use when creating a new load balancer (default) or referencing an existing one. Can also reference an existing load balancer by ID or specify "" for none.', options_list=['--load-balancer', '--lb'])
c.argument('load_balancer_sku', resource_type=ResourceType.MGMT_NETWORK, min_api='2017-08-01', options_list=['--lb-sku'], arg_type=get_enum_type(LoadBalancerSkuName),
help="Sku of the Load Balancer to create. Default to 'Standard' when single placement group is turned off; otherwise, default to 'Basic'. The public IP is supported to be created on edge zone only when it is 'Standard'")
c.argument('nat_pool_name', help='Name to use for the NAT pool when creating a new load balancer.', options_list=['--lb-nat-pool-name', '--nat-pool-name'])
with self.argument_context('vmss create', min_api='2017-03-30', arg_group='Network') as c:
c.argument('public_ip_per_vm', action='store_true', help="Each VM instance will have a public ip. For security, you can use '--nsg' to apply appropriate rules")
c.argument('vm_domain_name', help="domain name of VM instances, once configured, the FQDN is `vm<vm-index>.<vm-domain-name>.<..rest..>`")
c.argument('dns_servers', nargs='+', help="space-separated IP addresses of DNS servers, e.g. 10.0.0.5 10.0.0.6")
c.argument('accelerated_networking', arg_type=get_three_state_flag(),
help="enable accelerated networking. Unless specified, CLI will enable it based on machine image and size")
with self.argument_context('vmss update') as c:
protection_policy_type = CLIArgumentType(overrides=get_three_state_flag(), arg_group="Protection Policy", min_api='2019-03-01')
c.argument('protect_from_scale_in', arg_type=protection_policy_type, help="Protect the VM instance from scale-in operations.")
c.argument('protect_from_scale_set_actions', arg_type=protection_policy_type, help="Protect the VM instance from scale set actions (including scale-in).")
c.argument('enable_terminate_notification', min_api='2019-03-01', arg_type=get_three_state_flag(),
help='Enable terminate notification')
c.argument('ultra_ssd_enabled', ultra_ssd_enabled_type)
c.argument('scale_in_policy', scale_in_policy_type)
c.argument('force_deletion', action='store_true', is_preview=True, help='This property allow you to specify if virtual machines chosen for removal have to be force deleted when a virtual machine scale set is being scaled-in.')
c.argument('user_data', help='UserData for the virtual machines in the scale set. It can be passed in as file or string. If empty string is passed in, the existing value will be deleted.', completer=FilesCompleter(), type=file_type, min_api='2021-03-01')
c.argument('enable_spot_restore', arg_type=get_three_state_flag(), min_api='2021-04-01',
help='Enable the Spot-Try-Restore feature where evicted VMSS SPOT instances will be tried to be restored opportunistically based on capacity availability and pricing constraints')
c.argument('spot_restore_timeout', min_api='2021-04-01',
help='Timeout value expressed as an ISO 8601 time duration after which the platform will not try to restore the VMSS SPOT instances')
c.argument('vm_sku', help='The new size of the virtual machine instances in the scale set. Default to "Standard_DS1_v2". See https://azure.microsoft.com/pricing/details/virtual-machines/ for size info.', is_preview=True)
c.argument('ephemeral_os_disk_placement', arg_type=ephemeral_placement_type,
help='Only applicable when used with `--vm-sku`. Allows you to choose the Ephemeral OS disk provisioning location.', is_preview=True)
c.argument('enable_secure_boot', enable_secure_boot_type)
c.argument('enable_vtpm', enable_vtpm_type)
with self.argument_context('vmss update', min_api='2018-10-01', arg_group='Automatic Repairs') as c:
c.argument('enable_automatic_repairs', arg_type=get_three_state_flag(), help='Enable automatic repairs')
c.argument(
'automatic_repairs_grace_period',
help='The amount of time (in minutes, between 30 and 90) for which automatic repairs are suspended due to a state change on VM.'
)
c.argument('automatic_repairs_action', arg_type=get_enum_type(['Replace', 'Restart', 'Reimage']), min_api='2021-11-01', help='Type of repair action that will be used for repairing unhealthy virtual machines in the scale set.')
for scope in ['vmss create', 'vmss update']:
with self.argument_context(scope) as c:
c.argument('terminate_notification_time', min_api='2019-03-01',
help='Length of time (in minutes, between 5 and 15) a notification to be sent to the VM on the instance metadata server till the VM gets deleted')
c.argument('max_batch_instance_percent', type=int, min_api='2020-12-01',
help='The maximum percent of total virtual machine instances that will be upgraded simultaneously by the rolling upgrade in one batch. Default: 20%')
c.argument('max_unhealthy_instance_percent', type=int, min_api='2020-12-01',
help='The maximum percentage of the total virtual machine instances in the scale set that can be simultaneously unhealthy. Default: 20%')
c.argument('max_unhealthy_upgraded_instance_percent', type=int, min_api='2020-12-01',
help='The maximum percentage of upgraded virtual machine instances that can be found to be in an unhealthy state. Default: 20%')
c.argument('pause_time_between_batches', min_api='2020-12-01',
help='The wait time between completing the update for all virtual machines in one batch and starting the next batch. Default: 0 seconds')
c.argument('enable_cross_zone_upgrade', arg_type=get_three_state_flag(), min_api='2020-12-01',
help='Set this Boolean property will allow VMSS to ignore AZ boundaries when constructing upgrade batches, and only consider Update Domain and maxBatchInstancePercent to determine the batch size')
c.argument('prioritize_unhealthy_instances', arg_type=get_three_state_flag(), min_api='2020-12-01',
help='Set this Boolean property will lead to all unhealthy instances in a scale set getting upgraded before any healthy instances')
for scope, help_prefix in [('vmss update', 'Update the'), ('vmss wait', 'Wait on the')]:
with self.argument_context(scope) as c:
c.argument('instance_id', id_part='child_name_1', help="{0} VM instance with this ID. If missing, {0} VMSS.".format(help_prefix))
for scope in ['vmss update-instances', 'vmss delete-instances']:
with self.argument_context(scope) as c:
c.argument('instance_ids', multi_ids_type, help='Space-separated list of IDs (ex: 1 2 3 ...) or * for all instances.')
with self.argument_context('vmss diagnostics') as c:
c.argument('vmss_name', id_part=None, help='Scale set name')
with self.argument_context('vmss disk') as c:
options_list = ['--vmss-name'] + [c.deprecate(target=opt, redirect='--vmss-name', hide=True)for opt in name_arg_type.settings['options_list']]
new_vmss_name_type = CLIArgumentType(overrides=vmss_name_type, options_list=options_list)
c.argument('lun', type=int, help='0-based logical unit number (LUN). Max value depends on the Virtual Machine instance size.')
c.argument('size_gb', options_list=['--size-gb', '-z'], help='size in GB. Max size: 4095 GB (certain preview disks can be larger).', type=int)
c.argument('vmss_name', new_vmss_name_type, completer=get_resource_name_completion_list('Microsoft.Compute/virtualMachineScaleSets'))
c.argument('disk', validator=validate_vmss_disk, help='existing disk name or ID to attach or detach from VM instances',
min_api='2017-12-01', completer=get_resource_name_completion_list('Microsoft.Compute/disks'))
c.argument('instance_id', help='Scale set VM instance id', min_api='2017-12-01')
c.argument('sku', arg_type=disk_sku, help='Underlying storage SKU')
with self.argument_context('vmss encryption') as c:
c.argument('vmss_name', vmss_name_type, completer=get_resource_name_completion_list('Microsoft.Compute/virtualMachineScaleSets'))
with self.argument_context('vmss extension') as c:
c.argument('extension_name', name_arg_type, help='Name of the extension.')
c.argument('vmss_name', vmss_name_type, options_list=['--vmss-name'], id_part=None)
with self.argument_context('vmss nic') as c:
c.argument('virtual_machine_scale_set_name', options_list=['--vmss-name'], help='Scale set name.', completer=get_resource_name_completion_list('Microsoft.Compute/virtualMachineScaleSets'), id_part='name')
c.argument('virtualmachine_index', options_list=['--instance-id'], id_part='child_name_1')
c.argument('network_interface_name', options_list=['--name', '-n'], metavar='NIC_NAME', help='The network interface (NIC).', completer=get_resource_name_completion_list('Microsoft.Network/networkInterfaces'), id_part='child_name_2')
with self.argument_context('vmss nic list') as c:
c.argument('virtual_machine_scale_set_name', arg_type=vmss_name_type, options_list=['--vmss-name'], id_part=None)
with self.argument_context('vmss set-orchestration-service-state') as c:
c.argument('service_name', arg_type=get_enum_type(OrchestrationServiceNames), help='The name of the orchestration service.')
c.argument('action', arg_type=get_enum_type(OrchestrationServiceStateAction), help='The action to be performed.')
# endregion
# region VM & VMSS Shared
for scope in ['vm', 'vmss']:
with self.argument_context(scope) as c:
c.argument('no_auto_upgrade',
options_list=['--no-auto-upgrade-minor-version', c.deprecate(target='--no-auto-upgrade', redirect='--no-auto-upgrade-minor-version')],
arg_type=get_three_state_flag(),
help='If set, the extension service will not automatically pick or upgrade to the latest minor version, even if the extension is redeployed.')
with self.argument_context('{} run-command'.format(scope)) as c:
c.argument('command_id', completer=get_vm_run_command_completion_list, help="The command id. Use 'az {} run-command list' to get the list".format(scope))
if scope == 'vmss':
c.argument('vmss_name', vmss_name_type)
with self.argument_context('{} run-command invoke'.format(scope)) as c:
c.argument('parameters', nargs='+', help="space-separated parameters in the format of '[name=]value'")
c.argument('scripts', nargs='+', help="Space-separated script lines. Use @{file} to load script from a file")
with self.argument_context('{} stop'.format(scope)) as c:
c.argument('skip_shutdown', action='store_true', help='Skip shutdown and power-off immediately.', min_api='2019-03-01')
run_cmd_name_type = CLIArgumentType(options_list=['--name', '--run-command-name'], help='The name of the virtual machine run command.')
run_cmd_vm_name = CLIArgumentType(options_list=['--vm-name'], help='The name of the virtual machine')
for scope in ['create', 'update']:
with self.argument_context('vm run-command {}'.format(scope)) as c:
c.argument('vm_name', run_cmd_vm_name)
c.argument('run_command_name', run_cmd_name_type)
c.argument('location', arg_type=get_location_type(self.cli_ctx), required=False,
validator=get_default_location_from_resource_group)
c.argument('tags', tags_type)
c.argument('script', help='Contain the powershell or bash script to execute on the VM.')
c.argument('script_uri', help='Contain a uri to the script to execute on the VM. Uri can be any link accessible from the VM or a storage blob without SAS. If subscription has access to the storage blob, then SAS will be auto-generated. ')
c.argument('command_id', help='Specify a command id of predefined script. All command ids can be listed using "list" command.')
c.argument('parameters', nargs='+', help='Set custom parameters in a name-value pair.')
c.argument('protected_parameters', nargs='+', help='Set custom parameters in a name-value pair. These parameters will be encrypted during transmission and will not be logged.')
c.argument('async_execution', arg_type=get_three_state_flag(), help='Optional. If set to true, provisioning '
'will complete as soon as the script starts and will not wait for script to complete.')
c.argument('run_as_user', help='By default script process runs under system/root user. Specify custom user to host the process.')
c.argument('run_as_password', help='Password if needed for using run-as-user parameter. It will be encrypted and not logged. ')
c.argument('timeout_in_seconds', type=int, help='The timeout in seconds to execute the run command.')
c.argument('output_blob_uri', help='Specify the Azure storage blob where script output stream will be uploaded.')
c.argument('error_blob_uri', help='Specify the Azure storage blob where script error stream will be uploaded.')
with self.argument_context('vm run-command delete') as c:
c.argument('vm_name', run_cmd_vm_name)
c.argument('run_command_name', run_cmd_name_type)
with self.argument_context('vm run-command list') as c:
c.argument('vm_name', run_cmd_vm_name, id_part=None)
c.argument('expand', help='The expand expression to apply on the operation.')
c.argument('location', arg_type=get_location_type(self.cli_ctx))
with self.argument_context('vm run-command show') as c:
c.argument('vm_name', run_cmd_vm_name)
c.argument('run_command_name', run_cmd_name_type)
c.argument('expand', help='The expand expression to apply on the operation.', deprecate_info=c.deprecate(hide=True))
c.argument('instance_view', action='store_true', help='The instance view of a run command.')
c.argument('location', arg_type=get_location_type(self.cli_ctx))
c.argument('command_id', help='The command id.')
with self.argument_context('vm run-command wait') as c:
c.argument('vm_name', run_cmd_vm_name)
c.argument('run_command_name', run_cmd_name_type)
c.argument('expand', help='The expand expression to apply on the operation.', deprecate_info=c.deprecate(hide=True))
c.argument('instance_view', action='store_true', help='The instance view of a run command.')
c.argument('location', arg_type=get_location_type(self.cli_ctx))
c.argument('command_id', help='The command id.')
run_cmd_vmss_name = CLIArgumentType(options_list=['--vmss-name'], help='The name of the VM scale set.')
for scope in ['create', 'update']:
with self.argument_context('vmss run-command {}'.format(scope)) as c:
c.argument('vmss_name', run_cmd_vmss_name)
c.argument('instance_id', help='The instance ID of the virtual machine.')
c.argument('run_command_name', run_cmd_name_type)
c.argument('location', arg_type=get_location_type(self.cli_ctx), required=False,
validator=get_default_location_from_resource_group)
c.argument('tags', tags_type)
c.argument('script', help='Contain the powershell or bash script to execute on the VM.')
c.argument('script_uri',
help='Contain a uri to the script to execute on the VM. Uri can be any link accessible from the VM or a storage blob without SAS. If subscription has access to the storage blob, then SAS will be auto-generated. ')
c.argument('command_id',
help='Specify a command id of predefined script. All command ids can be listed using "list" command.')
c.argument('parameters', nargs='+', help='Set custom parameters in a name-value pair.')
c.argument('protected_parameters', nargs='+',
help='Set custom parameters in a name-value pair. These parameters will be encrypted during transmission and will not be logged.')
c.argument('async_execution', arg_type=get_three_state_flag(), help='Optional. If set to true, provisioning '
'will complete as soon as the script starts and will not wait for script to complete.')
c.argument('run_as_user',
help='By default script process runs under system/root user. Specify custom user to host the process.')
c.argument('run_as_password',
help='Password if needed for using run-as-user parameter. It will be encrypted and not logged. ')
c.argument('timeout_in_seconds', type=int, help='The timeout in seconds to execute the run command.')
c.argument('output_blob_uri', help='Uri (without SAS) to an append blob where the script output will be uploaded.')
c.argument('error_blob_uri', help='Uri (without SAS) to an append blob where the script error stream will be uploaded.')
with self.argument_context('vmss run-command delete') as c:
c.argument('vmss_name', run_cmd_vmss_name)
c.argument('instance_id', help='The instance ID of the virtual machine.')
c.argument('run_command_name', run_cmd_name_type)
with self.argument_context('vmss run-command list') as c:
c.argument('vmss_name', run_cmd_vmss_name, id_part=None)
c.argument('instance_id', help='The instance ID of the virtual machine.')
c.argument('expand', help='The expand expression to apply on the operation.')
with self.argument_context('vmss run-command show') as c:
c.argument('vmss_name', run_cmd_vmss_name)
c.argument('instance_id', help='The instance ID of the virtual machine.')
c.argument('run_command_name', run_cmd_name_type)
c.argument('expand', help='The expand expression to apply on the operation.', deprecate_info=c.deprecate(hide=True))
c.argument('instance_view', action='store_true', help='The instance view of a run command.')
for scope in ['vm identity assign', 'vmss identity assign']:
with self.argument_context(scope) as c:
c.argument('assign_identity', options_list=['--identities'], nargs='*', help="Space-separated identities to assign. Use '{0}' to refer to the system assigned identity. Default: '{0}'".format(MSI_LOCAL_ID))
c.argument('vm_name', existing_vm_name)
c.argument('vmss_name', vmss_name_type)
for scope in ['vm identity remove', 'vmss identity remove']:
with self.argument_context(scope) as c:
c.argument('identities', nargs='+', help="Space-separated identities to remove. Use '{0}' to refer to the system assigned identity. Default: '{0}'".format(MSI_LOCAL_ID))
c.argument('vm_name', existing_vm_name)
c.argument('vmss_name', vmss_name_type)
for scope in ['vm identity show', 'vmss identity show']:
with self.argument_context(scope) as c:
c.argument('vm_name', existing_vm_name)
c.argument('vmss_name', vmss_name_type)
for scope in ['vm application set', 'vmss application set']:
with self.argument_context(scope) as c:
c.argument('vm', existing_vm_name)
c.argument('vmss_name', vmss_name_type)
c.argument('application_version_ids', options_list=['--app-version-ids'], nargs='*', help="Space-separated application version ids to set to VM.")
c.argument('order_applications', action='store_true', help='Whether to set order index at each gallery application. If specified, the first app version id gets specified an order = 1, then the next one 2, and so on. This parameter is meant to be used when the VMApplications specified by app version ids must be installed in a particular order; the lowest order is installed first.')
c.argument('application_configuration_overrides', options_list=['--app-config-overrides'], nargs='*',
help='Space-separated application configuration overrides for each application version ids. '
'It should have the same number of items as the application version ids. Null is available for a application '
'which does not have a configuration override.')
c.argument('treat_deployment_as_failure', nargs='*', help="Space-separated list of true or false corresponding to the application version ids. If set to true, failure to install or update gallery application version operation will fail this operation")
for scope in ['vm application list', 'vmss application list']:
with self.argument_context(scope) as c:
c.argument('vm_name', options_list=['--vm-name', '--name', '-n'], arg_type=existing_vm_name, id_part=None)
c.argument('vmss_name', vmss_name_type, id_part=None)
for scope in ['vm create', 'vmss create']:
with self.argument_context(scope) as c:
c.argument('location', get_location_type(self.cli_ctx), help='Location in which to create VM and related resources. If default location is not configured, will default to the resource group\'s location')
c.argument('tags', tags_type)
c.argument('no_wait', help='Do not wait for the long-running operation to finish.')
c.argument('validate', options_list=['--validate'], help='Generate and validate the ARM template without creating any resources.', action='store_true')
c.argument('size', help='The VM size to be created. See https://azure.microsoft.com/pricing/details/virtual-machines/ for size info.')
c.argument('image', completer=get_urn_aliases_completion_list)
c.argument('custom_data', help='Custom init script file or text (cloud-init, cloud-config, etc..)', completer=FilesCompleter(), type=file_type)
c.argument('secrets', multi_ids_type, help='One or many Key Vault secrets as JSON strings or files via `@{path}` containing `[{ "sourceVault": { "id": "value" }, "vaultCertificates": [{ "certificateUrl": "value", "certificateStore": "cert store name (only on windows)"}] }]`', type=file_type, completer=FilesCompleter())
c.argument('assign_identity', nargs='*', arg_group='Managed Service Identity', help="accept system or user assigned identities separated by spaces. Use '[system]' to refer system assigned identity, or a resource id to refer user assigned identity. Check out help for more examples")
c.ignore('aux_subscriptions')
c.argument('edge_zone', edge_zone_type)
c.argument('accept_term', action='store_true', help="Accept the license agreement and privacy statement.")
c.argument('disable_integrity_monitoring', action='store_true', min_api='2020-12-01', help='Disable the default behavior of installing guest attestation extension and enabling System Assigned Identity for Trusted Launch enabled VMs and VMSS.')
c.argument('os_disk_security_encryption_type', arg_type=get_enum_type(self.get_models('SecurityEncryptionTypes')), min_api='2021-11-01', help='Specify the encryption type of the OS managed disk.')
c.argument('os_disk_secure_vm_disk_encryption_set', min_api='2021-11-01', help='Specify the customer managed disk encryption set resource ID or name for the managed disk that is used for customer managed key encrypted Confidential VM OS disk and VM guest blob.')
with self.argument_context(scope, arg_group='Authentication') as c:
c.argument('generate_ssh_keys', action='store_true', help='Generate SSH public and private key files if missing. The keys will be stored in the ~/.ssh directory')
c.argument('admin_username', help='Username for the VM. Default value is current username of OS. If the default value is system reserved, then default value will be set to azureuser. Please refer to https://docs.microsoft.com/rest/api/compute/virtualmachines/createorupdate#osprofile to get a full list of reserved values.')
c.argument('admin_password', help="Password for the VM if authentication type is 'Password'.")
c.argument('ssh_key_value', options_list=['--ssh-key-values'], completer=FilesCompleter(), type=file_type, nargs='+')
c.argument('ssh_dest_key_path', help='Destination file path on the VM for the SSH key. If the file already exists, the specified key(s) are appended to the file. Destination path for SSH public keys is currently limited to its default value "/home/username/.ssh/authorized_keys" due to a known issue in Linux provisioning agent.')
c.argument('authentication_type', help='Type of authentication to use with the VM. Defaults to password for Windows and SSH public key for Linux. "all" enables both ssh and password authentication. ', arg_type=get_enum_type(['ssh', 'password', 'all']))
with self.argument_context(scope, arg_group='Storage') as c:
if DiskStorageAccountTypes:
allowed_values = ", ".join([sku.value for sku in DiskStorageAccountTypes])
else:
allowed_values = ", ".join(['Premium_LRS', 'Standard_LRS'])
usage = 'Usage: [--storage-sku SKU | --storage-sku ID=SKU ID=SKU ID=SKU...], where each ID is "os" or a 0-indexed lun.'
allowed_values = 'Allowed values: {}.'.format(allowed_values)
storage_sku_help = 'The SKU of the storage account with which to persist VM. Use a singular sku that would be applied across all disks, ' \
'or specify individual disks. {} {}'.format(usage, allowed_values)
c.argument('os_disk_name', help='The name of the new VM OS disk.')
c.argument('os_type', help='Type of OS installed on a custom VHD. Do not use when specifying an URN or URN alias.', arg_type=get_enum_type(['windows', 'linux']))
c.argument('storage_account', help="Only applicable when used with `--use-unmanaged-disk`. The name to use when creating a new storage account or referencing an existing one. If omitted, an appropriate storage account in the same resource group and location will be used, or a new one will be created.")
c.argument('storage_sku', nargs='+', help=storage_sku_help)
c.argument('storage_container_name', help="Only applicable when used with `--use-unmanaged-disk`. Name of the storage container for the VM OS disk. Default: vhds")
c.ignore('os_publisher', 'os_offer', 'os_sku', 'os_version', 'storage_profile')
c.argument('use_unmanaged_disk', action='store_true', help='Do not use managed disk to persist VM')
c.argument('os_disk_size_gb', type=int, help='OS disk size in GB to create.')
c.argument('data_disk_sizes_gb', nargs='+', type=int, help='space-separated empty managed data disk sizes in GB to create')
c.ignore('disk_info', 'storage_account_type', 'public_ip_address_type', 'nsg_type', 'nic_type', 'vnet_type', 'load_balancer_type', 'app_gateway_type')
c.argument('os_caching', options_list=[self.deprecate(target='--storage-caching', redirect='--os-disk-caching', hide=True), '--os-disk-caching'], help='Storage caching type for the VM OS disk. Default: ReadWrite', arg_type=get_enum_type(CachingTypes))
c.argument('data_caching', options_list=['--data-disk-caching'], nargs='+',
help="storage caching type for data disk(s), including 'None', 'ReadOnly', 'ReadWrite', etc. Use a singular value to apply on all disks, or use `<lun>=<vaule1> <lun>=<value2>` to configure individual disk")
c.argument('ultra_ssd_enabled', ultra_ssd_enabled_type)
c.argument('ephemeral_os_disk', arg_type=get_three_state_flag(), min_api='2018-06-01',
help='Allows you to create an OS disk directly on the host node, providing local disk performance and faster VM/VMSS reimage time.', is_preview=True)
c.argument('ephemeral_os_disk_placement', arg_type=ephemeral_placement_type,
help='Only applicable when used with `--ephemeral-os-disk`. Allows you to choose the Ephemeral OS disk provisioning location.', is_preview=True)
c.argument('os_disk_encryption_set', min_api='2019-07-01', help='Name or ID of disk encryption set for OS disk.')
c.argument('data_disk_encryption_sets', nargs='+', min_api='2019-07-01',
help='Names or IDs (space delimited) of disk encryption sets for data disks.')
c.argument('data_disk_iops', min_api='2019-07-01', nargs='+', type=int, help='Specify the Read-Write IOPS (space delimited) for the managed disk. Should be used only when StorageAccountType is UltraSSD_LRS. If not specified, a default value would be assigned based on diskSizeGB.')
c.argument('data_disk_mbps', min_api='2019-07-01', nargs='+', type=int, help='Specify the bandwidth in MB per second (space delimited) for the managed disk. Should be used only when StorageAccountType is UltraSSD_LRS. If not specified, a default value would be assigned based on diskSizeGB.')
c.argument('specialized', arg_type=get_three_state_flag(), help='Indicate whether the source image is specialized.')
c.argument('encryption_at_host', arg_type=get_three_state_flag(), help='Enable Host Encryption for the VM or VMSS. This will enable the encryption for all the disks including Resource/Temp disk at host itself.')
c.argument('os_disk_delete_option', arg_type=get_enum_type(self.get_models('DiskDeleteOptionTypes')), min_api='2021-03-01',
help='Specify the behavior of the managed disk when the VM gets deleted i.e whether the managed disk is deleted or detached.')
c.argument('data_disk_delete_option', options_list=['--data-disk-delete-option', self.deprecate(target='--data-delete-option', redirect='--data-disk-delete-option', hide=True)],
nargs='+', min_api='2021-03-01',
help='Specify whether data disk should be deleted or detached upon VM deletion. If a single data disk is attached, the allowed values are Delete and Detach. For multiple data disks are attached, please use "<data_disk>=Delete <data_disk2>=Detach" to configure each disk')
if scope == 'vmss create':
c.argument('os_disk_delete_option', arg_type=get_enum_type(self.get_models('DiskDeleteOptionTypes')), min_api='2022-03-01', help='Specify whether OS Disk should be deleted or detached upon VMSS Flexdeletion (This feature is available for VMSS with Flexible OrchestrationMode only).')
c.argument('data_disk_delete_option', arg_type=get_enum_type(self.get_models('DiskDeleteOptionTypes')), min_api='2022-03-01', help='Specify whether data disk should be deleted or detached upon VMSS Flexdeletion (This feature is available for VMSS with Flexible OrchestrationModeonly)')
with self.argument_context(scope, arg_group='Network') as c:
c.argument('vnet_name', help='Name of the virtual network when creating a new one or referencing an existing one.')
c.argument('vnet_address_prefix', help='The IP address prefix to use when creating a new VNet in CIDR format.')
c.argument('subnet', help='The name of the subnet when creating a new VNet or referencing an existing one. Can also reference an existing subnet by ID. If both vnet-name and subnet are omitted, an appropriate VNet and subnet will be selected automatically, or a new one will be created.')
c.argument('subnet_address_prefix', help='The subnet IP address prefix to use when creating a new VNet in CIDR format.')
c.argument('nics', nargs='+', help='Names or IDs of existing NICs to attach to the VM. The first NIC will be designated as primary. If omitted, a new NIC will be created. If an existing NIC is specified, do not specify subnet, VNet, public IP or NSG.')
c.argument('private_ip_address', help='Static private IP address (e.g. 10.0.0.5).')
c.argument('public_ip_address', help='Name of the public IP address when creating one (default) or referencing an existing one. Can also reference an existing public IP by ID or specify "" for None (\'""\' in Azure CLI using PowerShell or --% operator).')
c.argument('public_ip_address_allocation', help=None, default=None, arg_type=get_enum_type(['dynamic', 'static']))
c.argument('public_ip_address_dns_name', help='Globally unique DNS name for a newly created public IP.')
if self.supported_api_version(min_api='2017-08-01', resource_type=ResourceType.MGMT_NETWORK):
PublicIPAddressSkuName = self.get_models('PublicIPAddressSkuName', resource_type=ResourceType.MGMT_NETWORK)
c.argument('public_ip_sku', help='Public IP SKU. It is set to Basic by default. The public IP is supported to be created on edge zone only when it is \'Standard\'',
default=None, arg_type=get_enum_type(PublicIPAddressSkuName))
c.argument('nic_delete_option', nargs='+', min_api='2021-03-01',
help='Specify what happens to the network interface when the VM is deleted. Use a singular '
'value to apply on all resources, or use <Name>=<Value> to configure '
'the delete behavior for individual resources. Possible options are Delete and Detach.')
with self.argument_context(scope, arg_group='Marketplace Image Plan') as c:
c.argument('plan_name', help='plan name')
c.argument('plan_product', help='plan product')
c.argument('plan_publisher', help='plan publisher')
c.argument('plan_promotion_code', help='plan promotion code')
for scope in ['vm create', 'vmss create', 'vm identity assign', 'vmss identity assign']:
with self.argument_context(scope) as c:
arg_group = 'Managed Service Identity' if scope.split()[-1] == 'create' else None
c.argument('identity_scope', options_list=['--scope'], arg_group=arg_group,
help="Scope that the system assigned identity can access. ")
c.ignore('identity_role_id')
for scope in ['vm create', 'vmss create']:
with self.argument_context(scope) as c:
c.argument('identity_role', options_list=['--role'], arg_group='Managed Service Identity',
help='Role name or id the system assigned identity will have. ')
for scope in ['vm identity assign', 'vmss identity assign']:
with self.argument_context(scope) as c:
c.argument('identity_role', options_list=['--role'], help="Role name or id the system assigned identity will have")
with self.argument_context('vm auto-shutdown') as c:
c.argument('off', action='store_true', help='Turn off auto-shutdown for VM. Configuration will be cleared.')
c.argument('email', help='The email recipient to send notifications to (can be a list of semi-colon separated email addresses)')
c.argument('time', help='The UTC time of day the schedule will occur every day. Format: hhmm. Example: 1730')
c.argument('webhook', help='The webhook URL to which the notification will be sent')
c.argument('location', validator=get_default_location_from_resource_group)
for scope in ['vm diagnostics', 'vmss diagnostics']:
with self.argument_context(scope) as c:
c.argument('version', help='version of the diagnostics extension. Will use the latest if not specfied')
c.argument('settings', help='json string or a file path, which defines data to be collected.', type=validate_file_or_dict, completer=FilesCompleter())
c.argument('protected_settings', help='json string or a file path containing private configurations such as storage account keys, etc.', type=validate_file_or_dict, completer=FilesCompleter())
c.argument('is_windows_os', action='store_true', help='for Windows VMs')
for scope in ['vm encryption', 'vmss encryption']:
with self.argument_context(scope) as c:
c.argument('volume_type', help='Type of volume that the encryption operation is performed on', arg_type=get_enum_type(['DATA', 'OS', 'ALL']))
c.argument('force', action='store_true', help='continue by ignoring client side validation errors')
c.argument('disk_encryption_keyvault', help='Name or ID of the key vault where the generated encryption key will be placed.')
c.argument('key_encryption_key', help='Key vault key name or URL used to encrypt the disk encryption key.')
c.argument('key_encryption_keyvault', help='Name or ID of the key vault containing the key encryption key used to encrypt the disk encryption key. If missing, CLI will use `--disk-encryption-keyvault`.')
for scope in ['vm extension', 'vmss extension']:
with self.argument_context(scope) as c:
c.argument('publisher', help='The name of the extension publisher.')
c.argument('settings', type=validate_file_or_dict, help='Extension settings in JSON format. A JSON file path is also accepted.')
c.argument('protected_settings', type=validate_file_or_dict, help='Protected settings in JSON format for sensitive information like credentials. A JSON file path is also accepted.')
c.argument('version', help='The version of the extension. To pin extension version to this value, please specify --no-auto-upgrade-minor-version.')
c.argument('enable_auto_upgrade', arg_type=get_three_state_flag(),
help='Indicate the extension should be automatically upgraded by the platform if there is a newer version of the extension available.')
with self.argument_context('vm extension set') as c:
c.argument('vm_extension_name', name_arg_type,
completer=get_resource_name_completion_list('Microsoft.Compute/virtualMachines/extensions'),
help='Name of the extension.', id_part=None)
c.argument('force_update', action='store_true', help='force to update even if the extension configuration has not changed.')
c.argument('extension_instance_name', extension_instance_name_type)
with self.argument_context('vmss extension set', min_api='2017-12-01') as c:
c.argument('force_update', action='store_true', help='force to update even if the extension configuration has not changed.')
c.argument('extension_instance_name', extension_instance_name_type)
c.argument('provision_after_extensions', nargs='+', help='Space-separated list of extension names after which this extension should be provisioned. These extensions must already be set on the vm.')
for scope in ['vm extension image', 'vmss extension image']:
with self.argument_context(scope) as c:
c.argument('image_location', options_list=['--location', '-l'], help='Image location.')
c.argument('name', help='Image name', id_part=None)
c.argument('publisher_name', options_list=['--publisher', '-p'], help='Image publisher name')
c.argument('type', options_list=['--name', '-n'], help='Name of the extension')
c.argument('latest', action='store_true', help='Show the latest version only.')
c.argument('version', help='Extension version')
c.argument('orderby', help="the $orderby odata query option")
c.argument('top', help='the $top odata query option')
for scope in ['vm create', 'vm update', 'vmss create', 'vmss update']:
with self.argument_context(scope) as c:
c.argument('license_type', license_type)
c.argument('priority', resource_type=ResourceType.MGMT_COMPUTE, min_api='2019-03-01',
arg_type=get_enum_type(self.get_models('VirtualMachinePriorityTypes'), default=None),
help="Priority. Use 'Spot' to run short-lived workloads in a cost-effective way. 'Low' enum will be deprecated in the future. Please use 'Spot' to deploy Azure spot VM and/or VMSS. Default to Regular.")
c.argument('max_price', min_api='2019-03-01', type=float, is_preview=True,
help='The maximum price (in US Dollars) you are willing to pay for a Spot VM/VMSS. -1 indicates that the Spot VM/VMSS should not be evicted for price reasons')
c.argument('capacity_reservation_group', options_list=['--capacity-reservation-group', '--crg'],
help='The ID or name of the capacity reservation group that is used to allocate. Pass in "None" to disassociate the capacity reservation group. Please note that if you want to delete a VM/VMSS that has been associated with capacity reservation group, you need to disassociate the capacity reservation group first.',
min_api='2021-04-01', is_preview=True)
c.argument('v_cpus_available', type=int, min_api='2021-11-01', help='Specify the number of vCPUs available')
c.argument('v_cpus_per_core', type=int, min_api='2021-11-01', help='Specify the ratio of vCPU to physical core. Setting this property to 1 also means that hyper-threading is disabled.')
with self.argument_context('vm update') as c:
c.argument('license_type', license_type)
c.argument('user_data', help='UserData for the VM. It can be passed in as file or string. If empty string is passed in, the existing value will be deleted.', completer=FilesCompleter(), type=file_type, min_api='2021-03-01')
with self.argument_context('vmss create') as c:
c.argument('priority', resource_type=ResourceType.MGMT_COMPUTE, min_api='2017-12-01',
arg_type=get_enum_type(self.get_models('VirtualMachinePriorityTypes'), default=None),
help="Priority. Use 'Spot' to run short-lived workloads in a cost-effective way. 'Low' enum will be deprecated in the future. Please use 'Spot' to deploy Azure spot VM and/or VMSS. Default to Regular.")
with self.argument_context('sig') as c:
c.argument('gallery_name', options_list=['--gallery-name', '-r'], help='gallery name')
c.argument('gallery_image_name', options_list=['--gallery-image-definition', '-i'], help='gallery image definition')
c.argument('gallery_image_version', options_list=['--gallery-image-version', '-e'], help='gallery image version')
for scope in ['sig show', 'sig image-definition show', 'sig image-definition delete']:
with self.argument_context(scope) as c:
c.argument('gallery_name', options_list=['--gallery-name', '-r'], id_part='name', help='gallery name')
c.argument('gallery_image_name', options_list=['--gallery-image-definition', '-i'], id_part='child_name_1', help='gallery image definition')
with self.argument_context('sig show') as c:
c.argument('select', help='The select expression to apply on the operation.')
c.argument('sharing_groups', action='store_true', help='The expand query option to query shared gallery groups')
with self.argument_context('sig list-shared') as c:
c.argument('location', arg_type=get_location_type(self.cli_ctx))
c.argument('shared_to', shared_to_type)
with self.argument_context('sig show-shared') as c:
c.argument('location', arg_type=get_location_type(self.cli_ctx), id_part='name')
c.argument('gallery_unique_name', type=str, help='The unique name of the Shared Gallery.',
id_part='child_name_1')
for scope in ['sig share add', 'sig share remove']:
with self.argument_context(scope) as c:
c.argument('gallery_name', type=str, help='The name of the Shared Image Gallery.', id_part='name')
c.argument('subscription_ids', nargs='+', help='A list of subscription ids to share the gallery.')
c.argument('tenant_ids', nargs='+', help='A list of tenant ids to share the gallery.')
with self.argument_context('sig share add') as c:
c.argument('op_type', default='Add', deprecate_info=c.deprecate(hide=True),
help='distinguish add operation and remove operation')
with self.argument_context('sig share remove') as c:
c.argument('op_type', default='Remove', deprecate_info=c.deprecate(hide=True),
help='distinguish add operation and remove operation')
with self.argument_context('sig share reset') as c:
c.argument('gallery_name', type=str, help='The name of the Shared Image Gallery.', id_part='name')
with self.argument_context('sig image-definition create') as c:
c.argument('offer', options_list=['--offer', '-f'], help='image offer')
c.argument('sku', options_list=['--sku', '-s'], help='image sku')
c.argument('publisher', options_list=['--publisher', '-p'], help='image publisher')
c.argument('os_type', arg_type=get_enum_type(['Windows', 'Linux']), help='the type of the OS that is included in the disk if creating a VM from user-image or a specialized VHD')
c.argument('os_state', arg_type=get_enum_type(self.get_models('OperatingSystemStateTypes')), help="This property allows the user to specify whether the virtual machines created under this image are 'Generalized' or 'Specialized'.")
c.argument('hyper_v_generation', arg_type=get_enum_type(self.get_models('HyperVGenerationTypes')), help='The hypervisor generation of the Virtual Machine. Applicable to OS disks only.')
c.argument('minimum_cpu_core', type=int, arg_group='Recommendation', help='minimum cpu cores')
c.argument('maximum_cpu_core', type=int, arg_group='Recommendation', help='maximum cpu cores')
c.argument('minimum_memory', type=int, arg_group='Recommendation', help='minimum memory in MB')
c.argument('maximum_memory', type=int, arg_group='Recommendation', help='maximum memory in MB')
c.argument('plan_publisher', help='plan publisher', arg_group='Purchase plan')
c.argument('plan_name', help='plan name', arg_group='Purchase plan')
c.argument('plan_product', help='plan product', arg_group='Purchase plan')
c.argument('eula', help='The Eula agreement for the gallery image')
c.argument('privacy_statement_uri', help='The privacy statement uri')
c.argument('release_note_uri', help='The release note uri')
c.argument('end_of_life_date', help="the end of life date, e.g. '2020-12-31'")
c.argument('disallowed_disk_types', nargs='*', help='disk types which would not work with the image, e.g., Standard_LRS')
c.argument('features', help='A list of gallery image features. E.g. "IsSecureBootSupported=true IsMeasuredBootSupported=false"')
c.argument('architecture', arg_type=get_enum_type(self.get_models('Architecture', operation_group='gallery_images')), min_api='2021-10-01', help='CPU architecture.')
with self.argument_context('sig image-definition list-shared') as c:
c.argument('location', arg_type=get_location_type(self.cli_ctx), id_part='name')
c.argument('gallery_unique_name', type=str, help='The unique name of the Shared Gallery.',
id_part='child_name_1')
c.argument('shared_to', shared_to_type)
c.argument('marker', arg_type=marker_type)
c.argument('show_next_marker', action='store_true', help='Show nextMarker in result when specified.')
with self.argument_context('sig image-definition show-shared') as c:
c.argument('location', arg_type=get_location_type(self.cli_ctx), id_part='name')
c.argument('gallery_unique_name', type=str, help='The unique name of the Shared Gallery.',
id_part='child_name_1')
c.argument('gallery_image_name', options_list=['--gallery-image-definition', '-i'], type=str, help='The name '
'of the Shared Gallery Image Definition from which the Image Versions are to be listed.',
id_part='child_name_2')
with self.argument_context('sig create') as c:
c.argument('description', help='the description of the gallery')
c.argument('permissions', arg_type=get_enum_type(GallerySharingPermissionTypes), arg_group='Sharing Profile',
min_api='2020-09-30', help='This property allows you to specify the permission of sharing gallery.')
c.argument('soft_delete', arg_type=get_three_state_flag(), min_api='2021-03-01', is_preview=True,
help='Enable soft-deletion for resources in this gallery, '
'allowing them to be recovered within retention time.')
c.argument('publisher_uri', help='Community gallery publisher uri.')
c.argument('publisher_contact', options_list=['--publisher-email'], help='Community gallery publisher contact email.')
c.argument('eula', help='Community gallery publisher eula.')
c.argument('public_name_prefix', help='Community gallery public name prefix.')
with self.argument_context('sig update') as c:
c.ignore('gallery')
c.argument('permissions', arg_type=get_enum_type(GallerySharingPermissionTypes), arg_group='Sharing Profile',
min_api='2020-09-30', help='This property allows you to specify the permission of sharing gallery.')
c.argument('soft_delete', arg_type=get_three_state_flag(), min_api='2021-03-01', is_preview=True,
help='Enable soft-deletion for resources in this gallery, '
'allowing them to be recovered within retention time.')
with self.argument_context('sig image-definition create') as c:
c.argument('description', help='the description of the gallery image definition')
with self.argument_context('sig image-definition update') as c:
c.ignore('gallery_image')
with self.argument_context('sig image-version') as c:
deprecated_option = c.deprecate(target='--gallery-image-version-name', redirect='--gallery-image-version', hide=True, expiration="3.0.0")
c.argument('gallery_image_version_name', options_list=['--gallery-image-version', '-e', deprecated_option],
help='Gallery image version in semantic version pattern. The allowed characters are digit and period. Digits must be within the range of a 32-bit integer, e.g. `<MajorVersion>.<MinorVersion>.<Patch>`')
with self.argument_context('sig image-version create', resource_type=ResourceType.MGMT_COMPUTE, operation_group='gallery_image_versions') as c:
c.argument('gallery_image_version', options_list=['--gallery-image-version', '-e'],
help='Gallery image version in semantic version pattern. The allowed characters are digit and period. Digits must be within the range of a 32-bit integer, e.g. `<MajorVersion>.<MinorVersion>.<Patch>`')
c.argument('description', help='the description of the gallery image version')
c.argument('managed_image', help='image name(if in the same resource group) or resource id')
c.argument('os_snapshot', help='Name or ID of OS disk snapshot')
c.argument('data_snapshots', nargs='+', help='Names or IDs (space-delimited) of data disk snapshots')
c.argument('data_snapshot_luns', nargs='+', help='Logical unit numbers (space-delimited) of data disk snapshots')
c.argument('exclude_from_latest', arg_type=get_three_state_flag(), help='The flag means that if it is set to true, people deploying VMs with version omitted will not use this version.')
c.argument('version', help='image version')
c.argument('end_of_life_date', help="the end of life date, e.g. '2020-12-31'")
c.argument('storage_account_type', help="The default storage account type to be used per region. To set regional storage account types, use --target-regions",
arg_type=get_enum_type(["Standard_LRS", "Standard_ZRS", "Premium_LRS"]), min_api='2019-03-01')
c.argument('target_region_encryption', nargs='+',
help='Space-separated list of customer managed keys for encrypting the OS and data disks in the gallery artifact for each region. Format for each region: `<os_des>,<lun1>,<lun1_des>,<lun2>,<lun2_des>`. Use "null" as a placeholder.')
c.argument('os_vhd_uri', help='Source VHD URI of OS disk')
c.argument('os_vhd_storage_account', help='Name or ID of storage account of source VHD URI of OS disk')
c.argument('data_vhds_uris', nargs='+', help='Source VHD URIs (space-delimited) of data disks')
c.argument('data_vhds_luns', nargs='+', help='Logical unit numbers (space-delimited) of source VHD URIs of data disks')
c.argument('data_vhds_storage_accounts', options_list=['--data-vhds-storage-accounts', '--data-vhds-sa'], nargs='+', help='Names or IDs (space-delimited) of storage accounts of source VHD URIs of data disks')
c.argument('replication_mode', min_api='2021-07-01', arg_type=get_enum_type(ReplicationMode), help='Optional parameter which specifies the mode to be used for replication. This property is not updatable.')
c.argument('target_region_cvm_encryption', nargs='+', min_api='2021-10-01', help='Space-separated list of customer managed key for Confidential VM encrypting the OS disk in the gallery artifact for each region. Format for each region: `<os_cvm_encryption_type>,<os_cvm_des>`. The valid values for os_cvm_encryption_type are EncryptedVMGuestStateOnlyWithPmk, EncryptedWithPmk, EncryptedWithCmk.')
c.argument('virtual_machine', help='Resource id of VM source')
c.argument('image_version', help='Resource id of gallery image version source')
with self.argument_context('sig image-version list-shared') as c:
c.argument('location', arg_type=get_location_type(self.cli_ctx), id_part='name')
c.argument('gallery_unique_name', type=str, help='The unique name of the Shared Gallery.',
id_part='child_name_1')
c.argument('gallery_image_name', options_list=['--gallery-image-definition', '-i'], type=str, help='The name '
'of the Shared Gallery Image Definition from which the Image Versions are to be listed.',
id_part='child_name_2')
c.argument('shared_to', shared_to_type)
c.argument('marker', arg_type=marker_type)
c.argument('show_next_marker', action='store_true', help='Show nextMarker in result when specified.')
with self.argument_context('sig image-version show') as c:
c.argument('expand', help="The expand expression to apply on the operation, e.g. 'ReplicationStatus'")
with self.argument_context('sig image-version show-shared') as c:
c.argument('location', arg_type=get_location_type(self.cli_ctx), id_part='name')
c.argument('gallery_unique_name', type=str, help='The unique name of the Shared Gallery.',
id_part='child_name_1')
c.argument('gallery_image_name', options_list=['--gallery-image-definition', '-i'], type=str, help='The name '
'of the Shared Gallery Image Definition from which the Image Versions are to be listed.',
id_part='child_name_2')
c.argument('gallery_image_version_name', options_list=['--gallery-image-version', '-e'], type=str, help='The '
'name of the gallery image version to be created. Needs to follow semantic version name pattern: '
'The allowed characters are digit and period. Digits must be within the range of a 32-bit integer. '
'Format: <MajorVersion>.<MinorVersion>.<Patch>', id_part='child_name_3')
for scope in ['sig image-version create', 'sig image-version update']:
with self.argument_context(scope) as c:
c.argument('target_regions', nargs='*',
help='Space-separated list of regions and their replica counts. Use `<region>[=<replica count>][=<storage account type>]` to optionally set the replica count and/or storage account type for each region. '
'If a replica count is not specified, the default replica count will be used. If a storage account type is not specified, the default storage account type will be used')
c.argument('replica_count', help='The default number of replicas to be created per region. To set regional replication counts, use --target-regions', type=int)
with self.argument_context('sig show-community') as c:
c.argument('location', arg_type=get_location_type(self.cli_ctx), id_part='name')
c.argument('public_gallery_name', public_gallery_name_type)
with self.argument_context('sig list-community') as c:
c.argument('location', arg_type=get_location_type(self.cli_ctx))
c.argument('marker', arg_type=marker_type)
c.argument('show_next_marker', action='store_true', help='Show nextMarker in result when specified.')
with self.argument_context('sig image-definition show-community') as c:
c.argument('location', arg_type=get_location_type(self.cli_ctx), id_part='name')
c.argument('public_gallery_name', public_gallery_name_type)
c.argument('gallery_image_name', gallery_image_name_type)
with self.argument_context('sig image-definition list-community') as c:
c.argument('location', arg_type=get_location_type(self.cli_ctx), id_part='name')
c.argument('public_gallery_name', public_gallery_name_type)
c.argument('marker', arg_type=marker_type)
c.argument('show_next_marker', action='store_true', help='Show nextMarker in result when specified.')
with self.argument_context('sig image-version show-community') as c:
c.argument('location', arg_type=get_location_type(self.cli_ctx), id_part='name')
c.argument('public_gallery_name', public_gallery_name_type)
c.argument('gallery_image_name', gallery_image_name_type)
c.argument('gallery_image_version_name', gallery_image_name_version_type)
with self.argument_context('sig image-version list-community') as c:
c.argument('location', arg_type=get_location_type(self.cli_ctx), id_part='name')
c.argument('public_gallery_name', public_gallery_name_type)
c.argument('gallery_image_name', gallery_image_name_type)
c.argument('marker', arg_type=marker_type)
c.argument('show_next_marker', action='store_true', help='Show nextMarker in result when specified.')
with self.argument_context('sig share enable-community') as c:
c.argument('gallery_name', type=str, help='The name of the Shared Image Gallery.', id_part='name')
c.argument('subscription_ids', nargs='+', help='A list of subscription ids to share the gallery.')
c.argument('tenant_ids', nargs='+', help='A list of tenant ids to share the gallery.')
c.argument('op_type', default='EnableCommunity', deprecate_info=c.deprecate(hide=True),
help='distinguish add operation and remove operation')
# endregion
# region Gallery applications
with self.argument_context('sig gallery-application') as c:
c.argument('gallery_application_name', options_list=['--name', '-n', '--application-name'],
help='The name of the gallery Application')
with self.argument_context('sig gallery-application create') as c:
c.argument('location', arg_type=get_location_type(self.cli_ctx), required=False,
validator=get_default_location_from_resource_group)
c.argument('description', help='The description of this gallery Application Definition resource. '
'This property is updatable.')
c.argument('os_type', arg_type=get_enum_type(['Windows', 'Linux']), help='This property allows you '
'to specify the supported type of the OS that application is built for. <br><br> Possible values '
'are: <br><br> **Windows** <br><br> **Linux**')
with self.argument_context('sig gallery-application update') as c:
c.argument('location', arg_type=get_location_type(self.cli_ctx), required=False,
validator=get_default_location_from_resource_group)
c.argument('description', help='The description of this gallery Application Definition resource. '
'This property is updatable.')
with self.argument_context('sig gallery-application version') as c:
c.argument('gallery_application_name', options_list=['--application-name'],
help='The name of the gallery Application')
c.argument('gallery_application_version_name', options_list=['--name', '-n', '--version-name'],
help='The name of the gallery Application Version')
with self.argument_context('sig gallery-application version create') as c:
c.argument('package_file_name', help='The name to assign the downloaded package file on the VM. This is limited to 4096 characters.'
'If not specified, the package file will be named the same as the Gallery Application name.')
c.argument('config_file_name', help='The name to assign the downloaded config file on the VM. This is limited to 4096 characters. '
'If not specified, the config file will be named the Gallery Application name appended with "_config"')
for scope in ['create', 'update']:
with self.argument_context('sig gallery-application version {}'.format(scope)) as c:
c.argument('location', arg_type=get_location_type(self.cli_ctx), required=False,
validator=get_default_location_from_resource_group)
c.argument('tags', tags_type)
c.argument('package_file_link', help='The mediaLink of the artifact, must be a readable storage page blob.')
c.argument('install_command', help='The path and arguments to install the gallery application.')
c.argument('remove_command', help='The path and arguments to remove the gallery application.')
c.argument('update_command', help='The path and arguments to update the gallery application. If not present,'
' then update operation will invoke remove command on the previous version'
' and install command on the current version of the gallery application.')
c.argument('target_regions', type=validate_file_or_dict, help='The target regions where the Image Version is'
'going to be replicated to. This property is updatable. Expected value: '
'json-string/json-file/@json-file.')
c.argument('default_file_link', help='The default configuration link of the artifact, must be a readable storage page blob.')
c.argument('exclude_from', arg_type=get_three_state_flag(), help='If set to true, Virtual Machines '
'deployed from the latest version of the Image Definition won\'t use this Image Version.',
arg_group='Publishing Profile')
c.argument('end_of_life_date', help='The end of life date of the gallery image version. This property can be '
'used for decommissioning purposes. This property is updatable.', arg_group='Publishing Profile')
# endregion
# region Proximity Placement Group
with self.argument_context('ppg', min_api='2018-04-01') as c:
c.argument('proximity_placement_group_name', arg_type=name_arg_type, help="The name of the proximity placement group.")
with self.argument_context('ppg create', min_api='2018-04-01') as c:
c.argument('ppg_type', options_list=['--type', '-t'], help="The type of the proximity placement group. Allowed values: Standard.")
c.argument('tags', tags_type)
with self.argument_context('ppg show', min_api='2019-07-01') as c:
c.argument('include_colocation_status', action='store_true', help='Enable fetching the colocation status of all the resources in the proximity placement group.')
for scope, item in [('vm create', 'VM'), ('vmss create', 'VMSS'),
('vm availability-set create', 'availability set'),
('vm update', 'VM'), ('vmss update', 'VMSS'),
('vm availability-set update', 'availability set')]:
with self.argument_context(scope, min_api='2018-04-01') as c:
c.argument('proximity_placement_group', options_list=['--ppg'], help="The name or ID of the proximity placement group the {} should be associated with.".format(item),
validator=_validate_proximity_placement_group) # only availability set does not have a command level validator, so this should be added.
# endregion
# region VM Monitor
with self.argument_context('vm monitor log show') as c:
c.argument('analytics_query', options_list=['--analytics-query', '-q'], help="Query to execute over Log Analytics data.")
c.argument('timespan', help="Timespan over which to query. Defaults to querying all available data.")
with self.argument_context('vm monitor metrics') as c:
c.argument('metricnamespace', options_list=['--namespace'],
help='Namespace to query metric definitions for.')
with self.argument_context('vm monitor metrics tail') as c:
from azure.mgmt.monitor.models import AggregationType
c.extra('resource_group_name', required=True)
c.argument('resource', arg_type=existing_vm_name, help='Name or ID of a virtual machine', validator=validate_vm_name_for_monitor_metrics, id_part=None)
c.argument('metadata', action='store_true')
c.argument('dimension', nargs='*', validator=validate_metric_dimension)
c.argument('aggregation', arg_type=get_enum_type(t for t in AggregationType if t.name != 'none'), nargs='*')
c.argument('metrics', nargs='*')
c.argument('orderby',
help='Aggregation to use for sorting results and the direction of the sort. Only one order can be specificed. Examples: sum asc')
c.argument('top', help='Max number of records to retrieve. Valid only if --filter used.')
c.argument('filters', options_list=['--filter'])
c.argument('metric_namespace', options_list=['--namespace'])
with self.argument_context('vm monitor metrics tail', arg_group='Time') as c:
c.argument('start_time', arg_type=get_datetime_type(help='Start time of the query.'))
c.argument('end_time', arg_type=get_datetime_type(help='End time of the query. Defaults to the current time.'))
c.argument('offset', type=get_period_type(as_timedelta=True))
c.argument('interval', arg_group='Time', type=get_period_type())
with self.argument_context('vm monitor metrics list-definitions') as c:
c.extra('resource_group_name', required=True)
c.argument('resource_uri', arg_type=existing_vm_name, help='Name or ID of a virtual machine', validator=validate_vm_name_for_monitor_metrics, id_part=None)
# endregion
# region disk encryption set
with self.argument_context('disk-encryption-set') as c:
c.argument('disk_encryption_set_name', disk_encryption_set_name)
c.argument('key_url', help='URL pointing to a key or secret in KeyVault.')
c.argument('source_vault', help='Name or ID of the KeyVault containing the key or secret.')
c.argument('encryption_type', arg_type=get_enum_type(['EncryptionAtRestWithPlatformKey', 'EncryptionAtRestWithCustomerKey', 'EncryptionAtRestWithPlatformAndCustomerKeys', 'ConfidentialVmEncryptedWithCustomerKey']),
help='The type of key used to encrypt the data of the disk. EncryptionAtRestWithPlatformKey: Disk is encrypted at rest with Platform managed key. It is the default encryption type. EncryptionAtRestWithCustomerKey: Disk is encrypted at rest with Customer managed key that can be changed and revoked by a customer. EncryptionAtRestWithPlatformAndCustomerKeys: Disk is encrypted at rest with 2 layers of encryption. One of the keys is Customer managed and the other key is Platform managed. ConfidentialVmEncryptedWithCustomerKey: An additional encryption type accepted for confidential VM. Disk is encrypted at rest with Customer managed key.')
c.argument('location', validator=get_default_location_from_resource_group)
c.argument('tags', tags_type)
c.argument('enable_auto_key_rotation', arg_type=get_three_state_flag(), min_api='2020-12-01',
options_list=['--enable-auto-key-rotation', '--auto-rotation'],
help='Enable automatic rotation of keys.')
with self.argument_context('disk-encryption-set create', operation_group='disk_encryption_sets',
min_api='2022-03-02') as c:
c.argument('federated_client_id', help='The federated client id used in cross tenant scenario.')
c.argument('mi_system_assigned', arg_group='Managed Identity', arg_type=get_three_state_flag(),
help='Provide this flag to use system assigned identity. Check out help for more examples')
c.argument('mi_user_assigned', arg_group='Managed Identity', nargs='+',
help='User Assigned Identity ids to be used for disk encryption set. '
'Check out help for more examples')
with self.argument_context('disk-encryption-set update', operation_group='disk_encryption_sets',
min_api='2022-03-02') as c:
c.argument('federated_client_id', help='The federated client id used in cross tenant scenario.')
with self.argument_context('disk-encryption-set identity', operation_group='disk_encryption_sets',
min_api='2022-03-02') as c:
c.argument('mi_system_assigned', options_list=['--system-assigned'],
arg_group='Managed Identity', arg_type=get_three_state_flag(),
help='Provide this flag to use system assigned identity for disk encryption set. '
'Check out help for more examples')
c.argument('mi_user_assigned', options_list=['--user-assigned'], arg_group='Managed Identity', nargs='*',
help='User Assigned Identity ids to be used for disk encryption set. '
'Check out help for more examples')
# endregion
# region DiskAccess
with self.argument_context('disk-access', resource_type=ResourceType.MGMT_COMPUTE, operation_group='disk_accesses') as c:
c.argument('disk_access_name', arg_type=name_arg_type, help='Name of the disk access resource.', id_part='name')
c.argument('location', validator=get_default_location_from_resource_group)
c.argument('tags', tags_type)
# endRegion
# region Capacity
with self.argument_context('capacity reservation group') as c:
c.argument('location', arg_type=get_location_type(self.cli_ctx), validator=get_default_location_from_resource_group)
c.argument('capacity_reservation_group_name', options_list=['--capacity-reservation-group', '-n'],
help='The name of the capacity reservation group.')
c.argument('tags', tags_type)
with self.argument_context('capacity reservation group create') as c:
c.argument('zones', zones_type, help='Availability Zones to use for this capacity reservation group. If not provided, the group supports only regional resources in the region. If provided, enforces each capacity reservation in the group to be in one of the zones.')
with self.argument_context('capacity reservation group show') as c:
c.argument('instance_view', action='store_true', options_list=['--instance-view', '-i'], help='Retrieve the list of instance views of the capacity reservations under the capacity reservation group which is a snapshot of the runtime properties of a capacity reservation that is managed by the platform and can change outside of control plane operations.')
with self.argument_context('capacity reservation group list') as c:
c.argument('vm_instance', action='store_true', help='Retrieve the Virtual Machine Instance which are associated to capacity reservation group in the response.')
c.argument('vmss_instance', action='store_true', help='Retrieve the ScaleSet VM Instance which are associated to capacity reservation group in the response.')
with self.argument_context('capacity reservation') as c:
c.argument('location', arg_type=get_location_type(self.cli_ctx), validator=get_default_location_from_resource_group)
c.argument('capacity_reservation_group_name', options_list=['--capacity-reservation-group', '-c'],
help='The name of the capacity reservation group.')
c.argument('capacity_reservation_name', options_list=['--capacity-reservation-name', '-n'],
help='The name of the capacity reservation.')
c.argument('capacity', type=int, help='Specify the number of virtual machines in the scale set.')
c.argument('tags', tags_type)
with self.argument_context('capacity reservation create') as c:
c.argument('zone', zone_type, help='Availability Zone to use for this capacity reservation. The zone has to be single value and also should be part for the list of zones specified during the capacity reservation group creation. If not provided, the reservation supports only non-zonal deployments. If provided, enforces VM/VMSS using this capacity reservation to be in same zone.')
c.argument('sku_name', options_list=['--sku', '-s'], required=True, help='The SKU of the resource for which capacity needs be reserved. Currently VM Skus with the capability called "CapacityReservationSupported" set to true are supported. Refer to List Microsoft.Compute SKUs in a region (https://docs.microsoft.com/rest/api/compute/resourceskus/list) for supported values.')
with self.argument_context('capacity reservation show') as c:
c.argument('instance_view', action='store_true', options_list=['--instance-view', '-i'], help='Retrieve a snapshot of the runtime properties of the capacity reservation that is managed by the platform and can change outside of control plane operations.')
# endRegion
# region Restore point
with self.argument_context('restore-point') as c:
c.argument('restore_point_collection_name', options_list=['--collection-name'],
help='The name of the restore point collection.')
with self.argument_context('restore-point create') as c:
c.argument('restore_point_name', options_list=['--name', '-n', '--restore-point-name'],
help='The name of the restore point.')
c.argument('exclude_disks', nargs='+', help='List of disk resource ids that the '
'customer wishes to exclude from the restore point. If no disks are specified, all disks will be '
'included.')
c.argument('source_restore_point', help='Resource Id of the source restore point from which a copy needs to be created')
with self.argument_context('restore-point show') as c:
c.argument('restore_point_name', options_list=['--name', '-n', '--restore-point-name'],
help='The name of the restore point.')
c.argument('expand', help='The expand expression to apply on the operation.',
deprecate_info=c.deprecate(hide=True))
c.argument('instance_view', action='store_true', help='Show the instance view of a restore point.')
with self.argument_context('restore-point delete') as c:
c.argument('restore_point_name', options_list=['--name', '-n', '--restore-point-name'],
help='The name of the restore point.')
with self.argument_context('restore-point wait') as c:
c.argument('restore_point_name', options_list=['--name', '-n', '--restore-point-name'],
help='The name of the restore point.')
# endRegion
# region Restore point collection
with self.argument_context('restore-point collection create') as c:
c.argument('location', arg_type=get_location_type(self.cli_ctx), required=False,
validator=get_default_location_from_resource_group)
c.argument('tags', tags_type)
c.argument('source_id', help='Resource Id of the source resource used to create this restore point collection',
arg_group='Source')
with self.argument_context('restore-point collection update') as c:
c.argument('tags', tags_type)
with self.argument_context('restore-point collection show') as c:
c.argument('expand', help='The expand expression to apply on the operation.',
deprecate_info=c.deprecate(hide=True))
c.argument('restore_points', action='store_true', help='Show all contained restore points in the restore point collection.')
|
def load_arguments(self, _):
# Model imports
DiskStorageAccountTypes = self.get_models('DiskStorageAccountTypes', operation_group='disks')
SnapshotStorageAccountTypes = self.get_models('SnapshotStorageAccountTypes', operation_group='snapshots')
UpgradeMode, CachingTypes, OperatingSystemTypes = self.get_models('UpgradeMode', 'CachingTypes', 'OperatingSystemTypes')
HyperVGenerationTypes = self.get_models('HyperVGenerationTypes')
DedicatedHostLicenseTypes = self.get_models('DedicatedHostLicenseTypes')
OrchestrationServiceNames, OrchestrationServiceStateAction = self.get_models('OrchestrationServiceNames', 'OrchestrationServiceStateAction', operation_group='virtual_machine_scale_sets')
RebootSetting, VMGuestPatchClassificationWindows, VMGuestPatchClassificationLinux = self.get_models('VMGuestPatchRebootSetting', 'VMGuestPatchClassificationWindows', 'VMGuestPatchClassificationLinux')
GallerySharingPermissionTypes = self.get_models('GallerySharingPermissionTypes', operation_group='shared_galleries')
ReplicationMode = self.get_models('ReplicationMode', operation_group='gallery_image_versions')
# REUSABLE ARGUMENT DEFINITIONS
name_arg_type = CLIArgumentType(options_list=['--name', '-n'], metavar='NAME')
multi_ids_type = CLIArgumentType(nargs='+')
existing_vm_name = CLIArgumentType(overrides=name_arg_type,
configured_default='vm',
help="The name of the Virtual Machine. You can configure the default using `az configure --defaults vm=<name>`",
completer=get_resource_name_completion_list('Microsoft.Compute/virtualMachines'), id_part='name')
existing_disk_name = CLIArgumentType(overrides=name_arg_type, help='The name of the managed disk', completer=get_resource_name_completion_list('Microsoft.Compute/disks'), id_part='name')
existing_snapshot_name = CLIArgumentType(overrides=name_arg_type, help='The name of the snapshot', completer=get_resource_name_completion_list('Microsoft.Compute/snapshots'), id_part='name')
vmss_name_type = CLIArgumentType(name_arg_type,
configured_default='vmss',
completer=get_resource_name_completion_list('Microsoft.Compute/virtualMachineScaleSets'),
help="Scale set name. You can configure the default using `az configure --defaults vmss=<name>`",
id_part='name')
extension_instance_name_type = CLIArgumentType(help="Name of extension instance, which can be customized. Default: name of the extension.")
image_template_name_type = CLIArgumentType(overrides=name_arg_type, id_part='name')
disk_encryption_set_name = CLIArgumentType(overrides=name_arg_type, help='Name of disk encryption set.', id_part='name')
ephemeral_placement_type = CLIArgumentType(options_list=['--ephemeral-os-disk-placement', '--ephemeral-placement'], arg_type=get_enum_type(['ResourceDisk', 'CacheDisk']), min_api='2019-12-01')
license_type = CLIArgumentType(
help="Specifies that the Windows image or disk was licensed on-premises. To enable Azure Hybrid Benefit for "
"Windows Server, use 'Windows_Server'. To enable Multi-tenant Hosting Rights for Windows 10, "
"use 'Windows_Client'. For more information see the Azure Windows VM online docs.",
arg_type=get_enum_type(['Windows_Server', 'Windows_Client', 'RHEL_BYOS', 'SLES_BYOS', 'RHEL_BASE',
'RHEL_SAPAPPS', 'RHEL_SAPHA', 'RHEL_EUS', 'RHEL_BASESAPAPPS', 'RHEL_BASESAPHA', 'SLES_STANDARD', 'SLES', 'SLES_SAP', 'SLES_HPC',
'None', 'RHEL_ELS_6']))
# StorageAccountTypes renamed to DiskStorageAccountTypes in 2018_06_01 of azure-mgmt-compute
DiskStorageAccountTypes = DiskStorageAccountTypes or self.get_models('StorageAccountTypes')
if DiskStorageAccountTypes:
disk_sku = CLIArgumentType(arg_type=get_enum_type(DiskStorageAccountTypes))
else:
# StorageAccountTypes introduced in api version 2016_04_30_preview of Resource.MGMT.Compute package..
# However, 2017-03-09-profile targets version 2016-03-30 of compute package.
disk_sku = CLIArgumentType(arg_type=get_enum_type(['Premium_LRS', 'Standard_LRS']))
if SnapshotStorageAccountTypes:
snapshot_sku = CLIArgumentType(arg_type=get_enum_type(SnapshotStorageAccountTypes))
else:
# SnapshotStorageAccountTypes introduced in api version 2018_04_01 of Resource.MGMT.Compute package..
# However, 2017-03-09-profile targets version 2016-03-30 of compute package.
snapshot_sku = CLIArgumentType(arg_type=get_enum_type(['Premium_LRS', 'Standard_LRS']))
# special case for `network nic scale-set list` command alias
with self.argument_context('network nic scale-set list') as c:
c.argument('virtual_machine_scale_set_name', options_list=['--vmss-name'], completer=get_resource_name_completion_list('Microsoft.Compute/virtualMachineScaleSets'), id_part='name')
HyperVGenerationTypes = HyperVGenerationTypes or self.get_models('HyperVGeneration', operation_group='disks')
if HyperVGenerationTypes:
hyper_v_gen_sku = CLIArgumentType(arg_type=get_enum_type(HyperVGenerationTypes, default="V1"))
else:
hyper_v_gen_sku = CLIArgumentType(arg_type=get_enum_type(["V1", "V2"], default="V1"))
ultra_ssd_enabled_type = CLIArgumentType(
arg_type=get_three_state_flag(), min_api='2018-06-01',
help='Enables or disables the capability to have 1 or more managed data disks with UltraSSD_LRS storage account')
scale_in_policy_type = CLIArgumentType(
nargs='+', arg_type=get_enum_type(self.get_models('VirtualMachineScaleSetScaleInRules')),
help='Specify the scale-in policy (space delimited) that decides which virtual machines are chosen for removal when a Virtual Machine Scale Set is scaled-in.'
)
edge_zone_type = CLIArgumentType(
help='The name of edge zone.',
min_api='2020-12-01',
is_preview=True
)
t_shared_to = self.get_models('SharedToValues', operation_group='shared_galleries')
shared_to_type = CLIArgumentType(
arg_type=get_enum_type(t_shared_to),
help='The query parameter to decide what shared galleries to fetch when doing listing operations. '
'If not specified, list by subscription id.'
)
marker_type = CLIArgumentType(
help='A string value that identifies the portion of the list of containers to be '
'returned with the next listing operation. The operation returns the NextMarker value within '
'the response body if the listing operation did not return all containers remaining to be listed '
'with the current page. If specified, this generator will begin returning results from the point '
'where the previous generator stopped.')
enable_vtpm_type = CLIArgumentType(arg_type=get_three_state_flag(), min_api='2020-12-01', help='Enable vTPM.')
enable_secure_boot_type = CLIArgumentType(arg_type=get_three_state_flag(), min_api='2020-12-01', help='Enable secure boot.')
security_type = CLIArgumentType(arg_type=get_enum_type(self.get_models('SecurityTypes')), min_api='2020-12-01', help='Specify the security type of the virtual machine.')
gallery_image_name_type = CLIArgumentType(options_list=['--gallery-image-definition', '-i'], help='The name of the community gallery image definition from which the image versions are to be listed.', id_part='child_name_2')
gallery_image_name_version_type = CLIArgumentType(options_list=['--gallery-image-version', '-e'], help='The name of the gallery image version to be created. Needs to follow semantic version name pattern: The allowed characters are digit and period. Digits must be within the range of a 32-bit integer. Format: <MajorVersion>.<MinorVersion>.<Patch>', id_part='child_name_3')
public_gallery_name_type = CLIArgumentType(help='The public name of community gallery.', id_part='child_name_1')
# region MixedScopes
for scope in ['vm', 'disk', 'snapshot', 'image', 'sig']:
with self.argument_context(scope) as c:
c.argument('tags', tags_type)
for scope in ['disk', 'snapshot']:
with self.argument_context(scope) as c:
c.ignore('source_blob_uri', 'source_disk', 'source_snapshot', 'source_restore_point')
c.argument('source_storage_account_id', help='used when source blob is in a different subscription')
c.argument('size_gb', options_list=['--size-gb', '-z'], help='size in GB. Max size: 4095 GB (certain preview disks can be larger).', type=int)
c.argument('duration_in_seconds', help='Time duration in seconds until the SAS access expires', type=int)
if self.supported_api_version(min_api='2018-09-30', operation_group='disks'):
c.argument('access_level', arg_type=get_enum_type(['Read', 'Write']), default='Read', help='access level')
c.argument('hyper_v_generation', arg_type=hyper_v_gen_sku, help='The hypervisor generation of the Virtual Machine. Applicable to OS disks only.')
else:
c.ignore('access_level', 'for_upload', 'hyper_v_generation')
c.argument('encryption_type', min_api='2019-07-01', arg_type=get_enum_type(self.get_models('EncryptionType', operation_group='disks')),
help='Encryption type. EncryptionAtRestWithPlatformKey: Disk is encrypted with XStore managed key at rest. It is the default encryption type. EncryptionAtRestWithCustomerKey: Disk is encrypted with Customer managed key at rest.')
c.argument('disk_encryption_set', min_api='2019-07-01', help='Name or ID of disk encryption set that is used to encrypt the disk.')
c.argument('location', help='Location. Values from: `az account list-locations`. You can configure the default location using `az configure --defaults location=<location>`. If location is not specified and no default location specified, location will be automatically set as same as the resource group.')
operation_group = 'disks' if scope == 'disk' else 'snapshots'
c.argument('network_access_policy', min_api='2020-05-01', help='Policy for accessing the disk via network.', arg_type=get_enum_type(self.get_models('NetworkAccessPolicy', operation_group=operation_group)))
c.argument('disk_access', min_api='2020-05-01', help='Name or ID of the disk access resource for using private endpoints on disks.')
c.argument('enable_bursting', arg_type=get_three_state_flag(), help='Enable on-demand bursting beyond the provisioned performance target of the disk. On-demand bursting is disabled by default, and it does not apply to Ultra disks.')
c.argument('public_network_access', arg_type=get_enum_type(['Disabled', 'Enabled']), min_api='2021-04-01', is_preview=True, help='Customers can set on Managed Disks or Snapshots to control the export policy on the disk.')
c.argument('accelerated_network', arg_type=get_three_state_flag(), min_api='2021-04-01', is_preview=True, help='Customers can set on Managed Disks or Snapshots to enable the accelerated networking if the OS disk image support.')
for scope in ['disk create', 'snapshot create']:
with self.argument_context(scope) as c:
c.argument('source', help='source to create the disk/snapshot from, including unmanaged blob uri, managed disk id or name, or snapshot id or name')
c.argument('secure_vm_disk_encryption_set', min_api='2021-08-01', help='Name or ID of disk encryption set created with ConfidentialVmEncryptedWithCustomerKey encryption type.')
# endregion
# region Disks
with self.argument_context('disk grant-access', resource_type=ResourceType.MGMT_COMPUTE, operation_group='disks') as c:
c.argument('secure_vm_guest_state_sas', options_list=['--secure-vm-guest-state-sas', '-s'], min_api='2022-03-02',
action='store_true', validator=validate_secure_vm_guest_state_sas,
help="Get SAS on managed disk with VM guest state. It will be used by default when the create option of disk is 'secureOSUpload'")
# endregion
# region Disks
with self.argument_context('disk', resource_type=ResourceType.MGMT_COMPUTE, operation_group='disks') as c:
c.argument('zone', zone_type, min_api='2017-03-30', options_list=['--zone']) # TODO: --size-gb currently has claimed -z. We can do a breaking change later if we want to.
c.argument('disk_name', existing_disk_name, completer=get_resource_name_completion_list('Microsoft.Compute/disks'))
c.argument('name', arg_type=name_arg_type)
c.argument('sku', arg_type=disk_sku, help='Underlying storage SKU')
c.argument('os_type', arg_type=get_enum_type(OperatingSystemTypes), help='The Operating System type of the Disk.')
c.argument('disk_iops_read_write', type=int, min_api='2018-06-01', help='The number of IOPS allowed for this disk. Only settable for UltraSSD disks. One operation can transfer between 4k and 256k bytes')
c.argument('disk_mbps_read_write', type=int, min_api='2018-06-01', help="The bandwidth allowed for this disk. Only settable for UltraSSD disks. MBps means millions of bytes per second with ISO notation of powers of 10")
c.argument('upload_size_bytes', type=int, min_api='2019-03-01',
help='The size (in bytes) of the contents of the upload including the VHD footer. Min value: 20972032. Max value: 35183298347520')
c.argument('max_shares', type=int, help='The maximum number of VMs that can attach to the disk at the same time. Value greater than one indicates a disk that can be mounted on multiple VMs at the same time')
c.argument('disk_iops_read_only', type=int, help='The total number of IOPS that will be allowed across all VMs mounting the shared disk as ReadOnly. One operation can transfer between 4k and 256k bytes')
c.argument('disk_mbps_read_only', type=int, help='The total throughput (MBps) that will be allowed across all VMs mounting the shared disk as ReadOnly. MBps means millions of bytes per second - MB here uses the ISO notation, of powers of 10')
c.argument('image_reference', help='ID or URN (publisher:offer:sku:version) of the image from which to create a disk')
c.argument('image_reference_lun', type=int, help='If the disk is created from an image\'s data disk, this is an index that indicates which of the data disks in the image to use. For OS disks, this field is null')
c.argument('gallery_image_reference', help='ID of the Compute, Shared or Community Gallery image version from which to create a disk. For details about valid format, please refer to the help sample')
c.ignore('gallery_image_reference_type')
c.argument('gallery_image_reference_lun', type=int, help='If the disk is created from an image\'s data disk, this is an index that indicates which of the data disks in the image to use. For OS disks, this field is null')
c.argument('logical_sector_size', type=int, help='Logical sector size in bytes for Ultra disks. Supported values are 512 ad 4096. 4096 is the default.')
c.argument('tier', help='Performance tier of the disk (e.g, P4, S10) as described here: https://azure.microsoft.com/pricing/details/managed-disks/. Does not apply to Ultra disks.')
c.argument('edge_zone', edge_zone_type)
c.argument('security_type', arg_type=get_enum_type(self.get_models('DiskSecurityTypes', operation_group='disks')), help='The security type of the VM. Applicable for OS disks only.', min_api='2020-12-01')
c.argument('support_hibernation', arg_type=get_three_state_flag(), help='Indicate the OS on a disk supports hibernation.', min_api='2020-12-01')
c.argument('architecture', arg_type=get_enum_type(self.get_models('Architecture', operation_group='disks')), min_api='2021-12-01', help='CPU architecture.')
c.argument('data_access_auth_mode', arg_type=get_enum_type(['AzureActiveDirectory', 'None']), min_api='2021-12-01', help='Specify the auth mode when exporting or uploading to a disk or snapshot.')
# endregion
# region Disks
with self.argument_context('disk create', resource_type=ResourceType.MGMT_COMPUTE, operation_group='disks') as c:
c.argument('security_data_uri', min_api='2022-03-02', help='Please specify the blob URI of VHD to be imported into VM guest state')
c.argument('for_upload', arg_type=get_three_state_flag(), min_api='2018-09-30',
deprecate_info=c.deprecate(target='--for-upload', redirect='--upload-type Upload', hide=True),
help='Create the disk for uploading blobs. Replaced by "--upload-type Upload"')
c.argument('upload_type', arg_type=get_enum_type(['Upload', 'UploadWithSecurityData']), min_api='2018-09-30',
help="Create the disk for upload scenario. 'Upload' is for Standard disk only upload. 'UploadWithSecurityData' is for OS Disk upload along with VM Guest State. Please note the 'UploadWithSecurityData' is not valid for data disk upload, it only to be used for OS Disk upload at present.")
# endregion
# region Snapshots
with self.argument_context('snapshot', resource_type=ResourceType.MGMT_COMPUTE, operation_group='snapshots') as c:
c.argument('snapshot_name', existing_snapshot_name, id_part='name', completer=get_resource_name_completion_list('Microsoft.Compute/snapshots'))
c.argument('name', arg_type=name_arg_type)
c.argument('sku', arg_type=snapshot_sku)
c.argument('incremental', arg_type=get_three_state_flag(), min_api='2019-03-01',
help='Whether a snapshot is incremental. Incremental snapshots on the same disk occupy less space than full snapshots and can be diffed')
c.argument('edge_zone', edge_zone_type)
c.argument('copy_start', arg_type=get_three_state_flag(), min_api='2021-04-01',
help='Create snapshot by using a deep copy process, where the resource creation is considered complete only after all data has been copied from the source.')
c.argument('architecture', arg_type=get_enum_type(self.get_models('Architecture', operation_group='snapshots')), min_api='2021-12-01', help='CPU architecture.')
c.argument('for_upload', arg_type=get_three_state_flag(), min_api='2018-09-30',
help='Create the snapshot for uploading blobs later on through storage commands. Run "az snapshot grant-access --access-level Write" to retrieve the snapshot\'s SAS token.')
# endregion
# region Images
with self.argument_context('image') as c:
c.argument('os_type', arg_type=get_enum_type(['Windows', 'Linux']))
c.argument('image_name', arg_type=name_arg_type, id_part='name', completer=get_resource_name_completion_list('Microsoft.Compute/images'))
c.argument('tags', tags_type)
with self.argument_context('image create') as c:
# here we collpase all difference image sources to under 2 common arguments --os-disk-source --data-disk-sources
c.argument('name', arg_type=name_arg_type, help='new image name')
c.argument('source', help='OS disk source from the same region, including a virtual machine ID or name, OS disk blob URI, managed OS disk ID or name, or OS snapshot ID or name')
c.argument('data_disk_sources', nargs='+', help='Space-separated list of data disk sources, including unmanaged blob URI, managed disk ID or name, or snapshot ID or name')
c.argument('zone_resilient', min_api='2017-12-01', arg_type=get_three_state_flag(), help='Specifies whether an image is zone resilient or not. '
'Default is false. Zone resilient images can be created only in regions that provide Zone Redundant Storage')
c.argument('storage_sku', arg_type=disk_sku, help='The SKU of the storage account with which to create the VM image. Unused if source VM is specified.')
c.argument('os_disk_caching', arg_type=get_enum_type(CachingTypes), help="Storage caching type for the image's OS disk.")
c.argument('data_disk_caching', arg_type=get_enum_type(CachingTypes),
help="Storage caching type for the image's data disk.")
c.argument('hyper_v_generation', arg_type=hyper_v_gen_sku, min_api="2019-03-01", help='The hypervisor generation of the Virtual Machine created from the image.')
c.ignore('source_virtual_machine', 'os_blob_uri', 'os_disk', 'os_snapshot', 'data_blob_uris', 'data_disks', 'data_snapshots')
c.argument('edge_zone', edge_zone_type, )
# endregion
# region Image Templates
with self.argument_context('image builder') as c:
ib_output_name_help = "Name of the image builder run output."
c.argument('location', get_location_type(self.cli_ctx))
c.argument('scripts', nargs='+', help="Space-separated list of shell or powershell scripts to customize the image with. Each script must be a publicly accessible URL."
" Infers type of script from file extension ('.sh' or'.ps1') or from source type. More more customizer options and flexibility, see: 'az image template customizer add'")
c.argument('source', options_list=["--image-source", "-i"], help="The base image to customize. Must be a valid platform image URN, platform image alias, Red Hat ISO image URI, managed image name/ID, or shared image version ID.")
c.argument('image_template_name', image_template_name_type, help="The name of the image template.")
c.argument('checksum', help="The SHA256 checksum of the Red Hat ISO image")
c.argument('managed_image_destinations', nargs='+', help='Managed image output distributor information. Space-separated list of key-value pairs. E.g "image_1=westus2 image_2=westus". Each key is the name or resource ID of the managed image to be created. Each value is the location of the image.')
c.argument('shared_image_destinations', nargs='+', help='Shared image gallery (sig) output distributor information. Space-separated list of key-value pairs. E.g "my_gallery_1/image_def_1=eastus,westus my_gallery_2/image_def_2=uksouth,canadaeast,francesouth." '
'Each key is the sig image definition ID or sig gallery name and sig image definition delimited by a "/". Each value is a comma-delimited list of replica locations.')
c.argument('output_name', help=ib_output_name_help)
c.ignore('destinations_lists', 'scripts_list', 'source_dict')
with self.argument_context('image builder create') as c:
ib_source_type = CLIArgumentType(arg_group="Image Source")
ib_customizer_type = CLIArgumentType(arg_group="Customizer")
ib_cutput_type = CLIArgumentType(arg_group="Output")
c.argument('build_timeout', type=int, help="The Maximum duration to wait while building the image template, in minutes. Default is 60.")
c.argument('image_template', help='Local path or URL to an image template file. When using --image-template, all other parameters are ignored except -g and -n. Reference: https://docs.microsoft.com/azure/virtual-machines/linux/image-builder-json')
c.argument('identity', nargs='+', help='List of user assigned identities (name or ID, space delimited) of the image template.')
# VM profile
c.argument('vm_size', help='Size of the virtual machine used to build, customize and capture images. Omit or specify empty string to use the default (Standard_D1_v2)')
c.argument('os_disk_size', type=int, help='Size of the OS disk in GB. Omit or specify 0 to use Azure\'s default OS disk size')
c.argument('vnet', help='Name of VNET to deploy the build virtual machine. You should only specify it when subnet is a name')
c.argument('subnet', help='Name or ID of subnet to deploy the build virtual machine')
c.argument('proxy_vm_size', help='Size of the virtual machine used to build, customize and capture images (Standard_D1_v2 for Gen1 images and Standard_D2ds_v4 for Gen2 images).')
c.argument('build_vm_identities', nargs='+', help='Optional configuration of the virtual network to use to deploy the build virtual machine in. Omit if no specific virtual network needs to be used.')
# Image Source Arguments
c.argument('source', arg_type=ib_source_type)
c.argument('checksum', arg_type=ib_source_type)
c.argument('', arg_type=ib_source_type)
# Image Customizer Arguments
c.argument('scripts', arg_type=ib_customizer_type)
c.argument('', arg_type=ib_customizer_type)
c.argument('', arg_type=ib_customizer_type)
# Image Output Arguments
c.argument('managed_image_destinations', arg_type=ib_cutput_type)
c.argument('shared_image_destinations', arg_type=ib_cutput_type)
c.argument('output_name', arg_type=ib_cutput_type)
with self.argument_context('image builder output') as c:
ib_sig_regions_help = "Space-separated list of regions to replicate the image version into."
ib_img_location_help = "Location where the customized image will be created."
c.argument('gallery_image_definition', arg_group="Shared Image Gallery", help="Name or ID of the existing SIG image definition to create the customized image version with.")
c.argument('gallery_name', arg_group="Shared Image Gallery", help="Shared image gallery name, if image definition name and not ID was provided.")
c.argument('gallery_replication_regions', arg_group="Shared Image Gallery", nargs='+', help=ib_sig_regions_help)
c.argument('managed_image', arg_group="Managed Image", help="Name or ID of the customized managed image to be created.")
c.argument('managed_image_location', arg_group="Managed Image", help=ib_img_location_help)
with self.argument_context('image builder output add') as c:
ib_artifact_tags_help = "Tags that will be applied to the output artifact once it has been created by the distributor. " + tags_type.settings['help']
ib_artifact_tags_type = CLIArgumentType(overrides=tags_type, help=ib_artifact_tags_help, options_list=["--artifact-tags"])
ib_default_loc_help = " Defaults to resource group's location."
c.argument('output_name', help=ib_output_name_help + " Defaults to the name of the managed image or sig image definition.")
c.argument('gallery_replication_regions', arg_group="Shared Image Gallery", nargs='+', help=ib_sig_regions_help + ib_default_loc_help)
c.argument('managed_image_location', arg_group="Managed Image", help=ib_img_location_help + ib_default_loc_help)
c.argument('is_vhd', arg_group="VHD", help="The output is a VHD distributor.", action='store_true')
c.argument('tags', arg_type=ib_artifact_tags_type)
c.ignore('location')
with self.argument_context('image builder customizer') as c:
ib_win_restart_type = CLIArgumentType(arg_group="Windows Restart")
ib_win_update_type = CLIArgumentType(arg_group="Windows Update")
ib_script_type = CLIArgumentType(arg_group="Shell and Powershell")
ib_powershell_type = CLIArgumentType(arg_group="Powershell")
ib_file_customizer_type = CLIArgumentType(arg_group="File")
c.argument('customizer_name', help="Name of the customizer.")
c.argument('customizer_type', options_list=['--type', '-t'], help="Type of customizer to be added to the image template.", arg_type=get_enum_type(ScriptType))
# Script Args
c.argument('script_url', arg_type=ib_script_type, help="URL of script to customize the image with. The URL must be publicly accessible.")
c.argument('inline_script', arg_type=ib_script_type, nargs='+', help="Space-separated list of inline script lines to customize the image with.")
# Powershell Specific Args
c.argument('valid_exit_codes', options_list=['--exit-codes', '-e'], arg_type=ib_powershell_type, nargs='+', help="Space-separated list of valid exit codes, as integers")
# Windows Restart Specific Args
c.argument('restart_command', arg_type=ib_win_restart_type, help="Command to execute the restart operation.")
c.argument('restart_check_command', arg_type=ib_win_restart_type, help="Command to verify that restart succeeded.")
c.argument('restart_timeout', arg_type=ib_win_restart_type, help="Restart timeout specified as a string consisting of a magnitude and unit, e.g. '5m' (5 minutes) or '2h' (2 hours)", default="5m")
# Windows Update Specific Args
c.argument('search_criteria', arg_type=ib_win_update_type, help='Criteria to search updates. Omit or specify empty string to use the default (search all). Refer to above link for examples and detailed description of this field.')
c.argument('filters', arg_type=ib_win_update_type, nargs='+', help='Space delimited filters to select updates to apply. Omit or specify empty array to use the default (no filter)')
c.argument('update_limit', arg_type=ib_win_update_type, help='Maximum number of updates to apply at a time. Omit or specify 0 to use the default (1000)')
# File Args
c.argument('file_source', arg_type=ib_file_customizer_type, help="The URI of the file to be downloaded into the image. It can be a github link, SAS URI for Azure Storage, etc.")
c.argument('dest_path', arg_type=ib_file_customizer_type, help="The absolute destination path where the file specified in --file-source will be downloaded to in the image")
# endregion
# region AvailabilitySets
with self.argument_context('vm availability-set') as c:
c.argument('availability_set_name', name_arg_type, id_part='name', completer=get_resource_name_completion_list('Microsoft.Compute/availabilitySets'), help='Name of the availability set')
with self.argument_context('vm availability-set create') as c:
c.argument('availability_set_name', name_arg_type, validator=get_default_location_from_resource_group, help='Name of the availability set')
c.argument('platform_update_domain_count', type=int, help='Update Domain count. If unspecified, the server will pick the most optimal number like 5.')
c.argument('platform_fault_domain_count', type=int, help='Fault Domain count.')
c.argument('validate', help='Generate and validate the ARM template without creating any resources.', action='store_true')
c.argument('unmanaged', action='store_true', min_api='2016-04-30-preview', help='contained VMs should use unmanaged disks')
with self.argument_context('vm availability-set update') as c:
if self.supported_api_version(max_api='2016-04-30-preview', operation_group='virtual_machines'):
c.argument('name', name_arg_type, id_part='name', completer=get_resource_name_completion_list('Microsoft.Compute/availabilitySets'), help='Name of the availability set')
c.argument('availability_set_name', options_list=['--availability-set-name'])
# endregion
# region VirtualMachines
with self.argument_context('vm') as c:
c.argument('vm_name', existing_vm_name)
c.argument('size', completer=get_vm_size_completion_list)
c.argument('name', arg_type=name_arg_type)
c.argument('zone', zone_type, min_api='2017-03-30')
c.argument('caching', help='Disk caching policy', arg_type=get_enum_type(CachingTypes))
c.argument('nsg', help='The name to use when creating a new Network Security Group (default) or referencing an existing one. Can also reference an existing NSG by ID or specify "" for none.', arg_group='Network')
c.argument('nsg_rule', help='NSG rule to create when creating a new NSG. Defaults to open ports for allowing RDP on Windows and allowing SSH on Linux.', arg_group='Network', arg_type=get_enum_type(['RDP', 'SSH']))
c.argument('application_security_groups', min_api='2017-09-01', nargs='+', options_list=['--asgs'], help='Space-separated list of existing application security groups to associate with the VM.', arg_group='Network')
c.argument('workspace', is_preview=True, arg_group='Monitor', help='Name or ID of Log Analytics Workspace. If you specify the workspace through its name, the workspace should be in the same resource group with the vm, otherwise a new workspace will be created.')
with self.argument_context('vm capture') as c:
c.argument('overwrite', action='store_true')
with self.argument_context('vm update') as c:
c.argument('os_disk', min_api='2017-12-01', help="Managed OS disk ID or name to swap to")
c.argument('write_accelerator', nargs='*', min_api='2017-12-01',
help="enable/disable disk write accelerator. Use singular value 'true/false' to apply across, or specify individual disks, e.g.'os=true 1=true 2=true' for os disk and data disks with lun of 1 & 2")
c.argument('disk_caching', nargs='*', help="Use singular value to apply across, or specify individual disks, e.g. 'os=ReadWrite 0=None 1=ReadOnly' should enable update os disk and 2 data disks")
c.argument('ultra_ssd_enabled', ultra_ssd_enabled_type)
c.argument('enable_secure_boot', enable_secure_boot_type)
c.argument('enable_vtpm', enable_vtpm_type)
c.argument('size', help='The new size of the virtual machine. See https://azure.microsoft.com/pricing/details/virtual-machines/ for size info.', is_preview=True)
c.argument('ephemeral_os_disk_placement', arg_type=ephemeral_placement_type,
help='Only applicable when used with `--size`. Allows you to choose the Ephemeral OS disk provisioning location.', is_preview=True)
c.argument('enable_hibernation', arg_type=get_three_state_flag(), min_api='2021-03-01', help='The flag that enable or disable hibernation capability on the VM.')
with self.argument_context('vm create') as c:
c.argument('name', name_arg_type, validator=_resource_not_exists(self.cli_ctx, 'Microsoft.Compute/virtualMachines'))
c.argument('vm_name', name_arg_type, id_part=None, help='Name of the virtual machine.', completer=None)
c.argument('os_disk_size_gb', type=int, help='the size of the os disk in GB', arg_group='Storage')
c.argument('availability_set', help='Name or ID of an existing availability set to add the VM to. None by default.')
c.argument('vmss', help='Name or ID of an existing virtual machine scale set that the virtual machine should be assigned to. None by default.')
c.argument('nsg', help='The name to use when creating a new Network Security Group (default) or referencing an existing one. Can also reference an existing NSG by ID or specify "" for none (\'""\' in Azure CLI using PowerShell or --% operator).', arg_group='Network')
c.argument('nsg_rule', help='NSG rule to create when creating a new NSG. Defaults to open ports for allowing RDP on Windows and allowing SSH on Linux. NONE represents no NSG rule', arg_group='Network', arg_type=get_enum_type(['RDP', 'SSH', 'NONE']))
c.argument('application_security_groups', resource_type=ResourceType.MGMT_NETWORK, min_api='2017-09-01', nargs='+', options_list=['--asgs'], help='Space-separated list of existing application security groups to associate with the VM.', arg_group='Network', validator=validate_asg_names_or_ids)
c.argument('boot_diagnostics_storage',
help='pre-existing storage account name or its blob uri to capture boot diagnostics. Its sku should be one of Standard_GRS, Standard_LRS and Standard_RAGRS')
c.argument('accelerated_networking', resource_type=ResourceType.MGMT_NETWORK, min_api='2016-09-01', arg_type=get_three_state_flag(), arg_group='Network',
help="enable accelerated networking. Unless specified, CLI will enable it based on machine image and size")
if self.supported_api_version(min_api='2019-03-01', resource_type=ResourceType.MGMT_COMPUTE):
VirtualMachineEvictionPolicyTypes = self.get_models('VirtualMachineEvictionPolicyTypes', resource_type=ResourceType.MGMT_COMPUTE)
c.argument('eviction_policy', resource_type=ResourceType.MGMT_COMPUTE, min_api='2019-03-01',
arg_type=get_enum_type(VirtualMachineEvictionPolicyTypes, default=None),
help="The eviction policy for the Spot priority virtual machine. Default eviction policy is Deallocate for a Spot priority virtual machine")
c.argument('enable_agent', arg_type=get_three_state_flag(), min_api='2018-06-01',
help='Indicates whether virtual machine agent should be provisioned on the virtual machine. When this property is not specified, default behavior is to set it to true. This will ensure that VM Agent is installed on the VM so that extensions can be added to the VM later')
c.argument('enable_auto_update', arg_type=get_three_state_flag(), min_api='2020-06-01',
help='Indicate whether Automatic Updates is enabled for the Windows virtual machine')
c.argument('patch_mode', arg_type=get_enum_type(['AutomaticByOS', 'AutomaticByPlatform', 'Manual', 'ImageDefault']), min_api='2020-12-01',
help='Mode of in-guest patching to IaaS virtual machine. Allowed values for Windows VM: AutomaticByOS, AutomaticByPlatform, Manual. Allowed values for Linux VM: AutomaticByPlatform, ImageDefault. Manual - You control the application of patches to a virtual machine. You do this by applying patches manually inside the VM. In this mode, automatic updates are disabled; the paramater --enable-auto-update must be false. AutomaticByOS - The virtual machine will automatically be updated by the OS. The parameter --enable-auto-update must be true. AutomaticByPlatform - the virtual machine will automatically updated by the OS. ImageDefault - The virtual machine\'s default patching configuration is used. The parameter --enable-agent and --enable-auto-update must be true')
c.argument('ssh_key_name', help='Use it as public key in virtual machine. It should be an existing SSH key resource in Azure.')
c.argument('enable_hotpatching', arg_type=get_three_state_flag(), help='Patch VMs without requiring a reboot. --enable-agent must be set and --patch-mode must be set to AutomaticByPlatform', min_api='2020-12-01')
c.argument('platform_fault_domain', min_api='2020-06-01',
help='Specify the scale set logical fault domain into which the virtual machine will be created. By default, the virtual machine will be automatically assigned to a fault domain that best maintains balance across available fault domains. This is applicable only if the virtualMachineScaleSet property of this virtual machine is set. The virtual machine scale set that is referenced, must have platform fault domain count. This property cannot be updated once the virtual machine is created. Fault domain assignment can be viewed in the virtual machine instance view')
c.argument('count', type=int, is_preview=True,
help='Number of virtual machines to create. Value range is [2, 250], inclusive. Don\'t specify this parameter if you want to create a normal single VM. The VMs are created in parallel. The output of this command is an array of VMs instead of one single VM. Each VM has its own public IP, NIC. VNET and NSG are shared. It is recommended that no existing public IP, NIC, VNET and NSG are in resource group. When --count is specified, --attach-data-disks, --attach-os-disk, --boot-diagnostics-storage, --computer-name, --host, --host-group, --nics, --os-disk-name, --private-ip-address, --public-ip-address, --public-ip-address-dns-name, --storage-account, --storage-container-name, --subnet, --use-unmanaged-disk, --vnet-name are not allowed.')
c.argument('security_type', security_type)
c.argument('enable_secure_boot', enable_secure_boot_type)
c.argument('enable_vtpm', enable_vtpm_type)
c.argument('user_data', help='UserData for the VM. It can be passed in as file or string.', completer=FilesCompleter(), type=file_type, min_api='2021-03-01')
c.argument('enable_hibernation', arg_type=get_three_state_flag(), min_api='2021-03-01', help='The flag that enable or disable hibernation capability on the VM.')
with self.argument_context('vm create', arg_group='Storage') as c:
c.argument('attach_os_disk', help='Attach an existing OS disk to the VM. Can use the name or ID of a managed disk or the URI to an unmanaged disk VHD.')
c.argument('attach_data_disks', nargs='+', help='Attach existing data disks to the VM. Can use the name or ID of a managed disk or the URI to an unmanaged disk VHD.')
with self.argument_context('vm create', arg_group='Dedicated Host', min_api='2019-03-01') as c:
c.argument('dedicated_host_group', options_list=['--host-group'], is_preview=True, help="Name or resource ID of the dedicated host group that the VM will reside in. --host and --host-group can't be used together.")
c.argument('dedicated_host', options_list=['--host'], is_preview=True, help="Resource ID of the dedicated host that the VM will reside in. --host and --host-group can't be used together.")
with self.argument_context('vm update', arg_group='Dedicated Host', min_api='2019-03-01') as c:
c.argument('dedicated_host_group', options_list=['--host-group'], is_preview=True, help="Name or resource ID of the dedicated host group that the VM will reside in. --host and --host-group can't be used together. You should deallocate the VM before update, and start the VM after update. Please check out help for more examples.")
c.argument('dedicated_host', options_list=['--host'], is_preview=True, help="Resource ID of the dedicated host that the VM will reside in. --host and --host-group can't be used together. You should deallocate the VM before update, and start the VM after update. Please check out help for more examples.")
with self.argument_context('vm open-port') as c:
c.argument('vm_name', name_arg_type, help='The name of the virtual machine to open inbound traffic on.')
c.argument('network_security_group_name', options_list=('--nsg-name',), help='The name of the network security group to create if one does not exist. Ignored if an NSG already exists.', validator=validate_nsg_name)
c.argument('apply_to_subnet', help='Allow inbound traffic on the subnet instead of the NIC', action='store_true')
c.argument('port', help="The port or port range (ex: 80-100) to open inbound traffic to. Use '*' to allow traffic to all ports. Use comma separated values to specify more than one port or port range.")
c.argument('priority', help='Rule priority, between 100 (highest priority) and 4096 (lowest priority). Must be unique for each rule in the collection.', type=int)
for scope in ['vm show', 'vm list']:
with self.argument_context(scope) as c:
c.argument('show_details', action='store_true', options_list=['--show-details', '-d'], help='show public ip address, FQDN, and power states. command will run slow')
for scope in ['vm show', 'vmss show']:
with self.argument_context(scope) as c:
c.argument('include_user_data', action='store_true', options_list=['--include-user-data', '-u'], help='Include the user data properties in the query result.', min_api='2021-03-01')
for scope in ['vm get-instance-view', 'vm wait', 'vmss wait']:
with self.argument_context(scope) as c:
c.ignore('include_user_data')
with self.argument_context('vm diagnostics') as c:
c.argument('vm_name', arg_type=existing_vm_name, options_list=['--vm-name'])
with self.argument_context('vm diagnostics set') as c:
c.argument('storage_account', completer=get_resource_name_completion_list('Microsoft.Storage/storageAccounts'))
with self.argument_context('vm install-patches') as c:
c.argument('maximum_duration', type=str, help='Specify the maximum amount of time that the operation will run. It must be an ISO 8601-compliant duration string such as PT4H (4 hours)')
c.argument('reboot_setting', arg_type=get_enum_type(RebootSetting), help='Define when it is acceptable to reboot a VM during a software update operation.')
c.argument('classifications_to_include_win', nargs='+', arg_type=get_enum_type(VMGuestPatchClassificationWindows), help='Space-separated list of classifications to include for Windows VM.')
c.argument('classifications_to_include_linux', nargs='+', arg_type=get_enum_type(VMGuestPatchClassificationLinux), help='Space-separated list of classifications to include for Linux VM.')
c.argument('kb_numbers_to_include', nargs='+', help='Space-separated list of KBs to include in the patch operation. Applicable to Windows VM only')
c.argument('kb_numbers_to_exclude', nargs='+', help='Space-separated list of KBs to exclude in the patch operation. Applicable to Windows VM only')
c.argument('exclude_kbs_requiring_reboot', arg_type=get_three_state_flag(), help="Filter out KBs that don't have a reboot behavior of 'NeverReboots' when this is set. Applicable to Windows VM only")
c.argument('package_name_masks_to_include', nargs='+', help='Space-separated list of packages to include in the patch operation. Format: packageName_packageVersion. Applicable to Linux VM only')
c.argument('package_name_masks_to_exclude', nargs='+', help='Space-separated list of packages to exclude in the patch operation. Format: packageName_packageVersion. Applicable to Linux VM only')
with self.argument_context('vm disk') as c:
c.argument('vm_name', options_list=['--vm-name'], id_part=None, completer=get_resource_name_completion_list('Microsoft.Compute/virtualMachines'))
c.argument('new', action='store_true', help='create a new disk')
c.argument('sku', arg_type=disk_sku, help='Underlying storage SKU')
c.argument('size_gb', options_list=['--size-gb', '-z'], help='size in GB. Max size: 4095 GB (certain preview disks can be larger).', type=int)
c.argument('lun', type=int, help='0-based logical unit number (LUN). Max value depends on the Virtual Machine size.')
with self.argument_context('vm disk attach') as c:
c.argument('enable_write_accelerator', min_api='2017-12-01', action='store_true', help='enable write accelerator')
c.argument('disk', options_list=['--name', '-n', c.deprecate(target='--disk', redirect='--name', hide=True)],
help="The name or ID of the managed disk", id_part='name',
completer=get_resource_name_completion_list('Microsoft.Compute/disks'))
c.argument('disks', nargs='*', help="One or more names or IDs of the managed disk (space-delimited).",
completer=get_resource_name_completion_list('Microsoft.Compute/disks'))
c.argument('ids', deprecate_info=c.deprecate(target='--ids', redirect='--disks', hide=True))
with self.argument_context('vm disk detach') as c:
c.argument('disk_name', arg_type=name_arg_type, help='The data disk name.')
with self.argument_context('vm encryption enable') as c:
c.argument('encrypt_format_all', action='store_true', help='Encrypts-formats data disks instead of encrypting them. Encrypt-formatting is a lot faster than in-place encryption but wipes out the partition getting encrypt-formatted. (Only supported for Linux virtual machines.)')
# Place aad arguments in their own group
aad_arguments = 'Azure Active Directory'
c.argument('aad_client_id', arg_group=aad_arguments)
c.argument('aad_client_secret', arg_group=aad_arguments)
c.argument('aad_client_cert_thumbprint', arg_group=aad_arguments)
with self.argument_context('vm extension') as c:
c.argument('vm_extension_name', name_arg_type, completer=get_resource_name_completion_list('Microsoft.Compute/virtualMachines/extensions'), help='Name of the extension.', id_part='child_name_1')
c.argument('vm_name', arg_type=existing_vm_name, options_list=['--vm-name'], id_part='name')
c.argument('expand', help='The expand expression to apply on the operation.', deprecate_info=c.deprecate(expiration='3.0.0', hide=True))
with self.argument_context('vm extension list') as c:
c.argument('vm_name', arg_type=existing_vm_name, options_list=['--vm-name'], id_part=None)
with self.argument_context('vm extension show') as c:
c.argument('instance_view', action='store_true', help='The instance view of a virtual machine extension.')
with self.argument_context('vm secret') as c:
c.argument('secrets', multi_ids_type, options_list=['--secrets', '-s'], help='Space-separated list of key vault secret URIs. Perhaps, produced by \'az keyvault secret list-versions --vault-name vaultname -n cert1 --query "[?attributes.enabled].id" -o tsv\'')
c.argument('keyvault', help='Name or ID of the key vault.', validator=validate_keyvault)
c.argument('certificate', help='key vault certificate name or its full secret URL')
c.argument('certificate_store', help='Windows certificate store names. Default: My')
with self.argument_context('vm secret list') as c:
c.argument('vm_name', arg_type=existing_vm_name, id_part=None)
with self.argument_context('vm image') as c:
c.argument('publisher_name', options_list=['--publisher', '-p'], help='image publisher')
c.argument('publisher', options_list=['--publisher', '-p'], help='image publisher')
c.argument('offer', options_list=['--offer', '-f'], help='image offer')
c.argument('plan', help='image billing plan')
c.argument('sku', options_list=['--sku', '-s'], help='image sku')
c.argument('version', help="image sku's version")
c.argument('urn', help="URN, in format of 'publisher:offer:sku:version' or 'publisher:offer:sku:edge_zone:version'. If specified, other argument values can be omitted")
with self.argument_context('vm image list') as c:
c.argument('image_location', get_location_type(self.cli_ctx))
c.argument('edge_zone', edge_zone_type)
c.argument('architecture', help='The name of architecture. ', arg_type=get_enum_type(["x64", "Arm64"]))
with self.argument_context('vm image list-offers') as c:
c.argument('edge_zone', edge_zone_type)
with self.argument_context('vm image list-skus') as c:
c.argument('edge_zone', edge_zone_type)
with self.argument_context('vm image list-publishers') as c:
c.argument('edge_zone', edge_zone_type)
with self.argument_context('vm image show') as c:
c.argument('skus', options_list=['--sku', '-s'])
c.argument('edge_zone', edge_zone_type)
with self.argument_context('vm image terms') as c:
c.argument('urn', help='URN, in the format of \'publisher:offer:sku:version\'. If specified, other argument values can be omitted')
c.argument('publisher', help='Image publisher')
c.argument('offer', help='Image offer')
c.argument('plan', help='Image billing plan')
with self.argument_context('vm nic') as c:
c.argument('vm_name', existing_vm_name, options_list=['--vm-name'], id_part=None)
c.argument('nics', nargs='+', help='Names or IDs of NICs.', validator=validate_vm_nics)
c.argument('primary_nic', help='Name or ID of the primary NIC. If missing, the first NIC in the list will be the primary.')
with self.argument_context('vm nic show') as c:
c.argument('nic', help='NIC name or ID.', validator=validate_vm_nic)
with self.argument_context('vm unmanaged-disk') as c:
c.argument('new', action='store_true', help='Create a new disk.')
c.argument('lun', type=int, help='0-based logical unit number (LUN). Max value depends on the Virtual Machine size.')
c.argument('vhd_uri', help="Virtual hard disk URI. For example: https://mystorage.blob.core.windows.net/vhds/d1.vhd")
with self.argument_context('vm unmanaged-disk attach') as c:
c.argument('disk_name', options_list=['--name', '-n'], help='The data disk name.')
c.argument('size_gb', options_list=['--size-gb', '-z'], help='size in GB. Max size: 4095 GB (certain preview disks can be larger).', type=int)
with self.argument_context('vm unmanaged-disk detach') as c:
c.argument('disk_name', options_list=['--name', '-n'], help='The data disk name.')
for scope in ['vm unmanaged-disk attach', 'vm unmanaged-disk detach']:
with self.argument_context(scope) as c:
c.argument('vm_name', arg_type=existing_vm_name, options_list=['--vm-name'], id_part=None)
with self.argument_context('vm unmanaged-disk list') as c:
c.argument('vm_name', options_list=['--vm-name', '--name', '-n'], arg_type=existing_vm_name, id_part=None)
with self.argument_context('vm user') as c:
c.argument('username', options_list=['--username', '-u'], help='The user name')
c.argument('password', options_list=['--password', '-p'], help='The user password')
with self.argument_context('vm list-skus') as c:
c.argument('size', options_list=['--size', '-s'], help="size name, partial name is accepted")
c.argument('zone', options_list=['--zone', '-z'], arg_type=get_three_state_flag(), help="show skus supporting availability zones")
c.argument('show_all', options_list=['--all'], arg_type=get_three_state_flag(),
help="show all information including vm sizes not available under the current subscription")
c.argument('resource_type', options_list=['--resource-type', '-r'], help='resource types e.g. "availabilitySets", "snapshots", "disks", etc')
with self.argument_context('vm restart') as c:
c.argument('force', action='store_true', help='Force the VM to restart by redeploying it. Use if the VM is unresponsive.')
with self.argument_context('vm host') as c:
c.argument('host_group_name', options_list=['--host-group'], id_part='name', help="Name of the Dedicated Host Group")
c.argument('host_name', name_arg_type, id_part='child_name_1', help="Name of the Dedicated Host")
c.ignore('expand')
with self.argument_context('vm host create') as c:
c.argument('platform_fault_domain', options_list=['--platform-fault-domain', '-d'], type=int,
help="Fault domain of the host within a group. Allowed values: 0, 1, 2")
c.argument('auto_replace_on_failure', options_list=['--auto-replace'], arg_type=get_three_state_flag(),
help="Replace the host automatically if a failure occurs")
c.argument('license_type', arg_type=get_enum_type(DedicatedHostLicenseTypes),
help="The software license type that will be applied to the VMs deployed on the dedicated host.")
c.argument('sku', help="SKU of the dedicated host. Available SKUs: https://azure.microsoft.com/pricing/details/virtual-machines/dedicated-host/")
with self.argument_context('vm host list') as c:
c.argument('host_group_name', id_part=None)
with self.argument_context('vm host group') as c:
c.argument('host_group_name', name_arg_type, id_part='name', help="Name of the Dedicated Host Group")
c.argument('automatic_placement', arg_type=get_three_state_flag(), min_api='2020-06-01',
help='Specify whether virtual machines or virtual machine scale sets can be placed automatically '
'on the dedicated host group. Automatic placement means resources are allocated on dedicated '
'hosts, that are chosen by Azure, under the dedicated host group. The value is defaulted to '
'false when not provided.')
with self.argument_context('vm host group create') as c:
c.argument('platform_fault_domain_count', options_list=["--platform-fault-domain-count", "-c"], type=int,
help="Number of fault domains that the host group can span.")
c.argument('zones', zone_type)
c.argument('ultra_ssd_enabled', arg_type=get_three_state_flag(), min_api='2022-03-01', help='Enable a capability to have UltraSSD Enabled Virtual Machines on Dedicated Hosts of the Dedicated Host Group.')
for scope in ["vm host", "vm host group"]:
with self.argument_context("{} create".format(scope)) as c:
location_type = get_location_type(self.cli_ctx)
custom_location_msg = " Otherwise, location will default to the resource group's location"
custom_location_type = CLIArgumentType(overrides=location_type,
help=location_type.settings["help"] + custom_location_msg)
c.argument('location', arg_type=custom_location_type)
# endregion
# region VMSS
scaleset_name_aliases = ['vm_scale_set_name', 'virtual_machine_scale_set_name', 'name']
with self.argument_context('vmss') as c:
c.argument('zones', zones_type, min_api='2017-03-30')
c.argument('instance_id', id_part='child_name_1')
c.argument('instance_ids', multi_ids_type, help='Space-separated list of IDs (ex: 1 2 3 ...) or * for all instances. If not provided, the action will be applied on the scaleset itself')
c.argument('tags', tags_type)
c.argument('caching', help='Disk caching policy', arg_type=get_enum_type(CachingTypes))
for dest in scaleset_name_aliases:
c.argument(dest, vmss_name_type)
c.argument('host_group', min_api='2020-06-01',
help='Name or ID of dedicated host group that the virtual machine scale set resides in')
for scope in ['vmss deallocate', 'vmss delete-instances', 'vmss restart', 'vmss start', 'vmss stop', 'vmss show', 'vmss update-instances', 'vmss simulate-eviction']:
with self.argument_context(scope) as c:
for dest in scaleset_name_aliases:
c.argument(dest, vmss_name_type, id_part=None) # due to instance-ids parameter
with self.argument_context('vmss create', operation_group='virtual_machine_scale_sets') as c:
VirtualMachineEvictionPolicyTypes = self.get_models('VirtualMachineEvictionPolicyTypes', resource_type=ResourceType.MGMT_COMPUTE)
c.argument('name', name_arg_type)
c.argument('nat_backend_port', default=None, help='Backend port to open with NAT rules. Defaults to 22 on Linux and 3389 on Windows.')
c.argument('single_placement_group', arg_type=get_three_state_flag(), help="Limit the scale set to a single placement group."
" See https://docs.microsoft.com/azure/virtual-machine-scale-sets/virtual-machine-scale-sets-placement-groups for details.")
c.argument('platform_fault_domain_count', type=int, help='Fault Domain count for each placement group in the availability zone', min_api='2017-12-01')
c.argument('vmss_name', name_arg_type, id_part=None, help='Name of the virtual machine scale set.')
c.argument('instance_count', help='Number of VMs in the scale set.', type=int)
c.argument('disable_overprovision', help='Overprovision option (see https://azure.microsoft.com/documentation/articles/virtual-machine-scale-sets-overview/ for details).', action='store_true')
c.argument('upgrade_policy_mode', help=None, arg_type=get_enum_type(UpgradeMode))
c.argument('health_probe', help='Probe name from the existing load balancer, mainly used for rolling upgrade or automatic repairs')
c.argument('vm_sku', help='Size of VMs in the scale set. Default to "Standard_DS1_v2". See https://azure.microsoft.com/pricing/details/virtual-machines/ for size info.')
c.argument('nsg', help='Name or ID of an existing Network Security Group.', arg_group='Network')
c.argument('eviction_policy', resource_type=ResourceType.MGMT_COMPUTE, min_api='2017-12-01', arg_type=get_enum_type(VirtualMachineEvictionPolicyTypes, default=None),
help="The eviction policy for virtual machines in a Spot priority scale set. Default eviction policy is Deallocate for a Spot priority scale set")
c.argument('application_security_groups', resource_type=ResourceType.MGMT_COMPUTE, min_api='2018-06-01', nargs='+', options_list=['--asgs'], help='Space-separated list of existing application security groups to associate with the VM.', arg_group='Network', validator=validate_asg_names_or_ids)
c.argument('computer_name_prefix', help='Computer name prefix for all of the virtual machines in the scale set. Computer name prefixes must be 1 to 15 characters long')
c.argument('orchestration_mode', help='Choose how virtual machines are managed by the scale set. In Uniform mode, you define a virtual machine model and Azure will generate identical instances based on that model. In Flexible mode, you manually create and add a virtual machine of any configuration to the scale set or generate identical instances based on virtual machine model defined for the scale set.',
arg_type=get_enum_type(['Uniform', 'Flexible']))
c.argument('scale_in_policy', scale_in_policy_type)
c.argument('automatic_repairs_grace_period', min_api='2018-10-01',
help='The amount of time (in minutes, between 30 and 90) for which automatic repairs are suspended due to a state change on VM.')
c.argument('automatic_repairs_action', arg_type=get_enum_type(['Replace', 'Restart', 'Reimage']), min_api='2021-11-01', help='Type of repair action that will be used for repairing unhealthy virtual machines in the scale set.')
c.argument('user_data', help='UserData for the virtual machines in the scale set. It can be passed in as file or string.', completer=FilesCompleter(), type=file_type, min_api='2021-03-01')
c.argument('network_api_version', min_api='2021-03-01',
help="Specify the Microsoft.Network API version used when creating networking resources in the Network "
"Interface Configurations for Virtual Machine Scale Set with orchestration mode 'Flexible'. Default "
"value is 2020-11-01.")
c.argument('enable_spot_restore', arg_type=get_three_state_flag(), min_api='2021-04-01', help='Enable the Spot-Try-Restore feature where evicted VMSS SPOT instances will be tried to be restored opportunistically based on capacity availability and pricing constraints')
c.argument('spot_restore_timeout', min_api='2021-04-01', help='Timeout value expressed as an ISO 8601 time duration after which the platform will not try to restore the VMSS SPOT instances')
c.argument('enable_agent', arg_type=get_three_state_flag(), min_api='2018-06-01',
help='Indicate whether virtual machine agent should be provisioned on the virtual machine. When this property is not specified, default behavior is to set it to true. This will ensure that VM Agent is installed on the VM so that extensions can be added to the VM later')
c.argument('enable_auto_update', arg_type=get_three_state_flag(), min_api='2020-06-01',
help='Indicate whether Automatic Updates is enabled for the Windows virtual machine')
c.argument('patch_mode', arg_type=get_enum_type(['AutomaticByOS', 'AutomaticByPlatform', 'Manual', 'ImageDefault']), min_api='2020-12-01',
help='Mode of in-guest patching to IaaS virtual machine. Allowed values for Windows VM: AutomaticByOS, AutomaticByPlatform, Manual. Allowed values for Linux VM: AutomaticByPlatform, ImageDefault. Manual - You control the application of patches to a virtual machine. You do this by applying patches manually inside the VM. In this mode, automatic updates are disabled; the paramater --enable-auto-update must be false. AutomaticByOS - The virtual machine will automatically be updated by the OS. The parameter --enable-auto-update must be true. AutomaticByPlatform - the virtual machine will automatically updated by the OS. ImageDefault - The virtual machine\'s default patching configuration is used. The parameter --enable-agent and --enable-auto-update must be true')
c.argument('security_type', security_type)
c.argument('enable_secure_boot', enable_secure_boot_type)
c.argument('enable_vtpm', enable_vtpm_type)
with self.argument_context('vmss create', arg_group='Network Balancer') as c:
LoadBalancerSkuName = self.get_models('LoadBalancerSkuName', resource_type=ResourceType.MGMT_NETWORK)
c.argument('application_gateway', help='Name to use when creating a new application gateway (default) or referencing an existing one. Can also reference an existing application gateway by ID or specify "" for none.', options_list=['--app-gateway'])
c.argument('app_gateway_capacity', help='The number of instances to use when creating a new application gateway.')
c.argument('app_gateway_sku', help='SKU when creating a new application gateway.')
c.argument('app_gateway_subnet_address_prefix', help='The subnet IP address prefix to use when creating a new application gateway in CIDR format.')
c.argument('backend_pool_name', help='Name to use for the backend pool when creating a new load balancer or application gateway.')
c.argument('backend_port', help='When creating a new load balancer, backend port to open with NAT rules (Defaults to 22 on Linux and 3389 on Windows). When creating an application gateway, the backend port to use for the backend HTTP settings.', type=int)
c.argument('load_balancer', help='Name to use when creating a new load balancer (default) or referencing an existing one. Can also reference an existing load balancer by ID or specify "" for none.', options_list=['--load-balancer', '--lb'])
c.argument('load_balancer_sku', resource_type=ResourceType.MGMT_NETWORK, min_api='2017-08-01', options_list=['--lb-sku'], arg_type=get_enum_type(LoadBalancerSkuName),
help="Sku of the Load Balancer to create. Default to 'Standard' when single placement group is turned off; otherwise, default to 'Basic'. The public IP is supported to be created on edge zone only when it is 'Standard'")
c.argument('nat_pool_name', help='Name to use for the NAT pool when creating a new load balancer.', options_list=['--lb-nat-pool-name', '--nat-pool-name'])
with self.argument_context('vmss create', min_api='2017-03-30', arg_group='Network') as c:
c.argument('public_ip_per_vm', action='store_true', help="Each VM instance will have a public ip. For security, you can use '--nsg' to apply appropriate rules")
c.argument('vm_domain_name', help="domain name of VM instances, once configured, the FQDN is `vm<vm-index>.<vm-domain-name>.<..rest..>`")
c.argument('dns_servers', nargs='+', help="space-separated IP addresses of DNS servers, e.g. 10.0.0.5 10.0.0.6")
c.argument('accelerated_networking', arg_type=get_three_state_flag(),
help="enable accelerated networking. Unless specified, CLI will enable it based on machine image and size")
with self.argument_context('vmss update') as c:
protection_policy_type = CLIArgumentType(overrides=get_three_state_flag(), arg_group="Protection Policy", min_api='2019-03-01')
c.argument('protect_from_scale_in', arg_type=protection_policy_type, help="Protect the VM instance from scale-in operations.")
c.argument('protect_from_scale_set_actions', arg_type=protection_policy_type, help="Protect the VM instance from scale set actions (including scale-in).")
c.argument('enable_terminate_notification', min_api='2019-03-01', arg_type=get_three_state_flag(),
help='Enable terminate notification')
c.argument('ultra_ssd_enabled', ultra_ssd_enabled_type)
c.argument('scale_in_policy', scale_in_policy_type)
c.argument('force_deletion', action='store_true', is_preview=True, help='This property allow you to specify if virtual machines chosen for removal have to be force deleted when a virtual machine scale set is being scaled-in.')
c.argument('user_data', help='UserData for the virtual machines in the scale set. It can be passed in as file or string. If empty string is passed in, the existing value will be deleted.', completer=FilesCompleter(), type=file_type, min_api='2021-03-01')
c.argument('enable_spot_restore', arg_type=get_three_state_flag(), min_api='2021-04-01',
help='Enable the Spot-Try-Restore feature where evicted VMSS SPOT instances will be tried to be restored opportunistically based on capacity availability and pricing constraints')
c.argument('spot_restore_timeout', min_api='2021-04-01',
help='Timeout value expressed as an ISO 8601 time duration after which the platform will not try to restore the VMSS SPOT instances')
c.argument('vm_sku', help='The new size of the virtual machine instances in the scale set. Default to "Standard_DS1_v2". See https://azure.microsoft.com/pricing/details/virtual-machines/ for size info.', is_preview=True)
c.argument('ephemeral_os_disk_placement', arg_type=ephemeral_placement_type,
help='Only applicable when used with `--vm-sku`. Allows you to choose the Ephemeral OS disk provisioning location.', is_preview=True)
c.argument('enable_secure_boot', enable_secure_boot_type)
c.argument('enable_vtpm', enable_vtpm_type)
with self.argument_context('vmss update', min_api='2018-10-01', arg_group='Automatic Repairs') as c:
c.argument('enable_automatic_repairs', arg_type=get_three_state_flag(), help='Enable automatic repairs')
c.argument(
'automatic_repairs_grace_period',
help='The amount of time (in minutes, between 30 and 90) for which automatic repairs are suspended due to a state change on VM.'
)
c.argument('automatic_repairs_action', arg_type=get_enum_type(['Replace', 'Restart', 'Reimage']), min_api='2021-11-01', help='Type of repair action that will be used for repairing unhealthy virtual machines in the scale set.')
for scope in ['vmss create', 'vmss update']:
with self.argument_context(scope) as c:
c.argument('terminate_notification_time', min_api='2019-03-01',
help='Length of time (in minutes, between 5 and 15) a notification to be sent to the VM on the instance metadata server till the VM gets deleted')
c.argument('max_batch_instance_percent', type=int, min_api='2020-12-01',
help='The maximum percent of total virtual machine instances that will be upgraded simultaneously by the rolling upgrade in one batch. Default: 20%')
c.argument('max_unhealthy_instance_percent', type=int, min_api='2020-12-01',
help='The maximum percentage of the total virtual machine instances in the scale set that can be simultaneously unhealthy. Default: 20%')
c.argument('max_unhealthy_upgraded_instance_percent', type=int, min_api='2020-12-01',
help='The maximum percentage of upgraded virtual machine instances that can be found to be in an unhealthy state. Default: 20%')
c.argument('pause_time_between_batches', min_api='2020-12-01',
help='The wait time between completing the update for all virtual machines in one batch and starting the next batch. Default: 0 seconds')
c.argument('enable_cross_zone_upgrade', arg_type=get_three_state_flag(), min_api='2020-12-01',
help='Set this Boolean property will allow VMSS to ignore AZ boundaries when constructing upgrade batches, and only consider Update Domain and maxBatchInstancePercent to determine the batch size')
c.argument('prioritize_unhealthy_instances', arg_type=get_three_state_flag(), min_api='2020-12-01',
help='Set this Boolean property will lead to all unhealthy instances in a scale set getting upgraded before any healthy instances')
for scope, help_prefix in [('vmss update', 'Update the'), ('vmss wait', 'Wait on the')]:
with self.argument_context(scope) as c:
c.argument('instance_id', id_part='child_name_1', help="{0} VM instance with this ID. If missing, {0} VMSS.".format(help_prefix))
for scope in ['vmss update-instances', 'vmss delete-instances']:
with self.argument_context(scope) as c:
c.argument('instance_ids', multi_ids_type, help='Space-separated list of IDs (ex: 1 2 3 ...) or * for all instances.')
with self.argument_context('vmss diagnostics') as c:
c.argument('vmss_name', id_part=None, help='Scale set name')
with self.argument_context('vmss disk') as c:
options_list = ['--vmss-name'] + [c.deprecate(target=opt, redirect='--vmss-name', hide=True)for opt in name_arg_type.settings['options_list']]
new_vmss_name_type = CLIArgumentType(overrides=vmss_name_type, options_list=options_list)
c.argument('lun', type=int, help='0-based logical unit number (LUN). Max value depends on the Virtual Machine instance size.')
c.argument('size_gb', options_list=['--size-gb', '-z'], help='size in GB. Max size: 4095 GB (certain preview disks can be larger).', type=int)
c.argument('vmss_name', new_vmss_name_type, completer=get_resource_name_completion_list('Microsoft.Compute/virtualMachineScaleSets'))
c.argument('disk', validator=validate_vmss_disk, help='existing disk name or ID to attach or detach from VM instances',
min_api='2017-12-01', completer=get_resource_name_completion_list('Microsoft.Compute/disks'))
c.argument('instance_id', help='Scale set VM instance id', min_api='2017-12-01')
c.argument('sku', arg_type=disk_sku, help='Underlying storage SKU')
with self.argument_context('vmss encryption') as c:
c.argument('vmss_name', vmss_name_type, completer=get_resource_name_completion_list('Microsoft.Compute/virtualMachineScaleSets'))
with self.argument_context('vmss extension') as c:
c.argument('extension_name', name_arg_type, help='Name of the extension.')
c.argument('vmss_name', vmss_name_type, options_list=['--vmss-name'], id_part=None)
with self.argument_context('vmss nic') as c:
c.argument('virtual_machine_scale_set_name', options_list=['--vmss-name'], help='Scale set name.', completer=get_resource_name_completion_list('Microsoft.Compute/virtualMachineScaleSets'), id_part='name')
c.argument('virtualmachine_index', options_list=['--instance-id'], id_part='child_name_1')
c.argument('network_interface_name', options_list=['--name', '-n'], metavar='NIC_NAME', help='The network interface (NIC).', completer=get_resource_name_completion_list('Microsoft.Network/networkInterfaces'), id_part='child_name_2')
with self.argument_context('vmss nic list') as c:
c.argument('virtual_machine_scale_set_name', arg_type=vmss_name_type, options_list=['--vmss-name'], id_part=None)
with self.argument_context('vmss set-orchestration-service-state') as c:
c.argument('service_name', arg_type=get_enum_type(OrchestrationServiceNames), help='The name of the orchestration service.')
c.argument('action', arg_type=get_enum_type(OrchestrationServiceStateAction), help='The action to be performed.')
# endregion
# region VM & VMSS Shared
for scope in ['vm', 'vmss']:
with self.argument_context(scope) as c:
c.argument('no_auto_upgrade',
options_list=['--no-auto-upgrade-minor-version', c.deprecate(target='--no-auto-upgrade', redirect='--no-auto-upgrade-minor-version')],
arg_type=get_three_state_flag(),
help='If set, the extension service will not automatically pick or upgrade to the latest minor version, even if the extension is redeployed.')
with self.argument_context('{} run-command'.format(scope)) as c:
c.argument('command_id', completer=get_vm_run_command_completion_list, help="The command id. Use 'az {} run-command list' to get the list".format(scope))
if scope == 'vmss':
c.argument('vmss_name', vmss_name_type)
with self.argument_context('{} run-command invoke'.format(scope)) as c:
c.argument('parameters', nargs='+', help="space-separated parameters in the format of '[name=]value'")
c.argument('scripts', nargs='+', help="Space-separated script lines. Use @{file} to load script from a file")
with self.argument_context('{} stop'.format(scope)) as c:
c.argument('skip_shutdown', action='store_true', help='Skip shutdown and power-off immediately.', min_api='2019-03-01')
run_cmd_name_type = CLIArgumentType(options_list=['--name', '--run-command-name'], help='The name of the virtual machine run command.')
run_cmd_vm_name = CLIArgumentType(options_list=['--vm-name'], help='The name of the virtual machine')
for scope in ['create', 'update']:
with self.argument_context('vm run-command {}'.format(scope)) as c:
c.argument('vm_name', run_cmd_vm_name)
c.argument('run_command_name', run_cmd_name_type)
c.argument('location', arg_type=get_location_type(self.cli_ctx), required=False,
validator=get_default_location_from_resource_group)
c.argument('tags', tags_type)
c.argument('script', help='Contain the powershell or bash script to execute on the VM.')
c.argument('script_uri', help='Contain a uri to the script to execute on the VM. Uri can be any link accessible from the VM or a storage blob without SAS. If subscription has access to the storage blob, then SAS will be auto-generated. ')
c.argument('command_id', help='Specify a command id of predefined script. All command ids can be listed using "list" command.')
c.argument('parameters', nargs='+', help='Set custom parameters in a name-value pair.')
c.argument('protected_parameters', nargs='+', help='Set custom parameters in a name-value pair. These parameters will be encrypted during transmission and will not be logged.')
c.argument('async_execution', arg_type=get_three_state_flag(), help='Optional. If set to true, provisioning '
'will complete as soon as the script starts and will not wait for script to complete.')
c.argument('run_as_user', help='By default script process runs under system/root user. Specify custom user to host the process.')
c.argument('run_as_password', help='Password if needed for using run-as-user parameter. It will be encrypted and not logged. ')
c.argument('timeout_in_seconds', type=int, help='The timeout in seconds to execute the run command.')
c.argument('output_blob_uri', help='Specify the Azure storage blob where script output stream will be uploaded.')
c.argument('error_blob_uri', help='Specify the Azure storage blob where script error stream will be uploaded.')
with self.argument_context('vm run-command delete') as c:
c.argument('vm_name', run_cmd_vm_name)
c.argument('run_command_name', run_cmd_name_type)
with self.argument_context('vm run-command list') as c:
c.argument('vm_name', run_cmd_vm_name, id_part=None)
c.argument('expand', help='The expand expression to apply on the operation.')
c.argument('location', arg_type=get_location_type(self.cli_ctx))
with self.argument_context('vm run-command show') as c:
c.argument('vm_name', run_cmd_vm_name)
c.argument('run_command_name', run_cmd_name_type)
c.argument('expand', help='The expand expression to apply on the operation.', deprecate_info=c.deprecate(hide=True))
c.argument('instance_view', action='store_true', help='The instance view of a run command.')
c.argument('location', arg_type=get_location_type(self.cli_ctx))
c.argument('command_id', help='The command id.')
with self.argument_context('vm run-command wait') as c:
c.argument('vm_name', run_cmd_vm_name)
c.argument('run_command_name', run_cmd_name_type)
c.argument('expand', help='The expand expression to apply on the operation.', deprecate_info=c.deprecate(hide=True))
c.argument('instance_view', action='store_true', help='The instance view of a run command.')
c.argument('location', arg_type=get_location_type(self.cli_ctx))
c.argument('command_id', help='The command id.')
run_cmd_vmss_name = CLIArgumentType(options_list=['--vmss-name'], help='The name of the VM scale set.')
for scope in ['create', 'update']:
with self.argument_context('vmss run-command {}'.format(scope)) as c:
c.argument('vmss_name', run_cmd_vmss_name)
c.argument('instance_id', help='The instance ID of the virtual machine.')
c.argument('run_command_name', run_cmd_name_type)
c.argument('location', arg_type=get_location_type(self.cli_ctx), required=False,
validator=get_default_location_from_resource_group)
c.argument('tags', tags_type)
c.argument('script', help='Contain the powershell or bash script to execute on the VM.')
c.argument('script_uri',
help='Contain a uri to the script to execute on the VM. Uri can be any link accessible from the VM or a storage blob without SAS. If subscription has access to the storage blob, then SAS will be auto-generated. ')
c.argument('command_id',
help='Specify a command id of predefined script. All command ids can be listed using "list" command.')
c.argument('parameters', nargs='+', help='Set custom parameters in a name-value pair.')
c.argument('protected_parameters', nargs='+',
help='Set custom parameters in a name-value pair. These parameters will be encrypted during transmission and will not be logged.')
c.argument('async_execution', arg_type=get_three_state_flag(), help='Optional. If set to true, provisioning '
'will complete as soon as the script starts and will not wait for script to complete.')
c.argument('run_as_user',
help='By default script process runs under system/root user. Specify custom user to host the process.')
c.argument('run_as_password',
help='Password if needed for using run-as-user parameter. It will be encrypted and not logged. ')
c.argument('timeout_in_seconds', type=int, help='The timeout in seconds to execute the run command.')
c.argument('output_blob_uri', help='Uri (without SAS) to an append blob where the script output will be uploaded.')
c.argument('error_blob_uri', help='Uri (without SAS) to an append blob where the script error stream will be uploaded.')
with self.argument_context('vmss run-command delete') as c:
c.argument('vmss_name', run_cmd_vmss_name)
c.argument('instance_id', help='The instance ID of the virtual machine.')
c.argument('run_command_name', run_cmd_name_type)
with self.argument_context('vmss run-command list') as c:
c.argument('vmss_name', run_cmd_vmss_name, id_part=None)
c.argument('instance_id', help='The instance ID of the virtual machine.')
c.argument('expand', help='The expand expression to apply on the operation.')
with self.argument_context('vmss run-command show') as c:
c.argument('vmss_name', run_cmd_vmss_name)
c.argument('instance_id', help='The instance ID of the virtual machine.')
c.argument('run_command_name', run_cmd_name_type)
c.argument('expand', help='The expand expression to apply on the operation.', deprecate_info=c.deprecate(hide=True))
c.argument('instance_view', action='store_true', help='The instance view of a run command.')
for scope in ['vm identity assign', 'vmss identity assign']:
with self.argument_context(scope) as c:
c.argument('assign_identity', options_list=['--identities'], nargs='*', help="Space-separated identities to assign. Use '{0}' to refer to the system assigned identity. Default: '{0}'".format(MSI_LOCAL_ID))
c.argument('vm_name', existing_vm_name)
c.argument('vmss_name', vmss_name_type)
for scope in ['vm identity remove', 'vmss identity remove']:
with self.argument_context(scope) as c:
c.argument('identities', nargs='+', help="Space-separated identities to remove. Use '{0}' to refer to the system assigned identity. Default: '{0}'".format(MSI_LOCAL_ID))
c.argument('vm_name', existing_vm_name)
c.argument('vmss_name', vmss_name_type)
for scope in ['vm identity show', 'vmss identity show']:
with self.argument_context(scope) as c:
c.argument('vm_name', existing_vm_name)
c.argument('vmss_name', vmss_name_type)
for scope in ['vm application set', 'vmss application set']:
with self.argument_context(scope) as c:
c.argument('vm', existing_vm_name)
c.argument('vmss_name', vmss_name_type)
c.argument('application_version_ids', options_list=['--app-version-ids'], nargs='*', help="Space-separated application version ids to set to VM.")
c.argument('order_applications', action='store_true', help='Whether to set order index at each gallery application. If specified, the first app version id gets specified an order = 1, then the next one 2, and so on. This parameter is meant to be used when the VMApplications specified by app version ids must be installed in a particular order; the lowest order is installed first.')
c.argument('application_configuration_overrides', options_list=['--app-config-overrides'], nargs='*',
help='Space-separated application configuration overrides for each application version ids. '
'It should have the same number of items as the application version ids. Null is available for a application '
'which does not have a configuration override.')
c.argument('treat_deployment_as_failure', nargs='*', help="Space-separated list of true or false corresponding to the application version ids. If set to true, failure to install or update gallery application version operation will fail this operation")
for scope in ['vm application list', 'vmss application list']:
with self.argument_context(scope) as c:
c.argument('vm_name', options_list=['--vm-name', '--name', '-n'], arg_type=existing_vm_name, id_part=None)
c.argument('vmss_name', vmss_name_type, id_part=None)
for scope in ['vm create', 'vmss create']:
with self.argument_context(scope) as c:
c.argument('location', get_location_type(self.cli_ctx), help='Location in which to create VM and related resources. If default location is not configured, will default to the resource group\'s location')
c.argument('tags', tags_type)
c.argument('no_wait', help='Do not wait for the long-running operation to finish.')
c.argument('validate', options_list=['--validate'], help='Generate and validate the ARM template without creating any resources.', action='store_true')
c.argument('size', help='The VM size to be created. See https://azure.microsoft.com/pricing/details/virtual-machines/ for size info.')
c.argument('image', completer=get_urn_aliases_completion_list)
c.argument('custom_data', help='Custom init script file or text (cloud-init, cloud-config, etc..)', completer=FilesCompleter(), type=file_type)
c.argument('secrets', multi_ids_type, help='One or many Key Vault secrets as JSON strings or files via `@{path}` containing `[{ "sourceVault": { "id": "value" }, "vaultCertificates": [{ "certificateUrl": "value", "certificateStore": "cert store name (only on windows)"}] }]`', type=file_type, completer=FilesCompleter())
c.argument('assign_identity', nargs='*', arg_group='Managed Service Identity', help="accept system or user assigned identities separated by spaces. Use '[system]' to refer system assigned identity, or a resource id to refer user assigned identity. Check out help for more examples")
c.ignore('aux_subscriptions')
c.argument('edge_zone', edge_zone_type)
c.argument('accept_term', action='store_true', help="Accept the license agreement and privacy statement.")
c.argument('disable_integrity_monitoring', action='store_true', min_api='2020-12-01', help='Disable the default behavior of installing guest attestation extension and enabling System Assigned Identity for Trusted Launch enabled VMs and VMSS.')
c.argument('os_disk_security_encryption_type', arg_type=get_enum_type(self.get_models('SecurityEncryptionTypes')), min_api='2021-11-01', help='Specify the encryption type of the OS managed disk.')
c.argument('os_disk_secure_vm_disk_encryption_set', min_api='2021-11-01', help='Specify the customer managed disk encryption set resource ID or name for the managed disk that is used for customer managed key encrypted Confidential VM OS disk and VM guest blob.')
with self.argument_context(scope, arg_group='Authentication') as c:
c.argument('generate_ssh_keys', action='store_true', help='Generate SSH public and private key files if missing. The keys will be stored in the ~/.ssh directory')
c.argument('admin_username', help='Username for the VM. Default value is current username of OS. If the default value is system reserved, then default value will be set to azureuser. Please refer to https://docs.microsoft.com/rest/api/compute/virtualmachines/createorupdate#osprofile to get a full list of reserved values.')
c.argument('admin_password', help="Password for the VM if authentication type is 'Password'.")
c.argument('ssh_key_value', options_list=['--ssh-key-values'], completer=FilesCompleter(), type=file_type, nargs='+')
c.argument('ssh_dest_key_path', help='Destination file path on the VM for the SSH key. If the file already exists, the specified key(s) are appended to the file. Destination path for SSH public keys is currently limited to its default value "/home/username/.ssh/authorized_keys" due to a known issue in Linux provisioning agent.')
c.argument('authentication_type', help='Type of authentication to use with the VM. Defaults to password for Windows and SSH public key for Linux. "all" enables both ssh and password authentication. ', arg_type=get_enum_type(['ssh', 'password', 'all']))
with self.argument_context(scope, arg_group='Storage') as c:
if DiskStorageAccountTypes:
allowed_values = ", ".join([sku.value for sku in DiskStorageAccountTypes])
else:
allowed_values = ", ".join(['Premium_LRS', 'Standard_LRS'])
usage = 'Usage: [--storage-sku SKU | --storage-sku ID=SKU ID=SKU ID=SKU...], where each ID is "os" or a 0-indexed lun.'
allowed_values = 'Allowed values: {}.'.format(allowed_values)
storage_sku_help = 'The SKU of the storage account with which to persist VM. Use a singular sku that would be applied across all disks, ' \
'or specify individual disks. {} {}'.format(usage, allowed_values)
c.argument('os_disk_name', help='The name of the new VM OS disk.')
c.argument('os_type', help='Type of OS installed on a custom VHD. Do not use when specifying an URN or URN alias.', arg_type=get_enum_type(['windows', 'linux']))
c.argument('storage_account', help="Only applicable when used with `--use-unmanaged-disk`. The name to use when creating a new storage account or referencing an existing one. If omitted, an appropriate storage account in the same resource group and location will be used, or a new one will be created.")
c.argument('storage_sku', nargs='+', help=storage_sku_help)
c.argument('storage_container_name', help="Only applicable when used with `--use-unmanaged-disk`. Name of the storage container for the VM OS disk. Default: vhds")
c.ignore('os_publisher', 'os_offer', 'os_sku', 'os_version', 'storage_profile')
c.argument('use_unmanaged_disk', action='store_true', help='Do not use managed disk to persist VM')
c.argument('os_disk_size_gb', type=int, help='OS disk size in GB to create.')
c.argument('data_disk_sizes_gb', nargs='+', type=int, help='space-separated empty managed data disk sizes in GB to create')
c.ignore('disk_info', 'storage_account_type', 'public_ip_address_type', 'nsg_type', 'nic_type', 'vnet_type', 'load_balancer_type', 'app_gateway_type')
c.argument('os_caching', options_list=[self.deprecate(target='--storage-caching', redirect='--os-disk-caching', hide=True), '--os-disk-caching'], help='Storage caching type for the VM OS disk. Default: ReadWrite', arg_type=get_enum_type(CachingTypes))
c.argument('data_caching', options_list=['--data-disk-caching'], nargs='+',
help="storage caching type for data disk(s), including 'None', 'ReadOnly', 'ReadWrite', etc. Use a singular value to apply on all disks, or use `<lun>=<vaule1> <lun>=<value2>` to configure individual disk")
c.argument('ultra_ssd_enabled', ultra_ssd_enabled_type)
c.argument('ephemeral_os_disk', arg_type=get_three_state_flag(), min_api='2018-06-01',
help='Allows you to create an OS disk directly on the host node, providing local disk performance and faster VM/VMSS reimage time.', is_preview=True)
c.argument('ephemeral_os_disk_placement', arg_type=ephemeral_placement_type,
help='Only applicable when used with `--ephemeral-os-disk`. Allows you to choose the Ephemeral OS disk provisioning location.', is_preview=True)
c.argument('os_disk_encryption_set', min_api='2019-07-01', help='Name or ID of disk encryption set for OS disk.')
c.argument('data_disk_encryption_sets', nargs='+', min_api='2019-07-01',
help='Names or IDs (space delimited) of disk encryption sets for data disks.')
c.argument('data_disk_iops', min_api='2019-07-01', nargs='+', type=int, help='Specify the Read-Write IOPS (space delimited) for the managed disk. Should be used only when StorageAccountType is UltraSSD_LRS. If not specified, a default value would be assigned based on diskSizeGB.')
c.argument('data_disk_mbps', min_api='2019-07-01', nargs='+', type=int, help='Specify the bandwidth in MB per second (space delimited) for the managed disk. Should be used only when StorageAccountType is UltraSSD_LRS. If not specified, a default value would be assigned based on diskSizeGB.')
c.argument('specialized', arg_type=get_three_state_flag(), help='Indicate whether the source image is specialized.')
c.argument('encryption_at_host', arg_type=get_three_state_flag(), help='Enable Host Encryption for the VM or VMSS. This will enable the encryption for all the disks including Resource/Temp disk at host itself.')
c.argument('os_disk_delete_option', arg_type=get_enum_type(self.get_models('DiskDeleteOptionTypes')), min_api='2021-03-01',
help='Specify the behavior of the managed disk when the VM gets deleted i.e whether the managed disk is deleted or detached.')
c.argument('data_disk_delete_option', options_list=['--data-disk-delete-option', self.deprecate(target='--data-delete-option', redirect='--data-disk-delete-option', hide=True)],
nargs='+', min_api='2021-03-01',
help='Specify whether data disk should be deleted or detached upon VM deletion. If a single data disk is attached, the allowed values are Delete and Detach. For multiple data disks are attached, please use "<data_disk>=Delete <data_disk2>=Detach" to configure each disk')
if scope == 'vmss create':
c.argument('os_disk_delete_option', arg_type=get_enum_type(self.get_models('DiskDeleteOptionTypes')), min_api='2022-03-01', help='Specify whether OS Disk should be deleted or detached upon VMSS Flexdeletion (This feature is available for VMSS with Flexible OrchestrationMode only).')
c.argument('data_disk_delete_option', arg_type=get_enum_type(self.get_models('DiskDeleteOptionTypes')), min_api='2022-03-01', help='Specify whether data disk should be deleted or detached upon VMSS Flexdeletion (This feature is available for VMSS with Flexible OrchestrationMode only)')
with self.argument_context(scope, arg_group='Network') as c:
c.argument('vnet_name', help='Name of the virtual network when creating a new one or referencing an existing one.')
c.argument('vnet_address_prefix', help='The IP address prefix to use when creating a new VNet in CIDR format.')
c.argument('subnet', help='The name of the subnet when creating a new VNet or referencing an existing one. Can also reference an existing subnet by ID. If both vnet-name and subnet are omitted, an appropriate VNet and subnet will be selected automatically, or a new one will be created.')
c.argument('subnet_address_prefix', help='The subnet IP address prefix to use when creating a new VNet in CIDR format.')
c.argument('nics', nargs='+', help='Names or IDs of existing NICs to attach to the VM. The first NIC will be designated as primary. If omitted, a new NIC will be created. If an existing NIC is specified, do not specify subnet, VNet, public IP or NSG.')
c.argument('private_ip_address', help='Static private IP address (e.g. 10.0.0.5).')
c.argument('public_ip_address', help='Name of the public IP address when creating one (default) or referencing an existing one. Can also reference an existing public IP by ID or specify "" for None (\'""\' in Azure CLI using PowerShell or --% operator).')
c.argument('public_ip_address_allocation', help=None, default=None, arg_type=get_enum_type(['dynamic', 'static']))
c.argument('public_ip_address_dns_name', help='Globally unique DNS name for a newly created public IP.')
if self.supported_api_version(min_api='2017-08-01', resource_type=ResourceType.MGMT_NETWORK):
PublicIPAddressSkuName = self.get_models('PublicIPAddressSkuName', resource_type=ResourceType.MGMT_NETWORK)
c.argument('public_ip_sku', help='Public IP SKU. It is set to Basic by default. The public IP is supported to be created on edge zone only when it is \'Standard\'',
default=None, arg_type=get_enum_type(PublicIPAddressSkuName))
c.argument('nic_delete_option', nargs='+', min_api='2021-03-01',
help='Specify what happens to the network interface when the VM is deleted. Use a singular '
'value to apply on all resources, or use <Name>=<Value> to configure '
'the delete behavior for individual resources. Possible options are Delete and Detach.')
with self.argument_context(scope, arg_group='Marketplace Image Plan') as c:
c.argument('plan_name', help='plan name')
c.argument('plan_product', help='plan product')
c.argument('plan_publisher', help='plan publisher')
c.argument('plan_promotion_code', help='plan promotion code')
for scope in ['vm create', 'vmss create', 'vm identity assign', 'vmss identity assign']:
with self.argument_context(scope) as c:
arg_group = 'Managed Service Identity' if scope.split()[-1] == 'create' else None
c.argument('identity_scope', options_list=['--scope'], arg_group=arg_group,
help="Scope that the system assigned identity can access. ")
c.ignore('identity_role_id')
for scope in ['vm create', 'vmss create']:
with self.argument_context(scope) as c:
c.argument('identity_role', options_list=['--role'], arg_group='Managed Service Identity',
help='Role name or id the system assigned identity will have. ')
for scope in ['vm identity assign', 'vmss identity assign']:
with self.argument_context(scope) as c:
c.argument('identity_role', options_list=['--role'], help="Role name or id the system assigned identity will have")
with self.argument_context('vm auto-shutdown') as c:
c.argument('off', action='store_true', help='Turn off auto-shutdown for VM. Configuration will be cleared.')
c.argument('email', help='The email recipient to send notifications to (can be a list of semi-colon separated email addresses)')
c.argument('time', help='The UTC time of day the schedule will occur every day. Format: hhmm. Example: 1730')
c.argument('webhook', help='The webhook URL to which the notification will be sent')
c.argument('location', validator=get_default_location_from_resource_group)
for scope in ['vm diagnostics', 'vmss diagnostics']:
with self.argument_context(scope) as c:
c.argument('version', help='version of the diagnostics extension. Will use the latest if not specfied')
c.argument('settings', help='json string or a file path, which defines data to be collected.', type=validate_file_or_dict, completer=FilesCompleter())
c.argument('protected_settings', help='json string or a file path containing private configurations such as storage account keys, etc.', type=validate_file_or_dict, completer=FilesCompleter())
c.argument('is_windows_os', action='store_true', help='for Windows VMs')
for scope in ['vm encryption', 'vmss encryption']:
with self.argument_context(scope) as c:
c.argument('volume_type', help='Type of volume that the encryption operation is performed on', arg_type=get_enum_type(['DATA', 'OS', 'ALL']))
c.argument('force', action='store_true', help='continue by ignoring client side validation errors')
c.argument('disk_encryption_keyvault', help='Name or ID of the key vault where the generated encryption key will be placed.')
c.argument('key_encryption_key', help='Key vault key name or URL used to encrypt the disk encryption key.')
c.argument('key_encryption_keyvault', help='Name or ID of the key vault containing the key encryption key used to encrypt the disk encryption key. If missing, CLI will use `--disk-encryption-keyvault`.')
for scope in ['vm extension', 'vmss extension']:
with self.argument_context(scope) as c:
c.argument('publisher', help='The name of the extension publisher.')
c.argument('settings', type=validate_file_or_dict, help='Extension settings in JSON format. A JSON file path is also accepted.')
c.argument('protected_settings', type=validate_file_or_dict, help='Protected settings in JSON format for sensitive information like credentials. A JSON file path is also accepted.')
c.argument('version', help='The version of the extension. To pin extension version to this value, please specify --no-auto-upgrade-minor-version.')
c.argument('enable_auto_upgrade', arg_type=get_three_state_flag(),
help='Indicate the extension should be automatically upgraded by the platform if there is a newer version of the extension available.')
with self.argument_context('vm extension set') as c:
c.argument('vm_extension_name', name_arg_type,
completer=get_resource_name_completion_list('Microsoft.Compute/virtualMachines/extensions'),
help='Name of the extension.', id_part=None)
c.argument('force_update', action='store_true', help='force to update even if the extension configuration has not changed.')
c.argument('extension_instance_name', extension_instance_name_type)
with self.argument_context('vmss extension set', min_api='2017-12-01') as c:
c.argument('force_update', action='store_true', help='force to update even if the extension configuration has not changed.')
c.argument('extension_instance_name', extension_instance_name_type)
c.argument('provision_after_extensions', nargs='+', help='Space-separated list of extension names after which this extension should be provisioned. These extensions must already be set on the vm.')
for scope in ['vm extension image', 'vmss extension image']:
with self.argument_context(scope) as c:
c.argument('image_location', options_list=['--location', '-l'], help='Image location.')
c.argument('name', help='Image name', id_part=None)
c.argument('publisher_name', options_list=['--publisher', '-p'], help='Image publisher name')
c.argument('type', options_list=['--name', '-n'], help='Name of the extension')
c.argument('latest', action='store_true', help='Show the latest version only.')
c.argument('version', help='Extension version')
c.argument('orderby', help="the $orderby odata query option")
c.argument('top', help='the $top odata query option')
for scope in ['vm create', 'vm update', 'vmss create', 'vmss update']:
with self.argument_context(scope) as c:
c.argument('license_type', license_type)
c.argument('priority', resource_type=ResourceType.MGMT_COMPUTE, min_api='2019-03-01',
arg_type=get_enum_type(self.get_models('VirtualMachinePriorityTypes'), default=None),
help="Priority. Use 'Spot' to run short-lived workloads in a cost-effective way. 'Low' enum will be deprecated in the future. Please use 'Spot' to deploy Azure spot VM and/or VMSS. Default to Regular.")
c.argument('max_price', min_api='2019-03-01', type=float, is_preview=True,
help='The maximum price (in US Dollars) you are willing to pay for a Spot VM/VMSS. -1 indicates that the Spot VM/VMSS should not be evicted for price reasons')
c.argument('capacity_reservation_group', options_list=['--capacity-reservation-group', '--crg'],
help='The ID or name of the capacity reservation group that is used to allocate. Pass in "None" to disassociate the capacity reservation group. Please note that if you want to delete a VM/VMSS that has been associated with capacity reservation group, you need to disassociate the capacity reservation group first.',
min_api='2021-04-01', is_preview=True)
c.argument('v_cpus_available', type=int, min_api='2021-11-01', help='Specify the number of vCPUs available')
c.argument('v_cpus_per_core', type=int, min_api='2021-11-01', help='Specify the ratio of vCPU to physical core. Setting this property to 1 also means that hyper-threading is disabled.')
with self.argument_context('vm update') as c:
c.argument('license_type', license_type)
c.argument('user_data', help='UserData for the VM. It can be passed in as file or string. If empty string is passed in, the existing value will be deleted.', completer=FilesCompleter(), type=file_type, min_api='2021-03-01')
with self.argument_context('vmss create') as c:
c.argument('priority', resource_type=ResourceType.MGMT_COMPUTE, min_api='2017-12-01',
arg_type=get_enum_type(self.get_models('VirtualMachinePriorityTypes'), default=None),
help="Priority. Use 'Spot' to run short-lived workloads in a cost-effective way. 'Low' enum will be deprecated in the future. Please use 'Spot' to deploy Azure spot VM and/or VMSS. Default to Regular.")
with self.argument_context('sig') as c:
c.argument('gallery_name', options_list=['--gallery-name', '-r'], help='gallery name')
c.argument('gallery_image_name', options_list=['--gallery-image-definition', '-i'], help='gallery image definition')
c.argument('gallery_image_version', options_list=['--gallery-image-version', '-e'], help='gallery image version')
for scope in ['sig show', 'sig image-definition show', 'sig image-definition delete']:
with self.argument_context(scope) as c:
c.argument('gallery_name', options_list=['--gallery-name', '-r'], id_part='name', help='gallery name')
c.argument('gallery_image_name', options_list=['--gallery-image-definition', '-i'], id_part='child_name_1', help='gallery image definition')
with self.argument_context('sig show') as c:
c.argument('select', help='The select expression to apply on the operation.')
c.argument('sharing_groups', action='store_true', help='The expand query option to query shared gallery groups')
with self.argument_context('sig list-shared') as c:
c.argument('location', arg_type=get_location_type(self.cli_ctx))
c.argument('shared_to', shared_to_type)
with self.argument_context('sig show-shared') as c:
c.argument('location', arg_type=get_location_type(self.cli_ctx), id_part='name')
c.argument('gallery_unique_name', type=str, help='The unique name of the Shared Gallery.',
id_part='child_name_1')
for scope in ['sig share add', 'sig share remove']:
with self.argument_context(scope) as c:
c.argument('gallery_name', type=str, help='The name of the Shared Image Gallery.', id_part='name')
c.argument('subscription_ids', nargs='+', help='A list of subscription ids to share the gallery.')
c.argument('tenant_ids', nargs='+', help='A list of tenant ids to share the gallery.')
with self.argument_context('sig share add') as c:
c.argument('op_type', default='Add', deprecate_info=c.deprecate(hide=True),
help='distinguish add operation and remove operation')
with self.argument_context('sig share remove') as c:
c.argument('op_type', default='Remove', deprecate_info=c.deprecate(hide=True),
help='distinguish add operation and remove operation')
with self.argument_context('sig share reset') as c:
c.argument('gallery_name', type=str, help='The name of the Shared Image Gallery.', id_part='name')
with self.argument_context('sig image-definition create') as c:
c.argument('offer', options_list=['--offer', '-f'], help='image offer')
c.argument('sku', options_list=['--sku', '-s'], help='image sku')
c.argument('publisher', options_list=['--publisher', '-p'], help='image publisher')
c.argument('os_type', arg_type=get_enum_type(['Windows', 'Linux']), help='the type of the OS that is included in the disk if creating a VM from user-image or a specialized VHD')
c.argument('os_state', arg_type=get_enum_type(self.get_models('OperatingSystemStateTypes')), help="This property allows the user to specify whether the virtual machines created under this image are 'Generalized' or 'Specialized'.")
c.argument('hyper_v_generation', arg_type=get_enum_type(self.get_models('HyperVGenerationTypes')), help='The hypervisor generation of the Virtual Machine. Applicable to OS disks only.')
c.argument('minimum_cpu_core', type=int, arg_group='Recommendation', help='minimum cpu cores')
c.argument('maximum_cpu_core', type=int, arg_group='Recommendation', help='maximum cpu cores')
c.argument('minimum_memory', type=int, arg_group='Recommendation', help='minimum memory in MB')
c.argument('maximum_memory', type=int, arg_group='Recommendation', help='maximum memory in MB')
c.argument('plan_publisher', help='plan publisher', arg_group='Purchase plan')
c.argument('plan_name', help='plan name', arg_group='Purchase plan')
c.argument('plan_product', help='plan product', arg_group='Purchase plan')
c.argument('eula', help='The Eula agreement for the gallery image')
c.argument('privacy_statement_uri', help='The privacy statement uri')
c.argument('release_note_uri', help='The release note uri')
c.argument('end_of_life_date', help="the end of life date, e.g. '2020-12-31'")
c.argument('disallowed_disk_types', nargs='*', help='disk types which would not work with the image, e.g., Standard_LRS')
c.argument('features', help='A list of gallery image features. E.g. "IsSecureBootSupported=true IsMeasuredBootSupported=false"')
c.argument('architecture', arg_type=get_enum_type(self.get_models('Architecture', operation_group='gallery_images')), min_api='2021-10-01', help='CPU architecture.')
with self.argument_context('sig image-definition list-shared') as c:
c.argument('location', arg_type=get_location_type(self.cli_ctx), id_part='name')
c.argument('gallery_unique_name', type=str, help='The unique name of the Shared Gallery.',
id_part='child_name_1')
c.argument('shared_to', shared_to_type)
c.argument('marker', arg_type=marker_type)
c.argument('show_next_marker', action='store_true', help='Show nextMarker in result when specified.')
with self.argument_context('sig image-definition show-shared') as c:
c.argument('location', arg_type=get_location_type(self.cli_ctx), id_part='name')
c.argument('gallery_unique_name', type=str, help='The unique name of the Shared Gallery.',
id_part='child_name_1')
c.argument('gallery_image_name', options_list=['--gallery-image-definition', '-i'], type=str, help='The name '
'of the Shared Gallery Image Definition from which the Image Versions are to be listed.',
id_part='child_name_2')
with self.argument_context('sig create') as c:
c.argument('description', help='the description of the gallery')
c.argument('permissions', arg_type=get_enum_type(GallerySharingPermissionTypes), arg_group='Sharing Profile',
min_api='2020-09-30', help='This property allows you to specify the permission of sharing gallery.')
c.argument('soft_delete', arg_type=get_three_state_flag(), min_api='2021-03-01', is_preview=True,
help='Enable soft-deletion for resources in this gallery, '
'allowing them to be recovered within retention time.')
c.argument('publisher_uri', help='Community gallery publisher uri.')
c.argument('publisher_contact', options_list=['--publisher-email'], help='Community gallery publisher contact email.')
c.argument('eula', help='Community gallery publisher eula.')
c.argument('public_name_prefix', help='Community gallery public name prefix.')
with self.argument_context('sig update') as c:
c.ignore('gallery')
c.argument('permissions', arg_type=get_enum_type(GallerySharingPermissionTypes), arg_group='Sharing Profile',
min_api='2020-09-30', help='This property allows you to specify the permission of sharing gallery.')
c.argument('soft_delete', arg_type=get_three_state_flag(), min_api='2021-03-01', is_preview=True,
help='Enable soft-deletion for resources in this gallery, '
'allowing them to be recovered within retention time.')
with self.argument_context('sig image-definition create') as c:
c.argument('description', help='the description of the gallery image definition')
with self.argument_context('sig image-definition update') as c:
c.ignore('gallery_image')
with self.argument_context('sig image-version') as c:
deprecated_option = c.deprecate(target='--gallery-image-version-name', redirect='--gallery-image-version', hide=True, expiration="3.0.0")
c.argument('gallery_image_version_name', options_list=['--gallery-image-version', '-e', deprecated_option],
help='Gallery image version in semantic version pattern. The allowed characters are digit and period. Digits must be within the range of a 32-bit integer, e.g. `<MajorVersion>.<MinorVersion>.<Patch>`')
with self.argument_context('sig image-version create', resource_type=ResourceType.MGMT_COMPUTE, operation_group='gallery_image_versions') as c:
c.argument('gallery_image_version', options_list=['--gallery-image-version', '-e'],
help='Gallery image version in semantic version pattern. The allowed characters are digit and period. Digits must be within the range of a 32-bit integer, e.g. `<MajorVersion>.<MinorVersion>.<Patch>`')
c.argument('description', help='the description of the gallery image version')
c.argument('managed_image', help='image name(if in the same resource group) or resource id')
c.argument('os_snapshot', help='Name or ID of OS disk snapshot')
c.argument('data_snapshots', nargs='+', help='Names or IDs (space-delimited) of data disk snapshots')
c.argument('data_snapshot_luns', nargs='+', help='Logical unit numbers (space-delimited) of data disk snapshots')
c.argument('exclude_from_latest', arg_type=get_three_state_flag(), help='The flag means that if it is set to true, people deploying VMs with version omitted will not use this version.')
c.argument('version', help='image version')
c.argument('end_of_life_date', help="the end of life date, e.g. '2020-12-31'")
c.argument('storage_account_type', help="The default storage account type to be used per region. To set regional storage account types, use --target-regions",
arg_type=get_enum_type(["Standard_LRS", "Standard_ZRS", "Premium_LRS"]), min_api='2019-03-01')
c.argument('target_region_encryption', nargs='+',
help='Space-separated list of customer managed keys for encrypting the OS and data disks in the gallery artifact for each region. Format for each region: `<os_des>,<lun1>,<lun1_des>,<lun2>,<lun2_des>`. Use "null" as a placeholder.')
c.argument('os_vhd_uri', help='Source VHD URI of OS disk')
c.argument('os_vhd_storage_account', help='Name or ID of storage account of source VHD URI of OS disk')
c.argument('data_vhds_uris', nargs='+', help='Source VHD URIs (space-delimited) of data disks')
c.argument('data_vhds_luns', nargs='+', help='Logical unit numbers (space-delimited) of source VHD URIs of data disks')
c.argument('data_vhds_storage_accounts', options_list=['--data-vhds-storage-accounts', '--data-vhds-sa'], nargs='+', help='Names or IDs (space-delimited) of storage accounts of source VHD URIs of data disks')
c.argument('replication_mode', min_api='2021-07-01', arg_type=get_enum_type(ReplicationMode), help='Optional parameter which specifies the mode to be used for replication. This property is not updatable.')
c.argument('target_region_cvm_encryption', nargs='+', min_api='2021-10-01', help='Space-separated list of customer managed key for Confidential VM encrypting the OS disk in the gallery artifact for each region. Format for each region: `<os_cvm_encryption_type>,<os_cvm_des>`. The valid values for os_cvm_encryption_type are EncryptedVMGuestStateOnlyWithPmk, EncryptedWithPmk, EncryptedWithCmk.')
c.argument('virtual_machine', help='Resource id of VM source')
c.argument('image_version', help='Resource id of gallery image version source')
with self.argument_context('sig image-version list-shared') as c:
c.argument('location', arg_type=get_location_type(self.cli_ctx), id_part='name')
c.argument('gallery_unique_name', type=str, help='The unique name of the Shared Gallery.',
id_part='child_name_1')
c.argument('gallery_image_name', options_list=['--gallery-image-definition', '-i'], type=str, help='The name '
'of the Shared Gallery Image Definition from which the Image Versions are to be listed.',
id_part='child_name_2')
c.argument('shared_to', shared_to_type)
c.argument('marker', arg_type=marker_type)
c.argument('show_next_marker', action='store_true', help='Show nextMarker in result when specified.')
with self.argument_context('sig image-version show') as c:
c.argument('expand', help="The expand expression to apply on the operation, e.g. 'ReplicationStatus'")
with self.argument_context('sig image-version show-shared') as c:
c.argument('location', arg_type=get_location_type(self.cli_ctx), id_part='name')
c.argument('gallery_unique_name', type=str, help='The unique name of the Shared Gallery.',
id_part='child_name_1')
c.argument('gallery_image_name', options_list=['--gallery-image-definition', '-i'], type=str, help='The name '
'of the Shared Gallery Image Definition from which the Image Versions are to be listed.',
id_part='child_name_2')
c.argument('gallery_image_version_name', options_list=['--gallery-image-version', '-e'], type=str, help='The '
'name of the gallery image version to be created. Needs to follow semantic version name pattern: '
'The allowed characters are digit and period. Digits must be within the range of a 32-bit integer. '
'Format: <MajorVersion>.<MinorVersion>.<Patch>', id_part='child_name_3')
for scope in ['sig image-version create', 'sig image-version update']:
with self.argument_context(scope) as c:
c.argument('target_regions', nargs='*',
help='Space-separated list of regions and their replica counts. Use `<region>[=<replica count>][=<storage account type>]` to optionally set the replica count and/or storage account type for each region. '
'If a replica count is not specified, the default replica count will be used. If a storage account type is not specified, the default storage account type will be used')
c.argument('replica_count', help='The default number of replicas to be created per region. To set regional replication counts, use --target-regions', type=int)
with self.argument_context('sig show-community') as c:
c.argument('location', arg_type=get_location_type(self.cli_ctx), id_part='name')
c.argument('public_gallery_name', public_gallery_name_type)
with self.argument_context('sig list-community') as c:
c.argument('location', arg_type=get_location_type(self.cli_ctx))
c.argument('marker', arg_type=marker_type)
c.argument('show_next_marker', action='store_true', help='Show nextMarker in result when specified.')
with self.argument_context('sig image-definition show-community') as c:
c.argument('location', arg_type=get_location_type(self.cli_ctx), id_part='name')
c.argument('public_gallery_name', public_gallery_name_type)
c.argument('gallery_image_name', gallery_image_name_type)
with self.argument_context('sig image-definition list-community') as c:
c.argument('location', arg_type=get_location_type(self.cli_ctx), id_part='name')
c.argument('public_gallery_name', public_gallery_name_type)
c.argument('marker', arg_type=marker_type)
c.argument('show_next_marker', action='store_true', help='Show nextMarker in result when specified.')
with self.argument_context('sig image-version show-community') as c:
c.argument('location', arg_type=get_location_type(self.cli_ctx), id_part='name')
c.argument('public_gallery_name', public_gallery_name_type)
c.argument('gallery_image_name', gallery_image_name_type)
c.argument('gallery_image_version_name', gallery_image_name_version_type)
with self.argument_context('sig image-version list-community') as c:
c.argument('location', arg_type=get_location_type(self.cli_ctx), id_part='name')
c.argument('public_gallery_name', public_gallery_name_type)
c.argument('gallery_image_name', gallery_image_name_type)
c.argument('marker', arg_type=marker_type)
c.argument('show_next_marker', action='store_true', help='Show nextMarker in result when specified.')
with self.argument_context('sig share enable-community') as c:
c.argument('gallery_name', type=str, help='The name of the Shared Image Gallery.', id_part='name')
c.argument('subscription_ids', nargs='+', help='A list of subscription ids to share the gallery.')
c.argument('tenant_ids', nargs='+', help='A list of tenant ids to share the gallery.')
c.argument('op_type', default='EnableCommunity', deprecate_info=c.deprecate(hide=True),
help='distinguish add operation and remove operation')
# endregion
# region Gallery applications
with self.argument_context('sig gallery-application') as c:
c.argument('gallery_application_name', options_list=['--name', '-n', '--application-name'],
help='The name of the gallery Application')
with self.argument_context('sig gallery-application create') as c:
c.argument('location', arg_type=get_location_type(self.cli_ctx), required=False,
validator=get_default_location_from_resource_group)
c.argument('description', help='The description of this gallery Application Definition resource. '
'This property is updatable.')
c.argument('os_type', arg_type=get_enum_type(['Windows', 'Linux']), help='This property allows you '
'to specify the supported type of the OS that application is built for. <br><br> Possible values '
'are: <br><br> **Windows** <br><br> **Linux**')
with self.argument_context('sig gallery-application update') as c:
c.argument('location', arg_type=get_location_type(self.cli_ctx), required=False,
validator=get_default_location_from_resource_group)
c.argument('description', help='The description of this gallery Application Definition resource. '
'This property is updatable.')
with self.argument_context('sig gallery-application version') as c:
c.argument('gallery_application_name', options_list=['--application-name'],
help='The name of the gallery Application')
c.argument('gallery_application_version_name', options_list=['--name', '-n', '--version-name'],
help='The name of the gallery Application Version')
with self.argument_context('sig gallery-application version create') as c:
c.argument('package_file_name', help='The name to assign the downloaded package file on the VM. This is limited to 4096 characters.'
'If not specified, the package file will be named the same as the Gallery Application name.')
c.argument('config_file_name', help='The name to assign the downloaded config file on the VM. This is limited to 4096 characters. '
'If not specified, the config file will be named the Gallery Application name appended with "_config"')
for scope in ['create', 'update']:
with self.argument_context('sig gallery-application version {}'.format(scope)) as c:
c.argument('location', arg_type=get_location_type(self.cli_ctx), required=False,
validator=get_default_location_from_resource_group)
c.argument('tags', tags_type)
c.argument('package_file_link', help='The mediaLink of the artifact, must be a readable storage page blob.')
c.argument('install_command', help='The path and arguments to install the gallery application.')
c.argument('remove_command', help='The path and arguments to remove the gallery application.')
c.argument('update_command', help='The path and arguments to update the gallery application. If not present,'
' then update operation will invoke remove command on the previous version'
' and install command on the current version of the gallery application.')
c.argument('target_regions', type=validate_file_or_dict, help='The target regions where the Image Version is'
'going to be replicated to. This property is updatable. Expected value: '
'json-string/json-file/@json-file.')
c.argument('default_file_link', help='The default configuration link of the artifact, must be a readable storage page blob.')
c.argument('exclude_from', arg_type=get_three_state_flag(), help='If set to true, Virtual Machines '
'deployed from the latest version of the Image Definition won\'t use this Image Version.',
arg_group='Publishing Profile')
c.argument('end_of_life_date', help='The end of life date of the gallery image version. This property can be '
'used for decommissioning purposes. This property is updatable.', arg_group='Publishing Profile')
# endregion
# region Proximity Placement Group
with self.argument_context('ppg', min_api='2018-04-01') as c:
c.argument('proximity_placement_group_name', arg_type=name_arg_type, help="The name of the proximity placement group.")
with self.argument_context('ppg create', min_api='2018-04-01') as c:
c.argument('ppg_type', options_list=['--type', '-t'], help="The type of the proximity placement group. Allowed values: Standard.")
c.argument('tags', tags_type)
with self.argument_context('ppg show', min_api='2019-07-01') as c:
c.argument('include_colocation_status', action='store_true', help='Enable fetching the colocation status of all the resources in the proximity placement group.')
for scope, item in [('vm create', 'VM'), ('vmss create', 'VMSS'),
('vm availability-set create', 'availability set'),
('vm update', 'VM'), ('vmss update', 'VMSS'),
('vm availability-set update', 'availability set')]:
with self.argument_context(scope, min_api='2018-04-01') as c:
c.argument('proximity_placement_group', options_list=['--ppg'], help="The name or ID of the proximity placement group the {} should be associated with.".format(item),
validator=_validate_proximity_placement_group) # only availability set does not have a command level validator, so this should be added.
# endregion
# region VM Monitor
with self.argument_context('vm monitor log show') as c:
c.argument('analytics_query', options_list=['--analytics-query', '-q'], help="Query to execute over Log Analytics data.")
c.argument('timespan', help="Timespan over which to query. Defaults to querying all available data.")
with self.argument_context('vm monitor metrics') as c:
c.argument('metricnamespace', options_list=['--namespace'],
help='Namespace to query metric definitions for.')
with self.argument_context('vm monitor metrics tail') as c:
from azure.mgmt.monitor.models import AggregationType
c.extra('resource_group_name', required=True)
c.argument('resource', arg_type=existing_vm_name, help='Name or ID of a virtual machine', validator=validate_vm_name_for_monitor_metrics, id_part=None)
c.argument('metadata', action='store_true')
c.argument('dimension', nargs='*', validator=validate_metric_dimension)
c.argument('aggregation', arg_type=get_enum_type(t for t in AggregationType if t.name != 'none'), nargs='*')
c.argument('metrics', nargs='*')
c.argument('orderby',
help='Aggregation to use for sorting results and the direction of the sort. Only one order can be specificed. Examples: sum asc')
c.argument('top', help='Max number of records to retrieve. Valid only if --filter used.')
c.argument('filters', options_list=['--filter'])
c.argument('metric_namespace', options_list=['--namespace'])
with self.argument_context('vm monitor metrics tail', arg_group='Time') as c:
c.argument('start_time', arg_type=get_datetime_type(help='Start time of the query.'))
c.argument('end_time', arg_type=get_datetime_type(help='End time of the query. Defaults to the current time.'))
c.argument('offset', type=get_period_type(as_timedelta=True))
c.argument('interval', arg_group='Time', type=get_period_type())
with self.argument_context('vm monitor metrics list-definitions') as c:
c.extra('resource_group_name', required=True)
c.argument('resource_uri', arg_type=existing_vm_name, help='Name or ID of a virtual machine', validator=validate_vm_name_for_monitor_metrics, id_part=None)
# endregion
# region disk encryption set
with self.argument_context('disk-encryption-set') as c:
c.argument('disk_encryption_set_name', disk_encryption_set_name)
c.argument('key_url', help='URL pointing to a key or secret in KeyVault.')
c.argument('source_vault', help='Name or ID of the KeyVault containing the key or secret.')
c.argument('encryption_type', arg_type=get_enum_type(['EncryptionAtRestWithPlatformKey', 'EncryptionAtRestWithCustomerKey', 'EncryptionAtRestWithPlatformAndCustomerKeys', 'ConfidentialVmEncryptedWithCustomerKey']),
help='The type of key used to encrypt the data of the disk. EncryptionAtRestWithPlatformKey: Disk is encrypted at rest with Platform managed key. It is the default encryption type. EncryptionAtRestWithCustomerKey: Disk is encrypted at rest with Customer managed key that can be changed and revoked by a customer. EncryptionAtRestWithPlatformAndCustomerKeys: Disk is encrypted at rest with 2 layers of encryption. One of the keys is Customer managed and the other key is Platform managed. ConfidentialVmEncryptedWithCustomerKey: An additional encryption type accepted for confidential VM. Disk is encrypted at rest with Customer managed key.')
c.argument('location', validator=get_default_location_from_resource_group)
c.argument('tags', tags_type)
c.argument('enable_auto_key_rotation', arg_type=get_three_state_flag(), min_api='2020-12-01',
options_list=['--enable-auto-key-rotation', '--auto-rotation'],
help='Enable automatic rotation of keys.')
with self.argument_context('disk-encryption-set create', operation_group='disk_encryption_sets',
min_api='2022-03-02') as c:
c.argument('federated_client_id', help='The federated client id used in cross tenant scenario.')
c.argument('mi_system_assigned', arg_group='Managed Identity', arg_type=get_three_state_flag(),
help='Provide this flag to use system assigned identity. Check out help for more examples')
c.argument('mi_user_assigned', arg_group='Managed Identity', nargs='+',
help='User Assigned Identity ids to be used for disk encryption set. '
'Check out help for more examples')
with self.argument_context('disk-encryption-set update', operation_group='disk_encryption_sets',
min_api='2022-03-02') as c:
c.argument('federated_client_id', help='The federated client id used in cross tenant scenario.')
with self.argument_context('disk-encryption-set identity', operation_group='disk_encryption_sets',
min_api='2022-03-02') as c:
c.argument('mi_system_assigned', options_list=['--system-assigned'],
arg_group='Managed Identity', arg_type=get_three_state_flag(),
help='Provide this flag to use system assigned identity for disk encryption set. '
'Check out help for more examples')
c.argument('mi_user_assigned', options_list=['--user-assigned'], arg_group='Managed Identity', nargs='*',
help='User Assigned Identity ids to be used for disk encryption set. '
'Check out help for more examples')
# endregion
# region DiskAccess
with self.argument_context('disk-access', resource_type=ResourceType.MGMT_COMPUTE, operation_group='disk_accesses') as c:
c.argument('disk_access_name', arg_type=name_arg_type, help='Name of the disk access resource.', id_part='name')
c.argument('location', validator=get_default_location_from_resource_group)
c.argument('tags', tags_type)
# endRegion
# region Capacity
with self.argument_context('capacity reservation group') as c:
c.argument('location', arg_type=get_location_type(self.cli_ctx), validator=get_default_location_from_resource_group)
c.argument('capacity_reservation_group_name', options_list=['--capacity-reservation-group', '-n'],
help='The name of the capacity reservation group.')
c.argument('tags', tags_type)
with self.argument_context('capacity reservation group create') as c:
c.argument('zones', zones_type, help='Availability Zones to use for this capacity reservation group. If not provided, the group supports only regional resources in the region. If provided, enforces each capacity reservation in the group to be in one of the zones.')
with self.argument_context('capacity reservation group show') as c:
c.argument('instance_view', action='store_true', options_list=['--instance-view', '-i'], help='Retrieve the list of instance views of the capacity reservations under the capacity reservation group which is a snapshot of the runtime properties of a capacity reservation that is managed by the platform and can change outside of control plane operations.')
with self.argument_context('capacity reservation group list') as c:
c.argument('vm_instance', action='store_true', help='Retrieve the Virtual Machine Instance which are associated to capacity reservation group in the response.')
c.argument('vmss_instance', action='store_true', help='Retrieve the ScaleSet VM Instance which are associated to capacity reservation group in the response.')
with self.argument_context('capacity reservation') as c:
c.argument('location', arg_type=get_location_type(self.cli_ctx), validator=get_default_location_from_resource_group)
c.argument('capacity_reservation_group_name', options_list=['--capacity-reservation-group', '-c'],
help='The name of the capacity reservation group.')
c.argument('capacity_reservation_name', options_list=['--capacity-reservation-name', '-n'],
help='The name of the capacity reservation.')
c.argument('capacity', type=int, help='Specify the number of virtual machines in the scale set.')
c.argument('tags', tags_type)
with self.argument_context('capacity reservation create') as c:
c.argument('zone', zone_type, help='Availability Zone to use for this capacity reservation. The zone has to be single value and also should be part for the list of zones specified during the capacity reservation group creation. If not provided, the reservation supports only non-zonal deployments. If provided, enforces VM/VMSS using this capacity reservation to be in same zone.')
c.argument('sku_name', options_list=['--sku', '-s'], required=True, help='The SKU of the resource for which capacity needs be reserved. Currently VM Skus with the capability called "CapacityReservationSupported" set to true are supported. Refer to List Microsoft.Compute SKUs in a region (https://docs.microsoft.com/rest/api/compute/resourceskus/list) for supported values.')
with self.argument_context('capacity reservation show') as c:
c.argument('instance_view', action='store_true', options_list=['--instance-view', '-i'], help='Retrieve a snapshot of the runtime properties of the capacity reservation that is managed by the platform and can change outside of control plane operations.')
# endRegion
# region Restore point
with self.argument_context('restore-point') as c:
c.argument('restore_point_collection_name', options_list=['--collection-name'],
help='The name of the restore point collection.')
with self.argument_context('restore-point create') as c:
c.argument('restore_point_name', options_list=['--name', '-n', '--restore-point-name'],
help='The name of the restore point.')
c.argument('exclude_disks', nargs='+', help='List of disk resource ids that the '
'customer wishes to exclude from the restore point. If no disks are specified, all disks will be '
'included.')
c.argument('source_restore_point', help='Resource Id of the source restore point from which a copy needs to be created')
with self.argument_context('restore-point show') as c:
c.argument('restore_point_name', options_list=['--name', '-n', '--restore-point-name'],
help='The name of the restore point.')
c.argument('expand', help='The expand expression to apply on the operation.',
deprecate_info=c.deprecate(hide=True))
c.argument('instance_view', action='store_true', help='Show the instance view of a restore point.')
with self.argument_context('restore-point delete') as c:
c.argument('restore_point_name', options_list=['--name', '-n', '--restore-point-name'],
help='The name of the restore point.')
with self.argument_context('restore-point wait') as c:
c.argument('restore_point_name', options_list=['--name', '-n', '--restore-point-name'],
help='The name of the restore point.')
# endRegion
# region Restore point collection
with self.argument_context('restore-point collection create') as c:
c.argument('location', arg_type=get_location_type(self.cli_ctx), required=False,
validator=get_default_location_from_resource_group)
c.argument('tags', tags_type)
c.argument('source_id', help='Resource Id of the source resource used to create this restore point collection',
arg_group='Source')
with self.argument_context('restore-point collection update') as c:
c.argument('tags', tags_type)
with self.argument_context('restore-point collection show') as c:
c.argument('expand', help='The expand expression to apply on the operation.',
deprecate_info=c.deprecate(hide=True))
c.argument('restore_points', action='store_true', help='Show all contained restore points in the restore point collection.')
|
27,123 |
def exactly_one(*args) -> bool:
"""
Returns True if exactly one of *args is "truthy", and False otherwise.
If user supplies an iterable, we raise ValueError and force them to unpack.
"""
if is_container(args[0]):
raise ValueError(
"Not supported for iterable args. Use `*` to unpack your iterable in the function call."
)
def is_set(val):
if isinstance(val, ArgNotSet):
return False
else:
return bool(val)
return sum(map(is_set, args)) == 1
|
def exactly_one(*args) -> bool:
"""
Returns True if exactly one of *args is "truthy", and False otherwise.
If user supplies an iterable, we raise ValueError and force them to unpack.
"""
if is_container(args[0]):
raise ValueError(
"Not supported for iterable args. Use `*` to unpack your iterable in the function call."
)
def is_set(val):
if isinstance(val, ArgNotSet):
return False
else:
return bool(val)
return sum(a is not NOT_SET and bool(a) for a in args) == 1
|
32,514 |
def test_module():
try:
sdclient = SdMonitorClient(token=MONITORKEY, sdc_url=URL)
ok, res = sdclient.get_alerts()
if ok is True:
demisto.results('ok')
except Exception as e:
LOG(e)
return_error(e.message)
|
def test_module():
try:
sdclient = SdMonitorClient(token=MONITORKEY, sdc_url=URL)
ok, res = sdclient.get_alerts()
if ok is True:
demisto.results('ok')
except Exception as e:
demisto.info()
return_error(e.message)
|
53,442 |
def divide(x, y):
result = 0
try:
result = x / y
except ZeroDivisionError as exc:
raise Exception("Can't divide by zero!") from exc
return result
|
def divide(x, y):
result = 0
try:
result = x / y
except ZeroDivisionError as exc:
raise ValueError(f"Division by zero when dividing {x} by {y} !") from exc
return result
|
37,156 |
def schedule_circuit(circuit: QuantumCircuit,
schedule_config: ScheduleConfig,
method: Optional[str] = None) -> Schedule:
"""
Basic scheduling pass from a circuit to a pulse Schedule, using the backend. If no method is
specified, then a basic, as late as possible scheduling pass is performed, i.e. pulses are
scheduled to occur as late as possible.
Supported methods:
* ``as_soon_as_possible``: Schedule pulses greedily, as early as possible on a qubit resource.
(alias: ``asap``)
* ``as_late_as_possible``: Schedule pulses late-- keep qubits in the ground state when possible.
(alias: ``alap``)
Args:
circuit: The quantum circuit to translate.
schedule_config: Backend specific parameters used for building the Schedule.
method: The scheduling pass method to use.
Returns:
Schedule corresponding to the input circuit.
Raises:
QiskitError: If method isn't recognized.
"""
methods = {
'as_soon_as_possible': as_soon_as_possible,
'asap': as_soon_as_possible,
'as_late_as_possible': as_late_as_possible,
'alap': as_late_as_possible
}
if method is None:
method = 'as_late_as_possible'
try:
return methods[method](circuit, schedule_config)
except KeyError:
raise QiskitError("Scheduling method {method} isn't recognized.".format(method=method))
|
def schedule_circuit(circuit: QuantumCircuit,
schedule_config: ScheduleConfig,
method: Optional[str] = None) -> Schedule:
"""
Basic scheduling pass from a circuit to a pulse Schedule, using the backend. If no method is
specified, then a basic, as late as possible scheduling pass is performed, i.e. pulses are
scheduled to occur as late as possible.
Supported methods:
* ``as_soon_as_possible``: Schedule pulses greedily, as early as possible on a qubit resource.
(alias: ``asap``)
* ``'as_late_as_possible'``: Schedule pulses late-- keep qubits in the ground state when possible.
(alias: ``alap``)
Args:
circuit: The quantum circuit to translate.
schedule_config: Backend specific parameters used for building the Schedule.
method: The scheduling pass method to use.
Returns:
Schedule corresponding to the input circuit.
Raises:
QiskitError: If method isn't recognized.
"""
methods = {
'as_soon_as_possible': as_soon_as_possible,
'asap': as_soon_as_possible,
'as_late_as_possible': as_late_as_possible,
'alap': as_late_as_possible
}
if method is None:
method = 'as_late_as_possible'
try:
return methods[method](circuit, schedule_config)
except KeyError:
raise QiskitError("Scheduling method {method} isn't recognized.".format(method=method))
|
20,493 |
def fix_double_membership(cr):
# avoid error raised by new function '_check_one_user_type'
# assuming that group_public < group_portal < group_user
# this script keept the highest group, if a user belong to many
# groups
confs = [
("group_public", "group_portal"),
("group_public", "group_user"),
("group_portal", "group_user"),
]
for conf in confs:
group_to_remove = conf[0]
group_to_keep = conf[1]
openupgrade.logged_query(
cr, """
DELETE FROM res_groups_users_rel
WHERE
gid = (
SELECT res_id
FROM ir_model_data
WHERE module = 'base' AND name = '%s'
)
AND uid IN (
SELECT uid FROM res_groups_users_rel WHERE gid IN (
SELECT res_id
FROM ir_model_data
WHERE module = 'base'
AND name IN ('%s', '%s')
)
GROUP BY uid
HAVING count(*) > 1
);
""", (group_to_remove, group_to_remove, group_to_keep)
)
|
def fix_double_membership(cr):
# avoid error raised by new function '_check_one_user_type'
# assuming that group_public < group_portal < group_user
# this script keept the highest group, if a user belong to many
# groups
confs = [
("group_public", "group_portal"),
("group_public", "group_user"),
("group_portal", "group_user"),
]
for conf in confs:
group_to_remove = conf[0]
group_to_keep = conf[1]
openupgrade.logged_query(
cr, """
DELETE FROM res_groups_users_rel
WHERE
gid = (
SELECT res_id
FROM ir_model_data
WHERE module = 'base' AND name = %s
)
AND uid IN (
SELECT uid FROM res_groups_users_rel WHERE gid IN (
SELECT res_id
FROM ir_model_data
WHERE module = 'base'
AND name IN ('%s', '%s')
)
GROUP BY uid
HAVING count(*) > 1
);
""", (group_to_remove, group_to_remove, group_to_keep)
)
|
40,152 |
def load_config(path=None):
"""Load the config file located at `path`.
The file must be an ini file and is read into an `config.Config` instance.
This instance can be accessed with `config.cfg` after calling this function.
"""
if path is None:
path = Path(__file__).parent / 'config/main.cfg'
parser = ConfigParser()
with open(path) as f:
parser.read_file(f)
# TODO We should really not use private API of ConfigParser but whatever
parsed_sections = deepcopy(parser._sections)
parsed_sections['unpack']['whitelist'] = _parse_comma_separated_list(parser._sections['unpack']['whitelist'])
for plugin_set in parsed_sections['default-plugins']:
parsed_sections['default-plugins'][plugin_set] = _parse_comma_separated_list(parser._sections['default-plugins'][plugin_set])
# hyphens may not be contained in identifiers
# plugin names may also not contain hyphens, so this is fine
_replace_hyphens_with_underscores(parsed_sections)
global _cfg
global _configparser_cfg
_configparser_cfg = parser
_cfg = Config(**parsed_sections)
|
def load_config(path=None):
"""Load the config file located at `path`.
The file must be an ini file and is read into an `config.Config` instance.
This instance can be accessed with `config.cfg` after calling this function.
"""
if path is None:
path = Path(__file__).parent / 'config/main.cfg'
parser = ConfigParser()
with open(path) as f:
parser.read_file(f)
parsed_sections = {key: dict(section) for key, section in parser.items()}
parsed_sections['unpack']['whitelist'] = _parse_comma_separated_list(parser._sections['unpack']['whitelist'])
for plugin_set in parsed_sections['default-plugins']:
parsed_sections['default-plugins'][plugin_set] = _parse_comma_separated_list(parser._sections['default-plugins'][plugin_set])
# hyphens may not be contained in identifiers
# plugin names may also not contain hyphens, so this is fine
_replace_hyphens_with_underscores(parsed_sections)
global _cfg
global _configparser_cfg
_configparser_cfg = parser
_cfg = Config(**parsed_sections)
|
5,831 |
def iircomb(w0, Q, ftype='notch', fs=2.0, pass_zero=False):
"""
Design IIR notching or peaking digital comb filter.
A notching comb filter consists of regularly-spaced band-stop filters with
a narrow bandwidth (high quality factor). Each rejects a narrow frequency
band and leaves the rest of the spectrum little changed.
A peaking comb filter consists of regularly-spaced band-pass filters with
a narrow bandwidth (high quality factor). Each rejects components outside
a narrow frequency band.
Parameters
----------
w0 : float
The fundamental frequency of the comb filter (the spacing between its
peaks). This must evenly divide the sampling frequency. If `fs` is
specified, this is in the same units as `fs`. By default, it is
a normalized scalar that must satisfy ``0 < w0 < 1``, with
``w0 = 1`` corresponding to half of the sampling frequency.
Q : float
Quality factor. Dimensionless parameter that characterizes
notch filter -3 dB bandwidth ``bw`` relative to its center
frequency, ``Q = w0/bw``.
ftype : {'notch', 'peak'}
The type of comb filter generated by the function. If 'notch', then
the Q factor applies to the notches. If 'peak', then the Q factor
applies to the peaks. Default is 'notch'.
fs : float, optional
The sampling frequency of the signal. Default is 2.0.
pass_zero : bool, optional
If False (default), the notches (nulls) of the filter are centered on
frequencies [0, w0, 2*w0, ...], and the peaks are centered on the
midpoints [w0/2, 3*w0/2, 5*w0/2, ...]. If True, the peaks are centered
on [0, w0, 2*w0, ...] (passing zero frequency) and vice versa.
.. versionadded:: 1.9.0
Returns
-------
b, a : ndarray, ndarray
Numerator (``b``) and denominator (``a``) polynomials
of the IIR filter.
Raises
------
ValueError
If `w0` is less than or equal to 0 or greater than or equal to
``fs/2``, if `fs` is not divisible by `w0`, if `ftype`
is not 'notch' or 'peak'
See Also
--------
iirnotch
iirpeak
Notes
-----
For implementation details, see [1]_. The TF implementation of the
comb filter is numerically stable even at higher orders due to the
use of a single repeated pole, which won't suffer from precision loss.
References
----------
.. [1] Sophocles J. Orfanidis, "Introduction To Signal Processing",
Prentice-Hall, 1996, ch. 11, "Digital Filter Design"
Examples
--------
Design and plot notching comb filter at 20 Hz for a
signal sampled at 200 Hz, using quality factor Q = 30
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> fs = 200.0 # Sample frequency (Hz)
>>> f0 = 20.0 # Frequency to be removed from signal (Hz)
>>> Q = 30.0 # Quality factor
>>> # Design notching comb filter
>>> b, a = signal.iircomb(f0, Q, ftype='notch', fs=fs)
>>> # Frequency response
>>> freq, h = signal.freqz(b, a, fs=fs)
>>> response = abs(h)
>>> # To avoid divide by zero when graphing
>>> response[response == 0] = 1e-20
>>> # Plot
>>> fig, ax = plt.subplots(2, 1, figsize=(8, 6), sharex=True)
>>> ax[0].plot(freq, 20*np.log10(abs(response)), color='blue')
>>> ax[0].set_title("Frequency Response")
>>> ax[0].set_ylabel("Amplitude (dB)", color='blue')
>>> ax[0].set_xlim([0, 100])
>>> ax[0].set_ylim([-30, 10])
>>> ax[0].grid(True)
>>> ax[1].plot(freq, (np.angle(h)*180/np.pi+180)%360 - 180, color='green')
>>> ax[1].set_ylabel("Angle (degrees)", color='green')
>>> ax[1].set_xlabel("Frequency (Hz)")
>>> ax[1].set_xlim([0, 100])
>>> ax[1].set_yticks([-90, -60, -30, 0, 30, 60, 90])
>>> ax[1].set_ylim([-90, 90])
>>> ax[1].grid(True)
>>> plt.show()
Design and plot peaking comb filter at 250 Hz for a
signal sampled at 1000 Hz, using quality factor Q = 30
>>> fs = 1000.0 # Sample frequency (Hz)
>>> f0 = 250.0 # Frequency to be retained (Hz)
>>> Q = 30.0 # Quality factor
>>> # Design peaking filter
>>> b, a = signal.iircomb(f0, Q, ftype='peak', fs=fs, pass_zero=True)
>>> # Frequency response
>>> freq, h = signal.freqz(b, a, fs=fs)
>>> response = abs(h)
>>> # To avoid divide by zero when graphing
>>> response[response == 0] = 1e-20
>>> # Plot
>>> fig, ax = plt.subplots(2, 1, figsize=(8, 6), sharex=True)
>>> ax[0].plot(freq, 20*np.log10(np.maximum(abs(h), 1e-5)), color='blue')
>>> ax[0].set_title("Frequency Response")
>>> ax[0].set_ylabel("Amplitude (dB)", color='blue')
>>> ax[0].set_xlim([0, 500])
>>> ax[0].set_ylim([-80, 10])
>>> ax[0].grid(True)
>>> ax[1].plot(freq, (np.angle(h)*180/np.pi+180)%360 - 180, color='green')
>>> ax[1].set_ylabel("Angle (degrees)", color='green')
>>> ax[1].set_xlabel("Frequency (Hz)")
>>> ax[1].set_xlim([0, 500])
>>> ax[1].set_yticks([-90, -60, -30, 0, 30, 60, 90])
>>> ax[1].set_ylim([-90, 90])
>>> ax[1].grid(True)
>>> plt.show()
"""
# Convert w0, Q, and fs to float
w0 = float(w0)
Q = float(Q)
fs = float(fs)
# Check for invalid cutoff frequency or filter type
ftype = ftype.lower()
if not 0 < w0 < fs / 2:
raise ValueError("w0 must be between 0 and {}"
" (nyquist), but given {}.".format(fs / 2, w0))
if ftype not in ('notch', 'peak'):
raise ValueError('ftype must be either notch or peak.')
# Compute the order of the filter
N = round(fs / w0)
# Check for cutoff frequency divisibility
if abs(w0 - fs/N)/fs > 1e-14:
raise ValueError('fs must be divisible by w0.')
# Compute frequency in radians and filter bandwidth
# Eq. 11.3.1 (p. 574) from reference [1]
w0 = (2 * np.pi * w0) / fs
w_delta = w0 / Q
# Define base gain values depending on notch or peak filter
# Compute -3dB attenuation
# Eqs. 11.4.1 and 11.4.2 (p. 582) from reference [1]
if ftype == 'notch':
G0, G = 1, 0
elif ftype == 'peak':
G0, G = 0, 1
GB = 1 / np.sqrt(2)
# Compute beta
# Eq. 11.5.3 (p. 591) from reference [1]
beta = np.sqrt((GB**2 - G0**2) / (G**2 - GB**2)) * np.tan(N * w_delta / 4)
# Compute filter coefficients
# Eq 11.5.1 (p. 590) variables a, b, c from reference [1]
ax = (1 - beta) / (1 + beta)
bx = (G0 + G * beta) / (1 + beta)
cx = (G0 - G * beta) / (1 + beta)
# Compute numerator coefficients
# Eq 11.5.1 (p. 590) or Eq 11.5.4 (p. 591) from reference [1]
# b - cz^-N or b + cz^-N
b = np.zeros(N + 1)
b[0] = bx
if (ftype == 'notch' and not pass_zero) or (ftype == 'peak' and pass_zero):
b[-1] = -cx
else:
b[-1] = +cx
# Compute denominator coefficients
# Eq 11.5.1 (p. 590) or Eq 11.5.4 (p. 591) from reference [1]
# 1 - az^-N or 1 + az^-N
a = np.zeros(N + 1)
a[0] = 1
if (ftype == 'notch' and not pass_zero) or (ftype == 'peak' and pass_zero):
a[-1] = -ax
else:
a[-1] = +ax
return b, a
|
def iircomb(w0, Q, ftype='notch', fs=2.0, *, pass_zero=False):
"""
Design IIR notching or peaking digital comb filter.
A notching comb filter consists of regularly-spaced band-stop filters with
a narrow bandwidth (high quality factor). Each rejects a narrow frequency
band and leaves the rest of the spectrum little changed.
A peaking comb filter consists of regularly-spaced band-pass filters with
a narrow bandwidth (high quality factor). Each rejects components outside
a narrow frequency band.
Parameters
----------
w0 : float
The fundamental frequency of the comb filter (the spacing between its
peaks). This must evenly divide the sampling frequency. If `fs` is
specified, this is in the same units as `fs`. By default, it is
a normalized scalar that must satisfy ``0 < w0 < 1``, with
``w0 = 1`` corresponding to half of the sampling frequency.
Q : float
Quality factor. Dimensionless parameter that characterizes
notch filter -3 dB bandwidth ``bw`` relative to its center
frequency, ``Q = w0/bw``.
ftype : {'notch', 'peak'}
The type of comb filter generated by the function. If 'notch', then
the Q factor applies to the notches. If 'peak', then the Q factor
applies to the peaks. Default is 'notch'.
fs : float, optional
The sampling frequency of the signal. Default is 2.0.
pass_zero : bool, optional
If False (default), the notches (nulls) of the filter are centered on
frequencies [0, w0, 2*w0, ...], and the peaks are centered on the
midpoints [w0/2, 3*w0/2, 5*w0/2, ...]. If True, the peaks are centered
on [0, w0, 2*w0, ...] (passing zero frequency) and vice versa.
.. versionadded:: 1.9.0
Returns
-------
b, a : ndarray, ndarray
Numerator (``b``) and denominator (``a``) polynomials
of the IIR filter.
Raises
------
ValueError
If `w0` is less than or equal to 0 or greater than or equal to
``fs/2``, if `fs` is not divisible by `w0`, if `ftype`
is not 'notch' or 'peak'
See Also
--------
iirnotch
iirpeak
Notes
-----
For implementation details, see [1]_. The TF implementation of the
comb filter is numerically stable even at higher orders due to the
use of a single repeated pole, which won't suffer from precision loss.
References
----------
.. [1] Sophocles J. Orfanidis, "Introduction To Signal Processing",
Prentice-Hall, 1996, ch. 11, "Digital Filter Design"
Examples
--------
Design and plot notching comb filter at 20 Hz for a
signal sampled at 200 Hz, using quality factor Q = 30
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> fs = 200.0 # Sample frequency (Hz)
>>> f0 = 20.0 # Frequency to be removed from signal (Hz)
>>> Q = 30.0 # Quality factor
>>> # Design notching comb filter
>>> b, a = signal.iircomb(f0, Q, ftype='notch', fs=fs)
>>> # Frequency response
>>> freq, h = signal.freqz(b, a, fs=fs)
>>> response = abs(h)
>>> # To avoid divide by zero when graphing
>>> response[response == 0] = 1e-20
>>> # Plot
>>> fig, ax = plt.subplots(2, 1, figsize=(8, 6), sharex=True)
>>> ax[0].plot(freq, 20*np.log10(abs(response)), color='blue')
>>> ax[0].set_title("Frequency Response")
>>> ax[0].set_ylabel("Amplitude (dB)", color='blue')
>>> ax[0].set_xlim([0, 100])
>>> ax[0].set_ylim([-30, 10])
>>> ax[0].grid(True)
>>> ax[1].plot(freq, (np.angle(h)*180/np.pi+180)%360 - 180, color='green')
>>> ax[1].set_ylabel("Angle (degrees)", color='green')
>>> ax[1].set_xlabel("Frequency (Hz)")
>>> ax[1].set_xlim([0, 100])
>>> ax[1].set_yticks([-90, -60, -30, 0, 30, 60, 90])
>>> ax[1].set_ylim([-90, 90])
>>> ax[1].grid(True)
>>> plt.show()
Design and plot peaking comb filter at 250 Hz for a
signal sampled at 1000 Hz, using quality factor Q = 30
>>> fs = 1000.0 # Sample frequency (Hz)
>>> f0 = 250.0 # Frequency to be retained (Hz)
>>> Q = 30.0 # Quality factor
>>> # Design peaking filter
>>> b, a = signal.iircomb(f0, Q, ftype='peak', fs=fs, pass_zero=True)
>>> # Frequency response
>>> freq, h = signal.freqz(b, a, fs=fs)
>>> response = abs(h)
>>> # To avoid divide by zero when graphing
>>> response[response == 0] = 1e-20
>>> # Plot
>>> fig, ax = plt.subplots(2, 1, figsize=(8, 6), sharex=True)
>>> ax[0].plot(freq, 20*np.log10(np.maximum(abs(h), 1e-5)), color='blue')
>>> ax[0].set_title("Frequency Response")
>>> ax[0].set_ylabel("Amplitude (dB)", color='blue')
>>> ax[0].set_xlim([0, 500])
>>> ax[0].set_ylim([-80, 10])
>>> ax[0].grid(True)
>>> ax[1].plot(freq, (np.angle(h)*180/np.pi+180)%360 - 180, color='green')
>>> ax[1].set_ylabel("Angle (degrees)", color='green')
>>> ax[1].set_xlabel("Frequency (Hz)")
>>> ax[1].set_xlim([0, 500])
>>> ax[1].set_yticks([-90, -60, -30, 0, 30, 60, 90])
>>> ax[1].set_ylim([-90, 90])
>>> ax[1].grid(True)
>>> plt.show()
"""
# Convert w0, Q, and fs to float
w0 = float(w0)
Q = float(Q)
fs = float(fs)
# Check for invalid cutoff frequency or filter type
ftype = ftype.lower()
if not 0 < w0 < fs / 2:
raise ValueError("w0 must be between 0 and {}"
" (nyquist), but given {}.".format(fs / 2, w0))
if ftype not in ('notch', 'peak'):
raise ValueError('ftype must be either notch or peak.')
# Compute the order of the filter
N = round(fs / w0)
# Check for cutoff frequency divisibility
if abs(w0 - fs/N)/fs > 1e-14:
raise ValueError('fs must be divisible by w0.')
# Compute frequency in radians and filter bandwidth
# Eq. 11.3.1 (p. 574) from reference [1]
w0 = (2 * np.pi * w0) / fs
w_delta = w0 / Q
# Define base gain values depending on notch or peak filter
# Compute -3dB attenuation
# Eqs. 11.4.1 and 11.4.2 (p. 582) from reference [1]
if ftype == 'notch':
G0, G = 1, 0
elif ftype == 'peak':
G0, G = 0, 1
GB = 1 / np.sqrt(2)
# Compute beta
# Eq. 11.5.3 (p. 591) from reference [1]
beta = np.sqrt((GB**2 - G0**2) / (G**2 - GB**2)) * np.tan(N * w_delta / 4)
# Compute filter coefficients
# Eq 11.5.1 (p. 590) variables a, b, c from reference [1]
ax = (1 - beta) / (1 + beta)
bx = (G0 + G * beta) / (1 + beta)
cx = (G0 - G * beta) / (1 + beta)
# Compute numerator coefficients
# Eq 11.5.1 (p. 590) or Eq 11.5.4 (p. 591) from reference [1]
# b - cz^-N or b + cz^-N
b = np.zeros(N + 1)
b[0] = bx
if (ftype == 'notch' and not pass_zero) or (ftype == 'peak' and pass_zero):
b[-1] = -cx
else:
b[-1] = +cx
# Compute denominator coefficients
# Eq 11.5.1 (p. 590) or Eq 11.5.4 (p. 591) from reference [1]
# 1 - az^-N or 1 + az^-N
a = np.zeros(N + 1)
a[0] = 1
if (ftype == 'notch' and not pass_zero) or (ftype == 'peak' and pass_zero):
a[-1] = -ax
else:
a[-1] = +ax
return b, a
|
31,594 |
def create_release_command():
args = demisto.args()
tag_name = args.get('tag_name')
data = {
'tag_name': tag_name,
'name': args.get('name'),
'body': args.get('body'),
'draft': argToBoolean(args.get('draft'))
}
response = http_request('POST', url_suffix=RELEASE_SUFFIX, data=data)
release_url = response.get('html_url')
outputs = {
'id': response.get('id'),
'draft': response.get('draft'),
'html_url': response.get('html_url'),
'url': response.get('url')
}
return_results(CommandResults(
outputs_prefix='GitHub.Release',
outputs=outputs,
outputs_key_field='id',
readable_output=f'Release {tag_name} created successfully for repo {REPOSITORY}: {release_url}',
raw_response=response
))
|
def create_release_command():
args = demisto.args()
tag_name = args.get('tag_name')
data = {
'tag_name': tag_name,
'name': args.get('name'),
'body': args.get('body'),
'draft': argToBoolean(args.get('draft'))
}
response = http_request('POST', url_suffix=RELEASE_SUFFIX, data=data)
release_url = response.get('html_url')
outputs = {
'id': response.get('id'),
'draft': response.get('draft'),
'html_url': response.get('html_url'),
'url': response.get('url'),
}
return_results(CommandResults(
outputs_prefix='GitHub.Release',
outputs=outputs,
outputs_key_field='id',
readable_output=f'Release {tag_name} created successfully for repo {REPOSITORY}: {release_url}',
raw_response=response
))
|
31,483 |
def list_files_command():
args = demisto.args()
path = args.get('path', '')
organization = args.get('organization')
repository = args.get('repository')
if organization and repository:
suffix = f'/repos/{organization}/{repository}/contents/{path}'
else:
suffix = f'{USER_SUFFIX}/contents/{path}'
res = http_request(method='GET', url_suffix=suffix)
ec_object = []
for file in res:
ec_object.append({
'Type': file.get('type'),
'Name': file.get('name'),
'Size': file.get('size'),
'Path': file.get('path'),
'DownloadUrl': file.get('download_url')
})
ec = {'GitHub.File(val.Name === obj.Name && val.Path === obj.Path)': ec_object}
human_readable = tableToMarkdown(f'Files in path: {path}', ec_object, removeNull=True, headers=FILE_HEADERS)
return_outputs(readable_output=human_readable, outputs=ec, raw_response=res)
|
def list_files_command():
args = demisto.args()
path = args.get('path', '')
organization = args.get('organization')
repository = args.get('repository')
if organization and repository:
suffix = f'/repos/{organization}/{repository}/contents/{path}'
else:
suffix = f'{USER_SUFFIX}/contents/{path}'
res = http_request(method='GET', url_suffix=suffix)
ec_object = []
for file in res:
ec_object.append({
'Type': file.get('type'),
'Name': file.get('name'),
'Size': file.get('size'),
'Path': file.get('path'),
'DownloadUrl': file.get('download_url')
})
ec = {'GitHub.File(val.Path && val.Path === obj.Path)': ec_object}
human_readable = tableToMarkdown(f'Files in path: {path}', ec_object, removeNull=True, headers=FILE_HEADERS)
return_outputs(readable_output=human_readable, outputs=ec, raw_response=res)
|
7,523 |
def test_sigma_clip_masked_data_values():
"""
Test that the data values & type returned by sigma_clip are the same as
its input when using masked=True (rather than being upcast to float64 &
containing NaNs as in issue #10605) and also that the input data get
copied or referenced as appropriate.
"""
data = np.array([-2, 5, -5, -6, 20, 14, 1])
result = sigma_clip(data, sigma=1.5, maxiters=3, axis=None, masked=True,
copy=True)
assert result.dtype == data.dtype
assert np.all(result.data == data)
assert not np.shares_memory(result.data, data)
result = sigma_clip(data, sigma=1.5, maxiters=3, axis=None, masked=True,
copy=False)
assert result.dtype == data.dtype
assert np.all(result.data == data)
assert np.shares_memory(result.data, data)
# (The fact that the arrays share memory probably also means they're the
# same, but doesn't strictly prove it, eg. one could be reversed.)
result = sigma_clip(data, sigma=1.5, maxiters=3, axis=0, masked=True,
copy=True)
assert result.dtype == data.dtype
assert np.all(result.data == data)
assert not np.shares_memory(result.data, data)
result = sigma_clip(data, sigma=1.5, maxiters=3, axis=0, masked=True,
copy=False)
assert result.dtype == data.dtype
assert np.all(result.data == data)
assert np.shares_memory(result.data, data)
|
def test_sigma_clip_masked_data_values():
"""
Test that the data values & type returned by sigma_clip are the same as
its input when using masked=True (rather than being upcast to float64 &
containing NaNs as in issue #10605) and also that the input data get
copied or referenced as appropriate.
"""
data = np.array([-2, 5, -5, -6, 20, 14, 1])
result = sigma_clip(data, sigma=1.5, maxiters=3, axis=None, masked=True,
copy=True)
assert result.dtype == data.dtype
assert_equal(result.data, data)
assert not np.shares_memory(result.data, data)
result = sigma_clip(data, sigma=1.5, maxiters=3, axis=None, masked=True,
copy=False)
assert result.dtype == data.dtype
assert np.all(result.data == data)
assert np.shares_memory(result.data, data)
# (The fact that the arrays share memory probably also means they're the
# same, but doesn't strictly prove it, eg. one could be reversed.)
result = sigma_clip(data, sigma=1.5, maxiters=3, axis=0, masked=True,
copy=True)
assert result.dtype == data.dtype
assert np.all(result.data == data)
assert not np.shares_memory(result.data, data)
result = sigma_clip(data, sigma=1.5, maxiters=3, axis=0, masked=True,
copy=False)
assert result.dtype == data.dtype
assert np.all(result.data == data)
assert np.shares_memory(result.data, data)
|
41,427 |
def test_smooth():
"""Test if you can smooth the periodogram and check any pitfalls
"""
lc = LightCurve(time=np.arange(1000),
flux=np.random.normal(1, 0.1, 1000),
flux_err=np.zeros(1000)+0.1)
p = lc.to_periodogram()
# Test boxkernel and logmedian methods
assert all(p.smooth(method='boxkernel').frequency == p.frequency)
assert all(p.smooth(method='logmedian').frequency == p.frequency)
# Check output units
assert p.smooth().power.unit == p.power.unit
# Can't pass filter_width below 0.
with pytest.raises(ValueError) as err:
p.smooth(method='boxkernel', filter_width=-5.)
# Can't pass a filter_width in the wrong units
with pytest.raises(ValueError) as err:
p.smooth(method='boxkernel', filter_width=5.*u.day)
assert err.value.args[0] == 'the `filter_width` parameter must have frequency units.'
# Can't (yet) use a periodogram with a non-evenly spaced frequencies
with pytest.raises(ValueError) as err:
p = np.arange(1, 100)
p = lc.to_periodogram(period=p)
p.smooth()
# Check logmedian doesn't work if I give the filter width units
with pytest.raises(ValueError) as err:
p.smooth(method='logmedian', filter_width=5.*u.day)
# Check logmedian smooth that the mean of the smoothed power should be consistent with the mean of the power
assert np.isclose(np.mean(p.smooth(method='logmedian').power.value), np.mean(p.power), atol=0.05)
|
def test_smooth():
"""Test if you can smooth the periodogram and check any pitfalls
"""
lc = LightCurve(time=np.arange(1000),
flux=np.random.normal(1, 0.1, 1000),
flux_err=np.zeros(1000)+0.1)
p = lc.to_periodogram()
# Test boxkernel and logmedian methods
assert all(p.smooth(method='boxkernel').frequency == p.frequency)
assert all(p.smooth(method='logmedian').frequency == p.frequency)
# Check output units
assert p.smooth().power.unit == p.power.unit
# Can't pass filter_width below 0.
with pytest.raises(ValueError) as err:
p.smooth(method='boxkernel', filter_width=-5.)
# Can't pass a filter_width in the wrong units
with pytest.raises(ValueError) as err:
p.smooth(method='boxkernel', filter_width=5.*u.day)
assert err.value.args[0] == 'the `filter_width` parameter must have frequency units.'
# Can't (yet) use a periodogram with a non-evenly spaced frequencies
with pytest.raises(ValueError) as err:
p = np.arange(1, 100)
p = lc.to_periodogram(period=p)
p.smooth()
# Check logmedian doesn't work if I give the filter width units
with pytest.raises(ValueError) as err:
p.smooth(method='logmedian', filter_width=5.*u.day)
# Check logmedian smooth that the mean of the smoothed power should be consistent with the mean of the power
assert np.isclose(np.mean(p.smooth(method='logmedian').power.value), np.mean(p.power.value), atol=0.05)
|
3,667 |
def _hist_bin_fd(x, range):
"""
The Freedman-Diaconis histogram bin estimator.
The Freedman-Diaconis rule uses interquartile range (IQR) to
estimate binwidth. It is considered a variation of the Scott rule
with more robustness as the IQR is less affected by outliers than
the standard deviation. However, the IQR depends on fewer points
than the standard deviation, so it is less accurate, especially for
long tailed distributions.
If the IQR is 0, this function returns 0 for the number of bins.
Binwidth is inversely proportional to the cube root of data size
(asymptotically optimal).
Parameters
----------
x : array_like
Input data that is to be histogrammed, trimmed to range. May not
be empty.
Returns
-------
h : An estimate of the optimal bin width for the given data.
"""
del range # unused
iqr = np.subtract(*np.percentile(x, [75, 25]))
return 2.0 * iqr * x.size ** (-1.0 / 3.0)
|
def _hist_bin_fd(x, range):
"""
The Freedman-Diaconis histogram bin estimator.
The Freedman-Diaconis rule uses interquartile range (IQR) to
estimate binwidth. It is considered a variation of the Scott rule
with more robustness as the IQR is less affected by outliers than
the standard deviation. However, the IQR depends on fewer points
than the standard deviation, so it is less accurate, especially for
long tailed distributions.
If the IQR is 0, this function returns 0 for the bin width.
Binwidth is inversely proportional to the cube root of data size
(asymptotically optimal).
Parameters
----------
x : array_like
Input data that is to be histogrammed, trimmed to range. May not
be empty.
Returns
-------
h : An estimate of the optimal bin width for the given data.
"""
del range # unused
iqr = np.subtract(*np.percentile(x, [75, 25]))
return 2.0 * iqr * x.size ** (-1.0 / 3.0)
|
42,115 |
def plot_param_importances(
study: Study,
evaluator: Optional[BaseImportanceEvaluator] = None,
params: Optional[List[str]] = None,
*,
target: Optional[Callable[[FrozenTrial], float]] = None,
target_name: str = "Objective Value",
) -> "go.Figure":
"""Plot hyperparameter importances.
Example:
The following code snippet shows how to plot hyperparameter importances.
.. plotly::
import optuna
def objective(trial):
x = trial.suggest_int("x", 0, 2)
y = trial.suggest_float("y", -1.0, 1.0)
z = trial.suggest_float("z", 0.0, 1.5)
return x ** 2 + y ** 3 - z ** 4
sampler = optuna.samplers.RandomSampler(seed=10)
study = optuna.create_study(sampler=sampler)
study.optimize(objective, n_trials=100)
fig = optuna.visualization.plot_param_importances(study)
fig.show()
.. seealso::
This function visualizes the results of :func:`optuna.importance.get_param_importances`.
Args:
study:
An optimized study.
evaluator:
An importance evaluator object that specifies which algorithm to base the importance
assessment on.
Defaults to
:class:`~optuna.importance.FanovaImportanceEvaluator`.
params:
A list of names of parameters to assess.
If :obj:`None`, all parameters that are present in all of the completed trials are
assessed.
target:
A function to specify the value to display. If it is :obj:`None` and ``study`` is being
used for single-objective optimization, the objective values are plotted.
.. note::
Specify this argument if ``study`` is being used for multi-objective
optimization. For example, to get the hyperparameter importance of the first
objective, use ``target=lambda t: t.values[0]`` for the target parameter.
target_name:
Target's name to display on the axis label.
Returns:
A :class:`plotly.graph_objs.Figure` object.
"""
_imports.check()
importances_info = _get_importances_info(study, evaluator, params, target, target_name)
layout = go.Layout(
title=importances_info.title,
xaxis={"title": importances_info.x_axis_name},
yaxis={"title": importances_info.y_axis_name},
showlegend=False,
)
param_names = importances_info.param_names
importance_values = importances_info.importance_values
if len(importance_values) == 0:
return go.Figure(data=[], layout=layout)
hovertemplate = [
_make_hovertext(param_name, importance, study)
for (param_name, importance) in zip(param_names, importance_values)
]
fig = go.Figure(
data=[
go.Bar(
x=importance_values,
y=param_names,
text=importances_info.importance_labels,
textposition="outside",
cliponaxis=False, # Ensure text is not clipped.
hovertemplate=hovertemplate,
marker_color=plotly.colors.sequential.Blues[-4],
orientation="h",
)
],
layout=layout,
)
return fig
|
def plot_param_importances(
study: Study,
evaluator: Optional[BaseImportanceEvaluator] = None,
params: Optional[List[str]] = None,
*,
target: Optional[Callable[[FrozenTrial], float]] = None,
target_name: str = "Objective Value",
) -> "go.Figure":
"""Plot hyperparameter importances.
Example:
The following code snippet shows how to plot hyperparameter importances.
.. plotly::
import optuna
def objective(trial):
x = trial.suggest_int("x", 0, 2)
y = trial.suggest_float("y", -1.0, 1.0)
z = trial.suggest_float("z", 0.0, 1.5)
return x ** 2 + y ** 3 - z ** 4
sampler = optuna.samplers.RandomSampler(seed=10)
study = optuna.create_study(sampler=sampler)
study.optimize(objective, n_trials=100)
fig = optuna.visualization.plot_param_importances(study)
fig.show()
.. seealso::
This function visualizes the results of :func:`optuna.importance.get_param_importances`.
Args:
study:
An optimized study.
evaluator:
An importance evaluator object that specifies which algorithm to base the importance
assessment on.
Defaults to
:class:`~optuna.importance.FanovaImportanceEvaluator`.
params:
A list of names of parameters to assess.
If :obj:`None`, all parameters that are present in all of the completed trials are
assessed.
target:
A function to specify the value to display. If it is :obj:`None` and ``study`` is being
used for single-objective optimization, the objective values are plotted.
.. note::
Specify this argument if ``study`` is being used for multi-objective
optimization. For example, to get the hyperparameter importance of the first
objective, use ``target=lambda t: t.values[0]`` for the target parameter.
target_name:
Target's name to display on the axis label.
Returns:
A :class:`plotly.graph_objs.Figure` object.
"""
_imports.check()
importances_info = _get_importances_info(study, evaluator, params, target, target_name)
layout = go.Layout(
title=importances_info.title,
xaxis={"title": importances_info.x_axis_name},
yaxis={"title": importances_info.y_axis_name},
showlegend=False,
)
param_names = importances_info.param_names
importance_values = importances_info.importance_values
if len(importance_values) == 0:
return go.Figure(data=[], layout=layout)
hovertemplate = [
_make_hovertext(param_name, importance, study)
for param_name, importance in zip(param_names, importance_values)
]
fig = go.Figure(
data=[
go.Bar(
x=importance_values,
y=param_names,
text=importances_info.importance_labels,
textposition="outside",
cliponaxis=False, # Ensure text is not clipped.
hovertemplate=hovertemplate,
marker_color=plotly.colors.sequential.Blues[-4],
orientation="h",
)
],
layout=layout,
)
return fig
|
20,083 |
def _add_monitoring_data(cluster_nodes: dict):
"""Add metrics data and information on services for the cluster nodes."""
query_string = ' or '.join(QUERY_STRINGS.values())
global_results = prometheus_query(
query_string=query_string,
logger=current_app.logger,
timeout=config.monitoring_timeout,
)
# find unexpected metrics
unexpected_metrics = [
result for result in global_results
if not _host_matches(result.get('metric'), cluster_nodes.keys())
]
if unexpected_metrics:
current_app.logger.warning(
'These metrics do not match monitored IP address%s (%s): %s',
'' if len(cluster_nodes.keys()) == 1 else 'es',
', '.join(cluster_nodes.keys()),
unexpected_metrics,
)
for address in cluster_nodes.keys():
service_results, metric_results = _parse_prometheus_results([
result for result in global_results
if _host_matches(result.get('metric'), [address])
])
cluster_nodes[address]['service_results'] = service_results
cluster_nodes[address]['metric_results'] = metric_results
|
def _add_monitoring_data(cluster_nodes: dict):
"""Add metrics data and information on services for the cluster nodes."""
query_string = ' or '.join(QUERY_STRINGS.values())
global_results = prometheus_query(
query_string=query_string,
logger=current_app.logger,
timeout=config.monitoring_timeout,
)
# find unexpected metrics
unexpected_metrics = [
result for result in global_results
if not _host_matches(result.get('metric'), cluster_nodes.keys())
]
if unexpected_metrics:
current_app.logger.warning(
'These metrics do not match monitored IP address%s (%s): %s',
'' if len(cluster_nodes) == 1 else 'es',
', '.join(cluster_nodes.keys()),
unexpected_metrics,
)
for address in cluster_nodes.keys():
service_results, metric_results = _parse_prometheus_results([
result for result in global_results
if _host_matches(result.get('metric'), [address])
])
cluster_nodes[address]['service_results'] = service_results
cluster_nodes[address]['metric_results'] = metric_results
|
39,699 |
def main():
module = ForemanJobInvocationModule(
foreman_spec=dict(
search_query=dict(),
bookmark_id=dict(),
job_template_id=dict(required=True),
targeting_type=dict(default='static_query', choices=['static_query', 'dynamic_query']),
randomized_ordering=dict(type='bool', default=False),
feature=dict(),
command=dict(),
inputs=dict(),
execution_timeout_interval=dict(),
ssh=dict(type='nested_list', foreman_spec=ssh_foreman_spec),
recurrence=dict(type='nested_list', foreman_spec=recurrnece_foreman_spec),
scheduling=dict(type='nested_list', foreman_spec=scheduling_foreman_spec),
concurrency_control=dict(type='nested_list', foreman_spec=concurrency_control_foreman_spec),
),
entity_opts={'search_by': 'description'}
)
# make sure we have a search query
if 'search_query' not in module.foreman_params.keys() and 'bookmark_id' not in module.foreman_params.keys():
module.fail_json(
msg='No search query specified and no bookmark to infer it.')
# command type template validation
if module.foreman_params['job_template_id'] in ['148', '177'] and 'command' not in module.foreman_params.keys():
module.fail_json(
msg='No command specified for command template.')
# command input required by api
if 'command' in module.foreman_params.keys():
module.foreman_params['inputs'] = {"command": f"{module.foreman_params['command']}"}
module.foreman_params.pop('command')
with module.api_connection():
module.run()
|
def main():
module = ForemanJobInvocationModule(
foreman_spec=dict(
search_query=dict(),
bookmark=dict(type='entity'),
job_template_id=dict(required=True),
targeting_type=dict(default='static_query', choices=['static_query', 'dynamic_query']),
randomized_ordering=dict(type='bool', default=False),
feature=dict(),
command=dict(),
inputs=dict(),
execution_timeout_interval=dict(),
ssh=dict(type='nested_list', foreman_spec=ssh_foreman_spec),
recurrence=dict(type='nested_list', foreman_spec=recurrnece_foreman_spec),
scheduling=dict(type='nested_list', foreman_spec=scheduling_foreman_spec),
concurrency_control=dict(type='nested_list', foreman_spec=concurrency_control_foreman_spec),
),
entity_opts={'search_by': 'description'}
)
# make sure we have a search query
if 'search_query' not in module.foreman_params.keys() and 'bookmark_id' not in module.foreman_params.keys():
module.fail_json(
msg='No search query specified and no bookmark to infer it.')
# command type template validation
if module.foreman_params['job_template_id'] in ['148', '177'] and 'command' not in module.foreman_params.keys():
module.fail_json(
msg='No command specified for command template.')
# command input required by api
if 'command' in module.foreman_params.keys():
module.foreman_params['inputs'] = {"command": f"{module.foreman_params['command']}"}
module.foreman_params.pop('command')
with module.api_connection():
module.run()
|
39,998 |
def _gen_dataframe(anno, length, index_names):
if isinstance(anno, pd.DataFrame) or isinstance(anno, pd.Series):
anno = anno.copy()
if not is_string_dtype(anno.index):
logger.warning("Transforming to str index.")
anno.index = anno.index.astype(str)
if isinstance(anno, pd.Series):
anno = pd.DataFrame(anno)
return anno
if anno is None or len(anno) == 0:
_anno = pd.DataFrame(index=RangeIndex(0, length, name=None).astype(str))
else:
for index_name in index_names:
if index_name in anno:
_anno = pd.DataFrame(
anno,
index=anno[index_name],
columns=[k for k in anno.keys() if k != index_name],
)
break
else:
_anno = pd.DataFrame(
anno, index=RangeIndex(0, length, name=None).astype(str)
)
return _anno
|
def _gen_dataframe(anno, length, index_names):
if isinstance(anno, (pd.DataFrame, pd.Series)):
anno = anno.copy()
if not is_string_dtype(anno.index):
logger.warning("Transforming to str index.")
anno.index = anno.index.astype(str)
if isinstance(anno, pd.Series):
anno = pd.DataFrame(anno)
return anno
if anno is None or len(anno) == 0:
_anno = pd.DataFrame(index=RangeIndex(0, length, name=None).astype(str))
else:
for index_name in index_names:
if index_name in anno:
_anno = pd.DataFrame(
anno,
index=anno[index_name],
columns=[k for k in anno.keys() if k != index_name],
)
break
else:
_anno = pd.DataFrame(
anno, index=RangeIndex(0, length, name=None).astype(str)
)
return _anno
|
10,089 |
def main():
ignore_files = frozenset((
'*/.git_keep',
'*/galaxy/data/default/role/*/main.yml.j2',
'*/galaxy/data/default/role/*/test.yml.j2',
'*/galaxy/data/default/collection/plugins/README.md.j2',
))
non_py_files = []
for root, dirs, files in os.walk('lib/ansible/'):
for filename in files:
path = os.path.join(root, filename)
if os.path.splitext(path)[-1] not in ('.py', '.pyc', '.pyo'):
add = True
for ignore in ignore_files:
if fnmatch.fnmatch(path, ignore):
add = False
if add:
non_py_files.append(path[12:])
with tempfile.TemporaryDirectory() as tmp_dir:
p = subprocess.Popen(
['python', 'setup.py', 'install', '--root=%s' % tmp_dir],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
universal_newlines=True,
)
stdout, stderr = p.communicate()
match = re.search('^creating (%s/.*?/(?:site|dist)-packages/ansible)$' % tmp_dir, stdout, flags=re.M)
for filename in non_py_files:
path = os.path.join(match.group(1), filename)
if not os.path.exists(path):
print('lib/ansible/%s: File not installed' % filename)
|
def main():
ignore_files = frozenset((
'*/.git_keep',
'*/galaxy/data/default/role/*/main.yml.j2',
'*/galaxy/data/default/role/*/test.yml.j2',
'*/galaxy/data/default/collection/plugins/README.md.j2',
))
non_py_files = []
for root, dirs, files in os.walk('lib/ansible/'):
for filename in files:
path = os.path.join(root, filename)
if os.path.splitext(path)[1] not in ('.py', '.pyc', '.pyo'):
add = True
for ignore in ignore_files:
if fnmatch.fnmatch(path, ignore):
add = False
if add:
non_py_files.append(path[12:])
with tempfile.TemporaryDirectory() as tmp_dir:
p = subprocess.Popen(
['python', 'setup.py', 'install', '--root=%s' % tmp_dir],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
universal_newlines=True,
)
stdout, stderr = p.communicate()
match = re.search('^creating (%s/.*?/(?:site|dist)-packages/ansible)$' % tmp_dir, stdout, flags=re.M)
for filename in non_py_files:
path = os.path.join(match.group(1), filename)
if not os.path.exists(path):
print('lib/ansible/%s: File not installed' % filename)
|
25,757 |
def iplot(network, fig=None, bus_colors='blue',
bus_colorscale=None, bus_colorbar=None, bus_sizes=10, bus_text=None,
line_colors='green', line_widths=2, line_text=None, layouter=None, title="", size=None,
branch_components=['Line', 'Link'], iplot=True, jitter=None,
mapbox=False, mapbox_style='open-street-map', mapbox_token="",
mapbox_parameters={}):
"""
Plot the network buses and lines interactively using plotly.
Parameters
----------
fig : dict, default None
If not None, figure is built upon this fig.
bus_colors : dict/pandas.Series
Colors for the buses, defaults to "b"
bus_colorscale : string
Name of colorscale if bus_colors are floats, e.g. 'Jet', 'Viridis'
bus_colorbar : dict
Plotly colorbar, e.g. {'title' : 'my colorbar'}
bus_sizes : dict/pandas.Series
Sizes of bus points, defaults to 10
bus_text : dict/pandas.Series
Text for each bus, defaults to bus names
line_colors : dict/pandas.Series
Colors for the lines, defaults to "g" for Lines and "cyan" for
Links. Colors for branches other than Lines can be
specified using a pandas Series with a MultiIndex.
line_widths : dict/pandas.Series
Widths of lines, defaults to 2. Widths for branches other
than Lines can be specified using a pandas Series with a
MultiIndex.
line_text : dict/pandas.Series
Text for lines, defaults to line names. Text for branches other
than Lines can be specified using a pandas Series with a
MultiIndex.
layouter : networkx.drawing.layout function, default None
Layouting function from `networkx <https://networkx.github.io/>`_ which
overrules coordinates given in ``network.buses[['x','y']]``. See
`list <https://networkx.github.io/documentation/stable/reference/drawing.html#module-networkx.drawing.layout>`_
of available options.
title : string
Graph title
size : None|tuple
Tuple specifying width and height of figure; e.g. (width, heigh).
branch_components : list of str
Branch components to be plotted, defaults to Line and Link.
iplot : bool, default True
Automatically do an interactive plot of the figure.
jitter : None|float
Amount of random noise to add to bus positions to distinguish
overlapping buses
mapbox : bool, default False
Switch to use Mapbox.
mapbox_style : str, defaul 'open-street-map'
Define the mapbox layout style of the interactive plot. If this is set
to a mapbox layout, the argument mapbox_token must be a valid Mapbox
API access token.
Valid open layouts are:
open-street-map, white-bg, carto-positron, carto-darkmatter,
stamen-terrain, stamen-toner, stamen-watercolor
Valid mapbox layouts are:
basic, streets, outdoors, light, dark, satellite, satellite-streets
mapbox_token : string
Mapbox API access token. Obtain from https://www.mapbox.com.
Can also be included in mapbox_parameters as `accesstoken=mapbox_token`.
mapbox_parameters : dict
Configuration parameters of the Mapbox layout.
E.g. {"bearing": 5, "pitch": 10, "zoom": 1, "style": 'dark'}.
Returns
-------
fig: dictionary for plotly figure
"""
defaults_for_branches = {
'Link': dict(color="cyan", width=2),
'Line': dict(color="blue", width=2),
'Transformer': dict(color='green', width=2)
}
if fig is None:
fig = dict(data=[],layout={})
if bus_text is None:
bus_text = 'Bus ' + network.buses.index
x, y = _get_coordinates(network, layouter=layouter)
if jitter is not None:
x = x + np.random.uniform(low=-jitter, high=jitter, size=len(x))
y = y + np.random.uniform(low=-jitter, high=jitter, size=len(y))
bus_trace = dict(x=x, y=y,
text=bus_text,
type="scatter",
mode="markers",
hoverinfo="text",
marker=dict(color=bus_colors,
size=bus_sizes),
)
if bus_colorscale is not None:
bus_trace['marker']['colorscale'] = bus_colorscale
if bus_colorbar is not None:
bus_trace['marker']['colorbar'] = bus_colorbar
def as_branch_series(ser):
if isinstance(ser, dict) and set(ser).issubset(branch_components):
return pd.Series(ser)
elif isinstance(ser, pd.Series):
if isinstance(ser.index, pd.MultiIndex):
return ser
index = ser.index
ser = ser.values
else:
index = network.lines.index
return pd.Series(ser,
index=pd.MultiIndex(levels=(["Line"], index),
labels=(np.zeros(len(index)),
np.arange(len(index)))))
line_colors = as_branch_series(line_colors)
line_widths = as_branch_series(line_widths)
if line_text is not None:
line_text = as_branch_series(line_text)
shapes = []
shape_traces = []
for c in network.iterate_components(branch_components):
l_defaults = defaults_for_branches[c.name]
l_widths = line_widths.get(c.name, l_defaults['width'])
l_colors = line_colors.get(c.name, l_defaults['color'])
if line_text is None:
l_text = c.name + ' ' + c.df.index
else:
l_text = line_text.get(c.name)
if isinstance(l_colors, pd.Series):
if issubclass(l_colors.dtype.type, np.number):
l_colors = None
else:
l_colors.fillna(l_defaults['color'], inplace=True)
x0 = c.df.bus0.map(x)
x1 = c.df.bus1.map(x)
y0 = c.df.bus0.map(y)
y1 = c.df.bus1.map(y)
for line in c.df.index:
color = l_colors if isinstance(l_colors, string_types) else l_colors[line]
width = l_widths if isinstance(l_widths, (int, float)) else l_widths[line]
shapes.append(dict(type='line',
x0=x0[line],
y0=y0[line],
x1=x1[line],
y1=y1[line],
opacity=0.7,
line=dict(color=color, width=width)))
shape_traces.append(dict(x=0.5*(x0+x1),
y=0.5*(y0+y1),
text=l_text,
type="scatter",
mode="markers",
hoverinfo="text",
marker=dict(opacity=0.)))
if mapbox:
shape_traces_latlon = []
for st in shape_traces:
st['lon'] = st.pop('x')
st['lat'] = st.pop('y')
shape_traces_latlon.append(go.Scattermapbox(st))
shape_traces = shape_traces_latlon
shapes_mapbox = []
for s in shapes:
s['lon'] = [s.pop('x0'), s.pop('x1')]
s['lat'] = [s.pop('y0'), s.pop('y1')]
shapes_mapbox.append(go.Scattermapbox(s, mode='lines'))
shapes = shapes_mapbox
bus_trace['lon'] = bus_trace.pop('x')
bus_trace['lat'] = bus_trace.pop('y')
bus_trace = go.Scattermapbox(bus_trace)
fig['data'].extend(shapes + shape_traces + [bus_trace])
else:
fig['data'].extend([bus_trace]+shape_traces)
fig['layout'].update(dict(title=title,
hovermode='closest',
showlegend=False))
if size is not None:
assert len(size) == 2, "Parameter size must specify a tuple (width, height)."
fig['layout'].update(dict(width=size[0],
height=size[1]))
if mapbox:
if mapbox_token != "":
mapbox_parameters['accesstoken'] = mapbox_token
mapbox_parameters.setdefault('style', mapbox_style)
if mapbox_parameters['style'] in _token_required_mb_styles:
assert 'accesstoken' in mapbox_parameters.keys(), ("Using Mapbox "
"layout styles requires a valid access token from https://www.mapbox.com/, "
f"style which do not require a token are:\n{', '.join(_open__mb_styles)}.")
if 'center' not in mapbox_parameters.keys():
lon=(network.buses.x.min() + network.buses.x.max()) / 2
lat=(network.buses.y.min() + network.buses.y.max()) / 2
mapbox_parameters['center'] = dict(lat=lat, lon=lon)
if 'zoom' not in mapbox_parameters.keys():
mapbox_parameters['zoom'] = 2
fig['layout']['mapbox'] = mapbox_parameters
else:
fig['layout']['shapes'] = shapes
if iplot:
if not pltly_present:
logger.warning("Plotly is not present, so interactive plotting won't work.")
else:
pltly.iplot(fig)
return fig
|
def iplot(network, fig=None, bus_colors='blue',
bus_colorscale=None, bus_colorbar=None, bus_sizes=10, bus_text=None,
line_colors='green', line_widths=2, line_text=None, layouter=None, title="", size=None,
branch_components=['Line', 'Link'], iplot=True, jitter=None,
mapbox=False, mapbox_style='open-street-map', mapbox_token="",
mapbox_parameters={}):
"""
Plot the network buses and lines interactively using plotly.
Parameters
----------
fig : dict, default None
If not None, figure is built upon this fig.
bus_colors : dict/pandas.Series
Colors for the buses, defaults to "b"
bus_colorscale : string
Name of colorscale if bus_colors are floats, e.g. 'Jet', 'Viridis'
bus_colorbar : dict
Plotly colorbar, e.g. {'title' : 'my colorbar'}
bus_sizes : dict/pandas.Series
Sizes of bus points, defaults to 10
bus_text : dict/pandas.Series
Text for each bus, defaults to bus names
line_colors : dict/pandas.Series
Colors for the lines, defaults to "g" for Lines and "cyan" for
Links. Colors for branches other than Lines can be
specified using a pandas Series with a MultiIndex.
line_widths : dict/pandas.Series
Widths of lines, defaults to 2. Widths for branches other
than Lines can be specified using a pandas Series with a
MultiIndex.
line_text : dict/pandas.Series
Text for lines, defaults to line names. Text for branches other
than Lines can be specified using a pandas Series with a
MultiIndex.
layouter : networkx.drawing.layout function, default None
Layouting function from `networkx <https://networkx.github.io/>`_ which
overrules coordinates given in ``network.buses[['x','y']]``. See
`list <https://networkx.github.io/documentation/stable/reference/drawing.html#module-networkx.drawing.layout>`_
of available options.
title : string
Graph title
size : None|tuple
Tuple specifying width and height of figure; e.g. (width, heigh).
branch_components : list of str
Branch components to be plotted, defaults to Line and Link.
iplot : bool, default True
Automatically do an interactive plot of the figure.
jitter : None|float
Amount of random noise to add to bus positions to distinguish
overlapping buses
mapbox : bool, default False
Switch to use Mapbox.
mapbox_style : str, defaul 'open-street-map'
Define the mapbox layout style of the interactive plot. If this is set
to a mapbox layout, the argument ``mapbox_token`` must be a valid Mapbox
API access token.
Valid open layouts are:
open-street-map, white-bg, carto-positron, carto-darkmatter,
stamen-terrain, stamen-toner, stamen-watercolor
Valid mapbox layouts are:
basic, streets, outdoors, light, dark, satellite, satellite-streets
mapbox_token : string
Mapbox API access token. Obtain from https://www.mapbox.com.
Can also be included in mapbox_parameters as `accesstoken=mapbox_token`.
mapbox_parameters : dict
Configuration parameters of the Mapbox layout.
E.g. {"bearing": 5, "pitch": 10, "zoom": 1, "style": 'dark'}.
Returns
-------
fig: dictionary for plotly figure
"""
defaults_for_branches = {
'Link': dict(color="cyan", width=2),
'Line': dict(color="blue", width=2),
'Transformer': dict(color='green', width=2)
}
if fig is None:
fig = dict(data=[],layout={})
if bus_text is None:
bus_text = 'Bus ' + network.buses.index
x, y = _get_coordinates(network, layouter=layouter)
if jitter is not None:
x = x + np.random.uniform(low=-jitter, high=jitter, size=len(x))
y = y + np.random.uniform(low=-jitter, high=jitter, size=len(y))
bus_trace = dict(x=x, y=y,
text=bus_text,
type="scatter",
mode="markers",
hoverinfo="text",
marker=dict(color=bus_colors,
size=bus_sizes),
)
if bus_colorscale is not None:
bus_trace['marker']['colorscale'] = bus_colorscale
if bus_colorbar is not None:
bus_trace['marker']['colorbar'] = bus_colorbar
def as_branch_series(ser):
if isinstance(ser, dict) and set(ser).issubset(branch_components):
return pd.Series(ser)
elif isinstance(ser, pd.Series):
if isinstance(ser.index, pd.MultiIndex):
return ser
index = ser.index
ser = ser.values
else:
index = network.lines.index
return pd.Series(ser,
index=pd.MultiIndex(levels=(["Line"], index),
labels=(np.zeros(len(index)),
np.arange(len(index)))))
line_colors = as_branch_series(line_colors)
line_widths = as_branch_series(line_widths)
if line_text is not None:
line_text = as_branch_series(line_text)
shapes = []
shape_traces = []
for c in network.iterate_components(branch_components):
l_defaults = defaults_for_branches[c.name]
l_widths = line_widths.get(c.name, l_defaults['width'])
l_colors = line_colors.get(c.name, l_defaults['color'])
if line_text is None:
l_text = c.name + ' ' + c.df.index
else:
l_text = line_text.get(c.name)
if isinstance(l_colors, pd.Series):
if issubclass(l_colors.dtype.type, np.number):
l_colors = None
else:
l_colors.fillna(l_defaults['color'], inplace=True)
x0 = c.df.bus0.map(x)
x1 = c.df.bus1.map(x)
y0 = c.df.bus0.map(y)
y1 = c.df.bus1.map(y)
for line in c.df.index:
color = l_colors if isinstance(l_colors, string_types) else l_colors[line]
width = l_widths if isinstance(l_widths, (int, float)) else l_widths[line]
shapes.append(dict(type='line',
x0=x0[line],
y0=y0[line],
x1=x1[line],
y1=y1[line],
opacity=0.7,
line=dict(color=color, width=width)))
shape_traces.append(dict(x=0.5*(x0+x1),
y=0.5*(y0+y1),
text=l_text,
type="scatter",
mode="markers",
hoverinfo="text",
marker=dict(opacity=0.)))
if mapbox:
shape_traces_latlon = []
for st in shape_traces:
st['lon'] = st.pop('x')
st['lat'] = st.pop('y')
shape_traces_latlon.append(go.Scattermapbox(st))
shape_traces = shape_traces_latlon
shapes_mapbox = []
for s in shapes:
s['lon'] = [s.pop('x0'), s.pop('x1')]
s['lat'] = [s.pop('y0'), s.pop('y1')]
shapes_mapbox.append(go.Scattermapbox(s, mode='lines'))
shapes = shapes_mapbox
bus_trace['lon'] = bus_trace.pop('x')
bus_trace['lat'] = bus_trace.pop('y')
bus_trace = go.Scattermapbox(bus_trace)
fig['data'].extend(shapes + shape_traces + [bus_trace])
else:
fig['data'].extend([bus_trace]+shape_traces)
fig['layout'].update(dict(title=title,
hovermode='closest',
showlegend=False))
if size is not None:
assert len(size) == 2, "Parameter size must specify a tuple (width, height)."
fig['layout'].update(dict(width=size[0],
height=size[1]))
if mapbox:
if mapbox_token != "":
mapbox_parameters['accesstoken'] = mapbox_token
mapbox_parameters.setdefault('style', mapbox_style)
if mapbox_parameters['style'] in _token_required_mb_styles:
assert 'accesstoken' in mapbox_parameters.keys(), ("Using Mapbox "
"layout styles requires a valid access token from https://www.mapbox.com/, "
f"style which do not require a token are:\n{', '.join(_open__mb_styles)}.")
if 'center' not in mapbox_parameters.keys():
lon=(network.buses.x.min() + network.buses.x.max()) / 2
lat=(network.buses.y.min() + network.buses.y.max()) / 2
mapbox_parameters['center'] = dict(lat=lat, lon=lon)
if 'zoom' not in mapbox_parameters.keys():
mapbox_parameters['zoom'] = 2
fig['layout']['mapbox'] = mapbox_parameters
else:
fig['layout']['shapes'] = shapes
if iplot:
if not pltly_present:
logger.warning("Plotly is not present, so interactive plotting won't work.")
else:
pltly.iplot(fig)
return fig
|
32,065 |
def fetch_incidents_command(client):
"""
The fetch runs on instance's context, gets the formatted incidents, and add attachments if needed.
It then clears the context so incident would not be duplicated.
"""
data = get_integration_context()
incidents = data.pop('incidents') if 'incidents' in data else []
for incident in incidents:
if 'attachment' in incident:
demisto.debug('Found incident, getting attachments')
_add_attachments(client, incident)
# clear the integration contex from already seen incidents
set_integration_context({'incidents': []})
return incidents
|
def fetch_incidents_command(client):
"""
The fetch runs on instance's context, gets the formatted incidents, and add attachments if needed.
It then clears the context so incident would not be duplicated.
"""
data = get_integration_context()
incidents = data.pop('incidents', [])
for incident in incidents:
if 'attachment' in incident:
demisto.debug('Found incident, getting attachments')
_add_attachments(client, incident)
# clear the integration contex from already seen incidents
set_integration_context({'incidents': []})
return incidents
|
4,471 |
def _voroni_topomap(data, pos, info, sphere, ch_type, outlines, ax, cmap,
norm):
"""Make a Voroni diagram on a topomap."""
from scipy.spatial import Voronoi
sphere = _check_sphere(sphere)
clip_origin = _adjust_meg_sphere(sphere, info, ch_type)[1]
outlines = _make_head_outlines(
sphere, pos, outlines, clip_origin)
rx, ry = outlines['clip_radius']
cx, cy = clip_origin
# add faroff points in a circle
vor = Voronoi(np.concatenate([pos, [(np.cos(2 * np.pi / 100 * t),
np.sin(2 * np.pi / 100 * t))
for t in range(101)]]))
for point_idx, region_idx in enumerate(vor.point_region[:-101]):
if -1 in vor.regions[region_idx]:
continue
polygon = list()
for i in vor.regions[region_idx]:
x, y = vor.vertices[i]
if (x - cx)**2 / rx**2 + (y - cy)**2 / ry**2 < 1:
polygon.append((x, y))
else:
x *= rx / np.linalg.norm(vor.vertices[i])
y *= ry / np.linalg.norm(vor.vertices[i])
polygon.append((x, y))
ax.fill(*zip(*polygon), color=cmap(norm(data[point_idx])))
|
def _voronoi_topomap(data, pos, info, sphere, ch_type, outlines, ax, cmap,
norm):
"""Make a Voroni diagram on a topomap."""
from scipy.spatial import Voronoi
sphere = _check_sphere(sphere)
clip_origin = _adjust_meg_sphere(sphere, info, ch_type)[1]
outlines = _make_head_outlines(
sphere, pos, outlines, clip_origin)
rx, ry = outlines['clip_radius']
cx, cy = clip_origin
# add faroff points in a circle
vor = Voronoi(np.concatenate([pos, [(np.cos(2 * np.pi / 100 * t),
np.sin(2 * np.pi / 100 * t))
for t in range(101)]]))
for point_idx, region_idx in enumerate(vor.point_region[:-101]):
if -1 in vor.regions[region_idx]:
continue
polygon = list()
for i in vor.regions[region_idx]:
x, y = vor.vertices[i]
if (x - cx)**2 / rx**2 + (y - cy)**2 / ry**2 < 1:
polygon.append((x, y))
else:
x *= rx / np.linalg.norm(vor.vertices[i])
y *= ry / np.linalg.norm(vor.vertices[i])
polygon.append((x, y))
ax.fill(*zip(*polygon), color=cmap(norm(data[point_idx])))
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.