id
int64 11
59.9k
| original
stringlengths 33
150k
| modified
stringlengths 37
150k
|
---|---|---|
42,529 |
def projection_l1_1(values: np.ndarray, eps: Union[int, float, np.ndarray]) -> np.ndarray:
"""
This function computes the orthogonal projections of a batch of points on L1-balls of given radii
The batch size is m = values.shape[0]. The points are flattened to dimension
n = np.prod(value.shape[1:]). This is required to facilitate sorting.
If a[0] <= ... <= a[n-1], then the projection can be characterized using the largest j such that
a[j+1] +...+ a[n-1] - a[j]*(n-j-1) >= eps. The ith coordinate of projection is equal to 0
if i=0,...,j.
:param values: A batch of m points, each an ndarray
:param eps: The radii of the respective L1-balls
:return: projections
"""
shp = values.shape
a = values.copy()
n = np.prod(a.shape[1:])
m = a.shape[0]
a = a.reshape((m, n))
sgns = np.sign(a)
a = np.abs(a)
a_argsort = a.argsort(axis=1)
a_sorted = np.zeros((m, n))
for i in range(m):
a_sorted[i, :] = a[i, a_argsort[i, :]]
a_argsort_inv = a.argsort(axis=1).argsort(axis=1)
mat = np.zeros((m, 2))
# if a_sorted[i, n-1] >= a_sorted[i, n-2] + eps, then the projection is [0,...,0,eps]
done = False
active = [1] * m
after_vec = np.zeros((m, n))
proj = a_sorted.copy()
j = n - 2
while j >= 0:
mat[:, 0] = mat[:, 0] + a_sorted[:, j+1] # = sum(a_sorted[: i] : i = j+1,...,n-1
mat[:, 1] = a_sorted[:, j] * (n-j-1) + eps
# Find the max in each problem max{ sum{a_sorted[:, i] : i=j+1,..,n-1} , a_sorted[:, j] * (n-j-1) + eps }
row_maxes = np.max(mat, axis=1)
# Set to 1 if max > a_sorted[:, j] * (n-j-1) + eps > sum ; otherwise, set to 0
ind_set = np.sign(np.sign(row_maxes - mat[:, 0]))
# ind_set = ind_set.reshape((m, 1))
# Multiplier for activation
act_multiplier = (1 - ind_set) * active
act_multiplier = np.transpose([np.transpose(act_multiplier)] * n)
# if done, the projection is supported by the current indices j+1,..,n-1 and the amount by which each
# has to be reduced is delta
delta = (mat[:, 0] - eps)/(n - j - 1)
# The vector of reductions
delta_vec = np.array([delta] * (n - j - 1))
delta_vec = np.transpose(delta_vec)
# The sub-vectors: a_sorted[:, (j+1):]
a_sub = a_sorted[:, (j+1):]
# After reduction by delta_vec
a_after = a_sub - delta_vec
after_vec[:, (j+1):] = a_after
proj = (act_multiplier * after_vec) + ((1 - act_multiplier) * proj)
active = active * ind_set
if sum(active) == 0:
done = True
break
j -= 1
if not done:
proj = active * a_sorted + (1 - active) * proj
for i in range(m):
proj[i, :] = proj[i, a_argsort_inv[i, :]]
proj = sgns * proj
proj = proj.reshape(shp)
return proj
|
def projection_l1_1(values: np.ndarray, eps: Union[int, float, np.ndarray]) -> np.ndarray:
"""
This function computes the orthogonal projections of a batch of points on L1-balls of given radii
The batch size is m = values.shape[0]. The points are flattened to dimension
n = np.prod(value.shape[1:]). This is required to facilitate sorting.
If a[0] <= ... <= a[n-1], then the projection can be characterized using the largest j such that
a[j+1] +...+ a[n-1] - a[j]*(n-j-1) >= eps. The ith coordinate of projection is equal to 0
if i=0,...,j.
:param values: A batch of m points, each an ndarray
:param eps: The radii of the respective L1-balls
:return: projections
"""
shp = values.shape
a = values.copy()
n = np.prod(a.shape[1:])
m = a.shape[0]
a = a.reshape((m, n))
sgns = np.sign(a)
a = np.abs(a)
a_argsort = a.argsort(axis=1)
a_sorted = np.zeros((m, n))
for i in range(m):
a_sorted[i, :] = a[i, a_argsort[i, :]]
a_argsort_inv = a.argsort(axis=1).argsort(axis=1)
mat = np.zeros((m, 2))
# if a_sorted[i, n-1] >= a_sorted[i, n-2] + eps, then the projection is [0,...,0,eps]
done = False
active = [1] * m
after_vec = np.zeros((m, n))
proj = a_sorted.copy()
j = n - 2
while j >= 0:
mat[:, 0] = mat[:, 0] + a_sorted[:, j + 1] # = sum(a_sorted[: i] : i = j + 1,...,n-1
mat[:, 1] = a_sorted[:, j] * (n-j-1) + eps
# Find the max in each problem max{ sum{a_sorted[:, i] : i=j+1,..,n-1} , a_sorted[:, j] * (n-j-1) + eps }
row_maxes = np.max(mat, axis=1)
# Set to 1 if max > a_sorted[:, j] * (n-j-1) + eps > sum ; otherwise, set to 0
ind_set = np.sign(np.sign(row_maxes - mat[:, 0]))
# ind_set = ind_set.reshape((m, 1))
# Multiplier for activation
act_multiplier = (1 - ind_set) * active
act_multiplier = np.transpose([np.transpose(act_multiplier)] * n)
# if done, the projection is supported by the current indices j+1,..,n-1 and the amount by which each
# has to be reduced is delta
delta = (mat[:, 0] - eps)/(n - j - 1)
# The vector of reductions
delta_vec = np.array([delta] * (n - j - 1))
delta_vec = np.transpose(delta_vec)
# The sub-vectors: a_sorted[:, (j+1):]
a_sub = a_sorted[:, (j+1):]
# After reduction by delta_vec
a_after = a_sub - delta_vec
after_vec[:, (j+1):] = a_after
proj = (act_multiplier * after_vec) + ((1 - act_multiplier) * proj)
active = active * ind_set
if sum(active) == 0:
done = True
break
j -= 1
if not done:
proj = active * a_sorted + (1 - active) * proj
for i in range(m):
proj[i, :] = proj[i, a_argsort_inv[i, :]]
proj = sgns * proj
proj = proj.reshape(shp)
return proj
|
8,431 |
def _centroid_single_region(spectrum, region=None):
"""
Calculate the centroid of the spectrum based on the flux and uncertainty
in the spectrum.
Parameters
----------
spectrum : `~specutils.spectra.spectrum1d.Spectrum1D`
The spectrum object overwhich the centroid will be calculated.
region: `~specutils.utils.SpectralRegion`
Region within the spectrum to calculate the centroid.
Returns
-------
centroid : float or list (based on region input)
Centroid of the spectrum or within the regions
Notes
-----
This is a helper function for the above `centroid()` method.
"""
if region is not None:
calc_spectrum = extract_region(spectrum, region)
else:
calc_spectrum = spectrum
flux = calc_spectrum.flux
dispersion = (calc_spectrum.spectral_axis).quantity
if len(flux.shape) > 1:
dispersion = (np.tile(dispersion, [flux.shape[0], 1]))
# the axis=-1 will enable this to run on single-dispersion, single-flux
# and single-dispersion, multiple-flux
return np.sum(flux * dispersion, axis=-1) / np.sum(flux, axis=-1)
|
def _centroid_single_region(spectrum, region=None):
"""
Calculate the centroid of the spectrum based on the flux and uncertainty
in the spectrum.
Parameters
----------
spectrum : `~specutils.spectra.spectrum1d.Spectrum1D`
The spectrum object overwhich the centroid will be calculated.
region: `~specutils.utils.SpectralRegion`
Region within the spectrum to calculate the centroid.
Returns
-------
centroid : float or list (based on region input)
Centroid of the spectrum or within the regions
Notes
-----
This is a helper function for the above `centroid()` method.
"""
if region is not None:
calc_spectrum = extract_region(spectrum, region)
else:
calc_spectrum = spectrum
flux = calc_spectrum.flux
dispersion = calc_spectrum.spectral_axis.quantity
if len(flux.shape) > 1:
dispersion = (np.tile(dispersion, [flux.shape[0], 1]))
# the axis=-1 will enable this to run on single-dispersion, single-flux
# and single-dispersion, multiple-flux
return np.sum(flux * dispersion, axis=-1) / np.sum(flux, axis=-1)
|
34,370 |
def _get_conversation_ids_to_process(
tracker_store: TrackerStore,
requested_conversation_ids: Optional[List[Text]] = None,
) -> List[Text]:
"""Get conversation IDs that are good for processing.
Finds the intersection of events that are contained in the tracker store with
those events requested as a command-line argument.
Prints an error and if no conversation IDs are found in the tracker, or if no
overlap is found between those contained in the tracker and those requested
by the user.
Args:
tracker_store: Tracker store to source events from.
requested_conversation_ids: List of conversation IDs that should be published
requested by the user. If `None`, all conversation IDs contained in the
tracker store are published.
Returns:
Conversation IDs that are both requested and contained in the tracker
store. If no conversation IDs are requested, all conversation IDs in the
tracker store are returned.
"""
conversation_ids_in_tracker_store = list(tracker_store.keys())
if not conversation_ids_in_tracker_store:
cli_utils.print_error_and_exit(
f"Could not find any conversations in connected tracker store. Exiting."
)
if not requested_conversation_ids:
return conversation_ids_in_tracker_store
missing_ids_in_tracker_store = set(requested_conversation_ids) - set(
conversation_ids_in_tracker_store
)
if missing_ids_in_tracker_store:
cli_utils.print_warning(
f"Could not find the following requested "
f"conversation IDs in connected tracker store: "
f"{', '.join(sorted(missing_ids_in_tracker_store))}"
)
conversation_ids_to_process = set(conversation_ids_in_tracker_store) & set(
requested_conversation_ids
)
if not conversation_ids_to_process:
cli_utils.print_error_and_exit(
"Could not find an overlap between the requested "
"conversation IDs and those found in the tracker store. Exiting."
)
return list(conversation_ids_to_process)
|
def _get_conversation_ids_to_process(
tracker_store: TrackerStore,
requested_conversation_ids: Optional[List[Text]] = None,
) -> List[Text]:
"""Get conversation IDs that are good for processing.
Finds the intersection of events that are contained in the tracker store with
those events requested as a command-line argument.
Prints an error and if no conversation IDs are found in the tracker, or if no
overlap is found between those contained in the tracker and those requested
by the user.
Args:
tracker_store: Tracker store to source events from.
requested_conversation_ids: List of conversation IDs that should be published
requested by the user. If `None`, all conversation IDs contained in the
tracker store are published.
Returns:
Conversation IDs that are both requested and contained in the tracker
store. If no conversation IDs are requested, all conversation IDs in the
tracker store are returned.
"""
conversation_ids_in_tracker_store =set(tracker_store.keys())
if not conversation_ids_in_tracker_store:
cli_utils.print_error_and_exit(
f"Could not find any conversations in connected tracker store. Exiting."
)
if not requested_conversation_ids:
return conversation_ids_in_tracker_store
missing_ids_in_tracker_store = set(requested_conversation_ids) - set(
conversation_ids_in_tracker_store
)
if missing_ids_in_tracker_store:
cli_utils.print_warning(
f"Could not find the following requested "
f"conversation IDs in connected tracker store: "
f"{', '.join(sorted(missing_ids_in_tracker_store))}"
)
conversation_ids_to_process = set(conversation_ids_in_tracker_store) & set(
requested_conversation_ids
)
if not conversation_ids_to_process:
cli_utils.print_error_and_exit(
"Could not find an overlap between the requested "
"conversation IDs and those found in the tracker store. Exiting."
)
return list(conversation_ids_to_process)
|
55,632 |
def undistort_image(image: torch.Tensor, K: torch.Tensor, dist: torch.Tensor) -> torch.Tensor:
r"""Compensate an image for lens distortion.
Radial :math:`(k_1, k_2, k_3, k_4, k_4, k_6)`,
tangential :math:`(p_1, p_2)`, thin prism :math:`(s_1, s_2, s_3, s_4)`, and tilt :math:`(\tau_x, \tau_y)` distortion models are considered in this function.
Args:
image: Input image with shape :math:`(*, C, H, W)`.
K: Intrinsic camera matrix with shape :math:`(*, 3, 3)`.
dist: Distortion coefficients
:math:`(k_1,k_2,p_1,p_2[,k_3[,k_4,k_5,k_6[,s_1,s_2,s_3,s_4[,\tau_x,\tau_y]]]])`. This is
a vector with 4, 5, 8, 12 or 14 elements with shape :math:`(*, n)`
Returns:
Undistorted image with shape :math:`(*, C, H, W)`.
"""
assert image.dim() >= 2
assert K.shape[-2:] == (3, 3)
assert dist.shape[-1] in [4, 5, 8, 12, 14]
B, _, rows, cols = image.shape
if image.dtype != torch.float:
image = image.float()
# Create point coordinates for each pixel of the image
x, y = torch.meshgrid(torch.arange(cols), torch.arange(rows))
pts: torch.Tensor = torch.cat([x.T.float().reshape(-1,1), y.T.reshape(-1,1)], 1) # (rows*cols)x2
# Distort points and define maps
ptsd: torch.Tensor = distort_points(pts, K, dist) # Bx(rows*cols)x2
mapx: torch.Tensor = ptsd[..., 0].reshape(B, rows, cols) # B x rows x cols, float
mapy: torch.Tensor = ptsd[..., 1].reshape(B, rows, cols) # B x rows x cols, float
# Remap image to undistort
out = remap(image, mapx, mapy, align_corners=True)
out = torch.round(torch.clamp(out, 0, 255)).to(torch.uint8)
return out
|
def undistort_image(image: torch.Tensor, K: torch.Tensor, dist: torch.Tensor) -> torch.Tensor:
r"""Compensate an image for lens distortion.
Radial :math:`(k_1, k_2, k_3, k_4, k_4, k_6)`,
tangential :math:`(p_1, p_2)`, thin prism :math:`(s_1, s_2, s_3, s_4)`, and tilt :math:`(\tau_x, \tau_y)` distortion models are considered in this function.
Args:
image: Input image with shape :math:`(*, C, H, W)`.
K: Intrinsic camera matrix with shape :math:`(*, 3, 3)`.
dist: Distortion coefficients
:math:`(k_1,k_2,p_1,p_2[,k_3[,k_4,k_5,k_6[,s_1,s_2,s_3,s_4[,\tau_x,\tau_y]]]])`. This is
a vector with 4, 5, 8, 12 or 14 elements with shape :math:`(*, n)`
Returns:
Undistorted image with shape :math:`(*, C, H, W)`.
"""
if len(image.shape) < 2:
raise ValueError(f"Image shape is invalid. Got: {image.shape}.")
assert K.shape[-2:] == (3, 3)
assert dist.shape[-1] in [4, 5, 8, 12, 14]
B, _, rows, cols = image.shape
if image.dtype != torch.float:
image = image.float()
# Create point coordinates for each pixel of the image
x, y = torch.meshgrid(torch.arange(cols), torch.arange(rows))
pts: torch.Tensor = torch.cat([x.T.float().reshape(-1,1), y.T.reshape(-1,1)], 1) # (rows*cols)x2
# Distort points and define maps
ptsd: torch.Tensor = distort_points(pts, K, dist) # Bx(rows*cols)x2
mapx: torch.Tensor = ptsd[..., 0].reshape(B, rows, cols) # B x rows x cols, float
mapy: torch.Tensor = ptsd[..., 1].reshape(B, rows, cols) # B x rows x cols, float
# Remap image to undistort
out = remap(image, mapx, mapy, align_corners=True)
out = torch.round(torch.clamp(out, 0, 255)).to(torch.uint8)
return out
|
17,454 |
def apply_variable_ufunc(
func,
*args,
signature,
exclude_dims=frozenset(),
dask="forbidden",
output_dtypes=None,
vectorize=False,
keep_attrs="override",
dask_gufunc_kwargs=None,
):
"""Apply a ndarray level function over Variable and/or ndarray objects."""
from .variable import Variable, as_compatible_data
dim_sizes = unified_dim_sizes(
(a for a in args if hasattr(a, "dims")), exclude_dims=exclude_dims
)
broadcast_dims = tuple(
dim for dim in dim_sizes if dim not in signature.all_core_dims
)
output_dims = [broadcast_dims + out for out in signature.output_core_dims]
input_data = [
broadcast_compat_data(arg, broadcast_dims, core_dims)
if isinstance(arg, Variable)
else arg
for arg, core_dims in zip(args, signature.input_core_dims)
]
if any(is_duck_dask_array(array) for array in input_data):
if dask == "forbidden":
raise ValueError(
"apply_ufunc encountered a dask array on an "
"argument, but handling for dask arrays has not "
"been enabled. Either set the ``dask`` argument "
"or load your data into memory first with "
"``.load()`` or ``.compute()``"
)
elif dask == "parallelized":
numpy_func = func
if dask_gufunc_kwargs is None:
dask_gufunc_kwargs = {}
else:
dask_gufunc_kwargs = dask_gufunc_kwargs.copy()
allow_rechunk = dask_gufunc_kwargs.get("allow_rechunk", None)
if allow_rechunk is None:
for n, (data, core_dims) in enumerate(
zip(input_data, signature.input_core_dims)
):
if is_duck_dask_array(data):
# core dimensions cannot span multiple chunks
for axis, dim in enumerate(core_dims, start=-len(core_dims)):
if len(data.chunks[axis]) != 1:
raise ValueError(
f"dimension {dim} on {n}th function argument to "
"apply_ufunc with dask='parallelized' consists of "
"multiple chunks, but is also a core dimension. To "
"fix, either rechunk into a single dask array chunk along "
f"this dimension, i.e., ``.chunk(dict({dim}=-1))``, or "
"pass ``allow_rechunk=True`` in ``dask_gufunc_kwargs`` "
"but beware that this may significantly increase memory usage."
)
dask_gufunc_kwargs["allow_rechunk"] = True
output_sizes = dask_gufunc_kwargs.pop("output_sizes", {})
if output_sizes:
output_sizes_renamed = {}
for key, value in output_sizes.items():
if key not in signature.all_output_core_dims:
raise ValueError(
f"dimension '{key}' in 'output_sizes' must correspond to output_core_dims"
)
output_sizes_renamed[signature.dims_map[key]] = value
dask_gufunc_kwargs["output_sizes"] = output_sizes_renamed
for key in signature.all_output_core_dims:
if key not in signature.all_input_core_dims and key not in output_sizes:
raise ValueError(
f"dimension '{key}' in 'output_core_dims' needs corresponding (dim, size) in 'output_sizes'"
)
def func(*arrays):
import dask.array as da
res = da.apply_gufunc(
numpy_func,
signature.to_gufunc_string(exclude_dims),
*arrays,
vectorize=vectorize,
output_dtypes=output_dtypes,
**dask_gufunc_kwargs,
)
# todo: covers for https://github.com/dask/dask/pull/6207
# remove when minimal dask version >= 2.17.0
from dask import __version__ as dask_version
if LooseVersion(dask_version) < LooseVersion("2.17.0"):
if signature.num_outputs > 1:
res = tuple(res)
return res
elif dask == "allowed":
pass
else:
raise ValueError(
"unknown setting for dask array handling in "
"apply_ufunc: {}".format(dask)
)
else:
if vectorize:
func = _vectorize(
func, signature, output_dtypes=output_dtypes, exclude_dims=exclude_dims
)
result_data = func(*input_data)
if signature.num_outputs == 1:
result_data = (result_data,)
elif (
not isinstance(result_data, tuple) or len(result_data) != signature.num_outputs
):
raise ValueError(
"applied function does not have the number of "
"outputs specified in the ufunc signature. "
"Result is not a tuple of {} elements: {!r}".format(
signature.num_outputs, result_data
)
)
objs = _all_of_type(args, Variable)
attrs = merge_attrs(
[obj.attrs for obj in objs],
combine_attrs=keep_attrs,
)
output = []
for dims, data in zip(output_dims, result_data):
data = as_compatible_data(data)
if data.ndim != len(dims):
raise ValueError(
"applied function returned data with unexpected "
f"number of dimensions. Received {data.ndim} dimension(s) but "
f"expected {len(dims)} dimensions with names: {dims!r}"
)
var = Variable(dims, data, fastpath=True)
for dim, new_size in var.sizes.items():
if dim in dim_sizes and new_size != dim_sizes[dim]:
raise ValueError(
"size of dimension {!r} on inputs was unexpectedly "
"changed by applied function from {} to {}. Only "
"dimensions specified in ``exclude_dims`` with "
"xarray.apply_ufunc are allowed to change size.".format(
dim, dim_sizes[dim], new_size
)
)
var.attrs = attrs
output.append(var)
if signature.num_outputs == 1:
return output[0]
else:
return tuple(output)
|
def apply_variable_ufunc(
func,
*args,
signature,
exclude_dims=frozenset(),
dask="forbidden",
output_dtypes=None,
vectorize=False,
keep_attrs="override",
dask_gufunc_kwargs=None,
):
"""Apply a ndarray level function over Variable and/or ndarray objects."""
from .variable import Variable, as_compatible_data
dim_sizes = unified_dim_sizes(
(a for a in args if hasattr(a, "dims")), exclude_dims=exclude_dims
)
broadcast_dims = tuple(
dim for dim in dim_sizes if dim not in signature.all_core_dims
)
output_dims = [broadcast_dims + out for out in signature.output_core_dims]
input_data = [
broadcast_compat_data(arg, broadcast_dims, core_dims)
if isinstance(arg, Variable)
else arg
for arg, core_dims in zip(args, signature.input_core_dims)
]
if any(is_duck_dask_array(array) for array in input_data):
if dask == "forbidden":
raise ValueError(
"apply_ufunc encountered a dask array on an "
"argument, but handling for dask arrays has not "
"been enabled. Either set the ``dask`` argument "
"or load your data into memory first with "
"``.load()`` or ``.compute()``"
)
elif dask == "parallelized":
numpy_func = func
if dask_gufunc_kwargs is None:
dask_gufunc_kwargs = {}
else:
dask_gufunc_kwargs = dask_gufunc_kwargs.copy()
allow_rechunk = dask_gufunc_kwargs.get("allow_rechunk", None)
if allow_rechunk is None:
for n, (data, core_dims) in enumerate(
zip(input_data, signature.input_core_dims)
):
if is_duck_dask_array(data):
# core dimensions cannot span multiple chunks
for axis, dim in enumerate(core_dims, start=-len(core_dims)):
if len(data.chunks[axis]) != 1:
raise ValueError(
f"dimension {dim} on {n}th function argument to "
"apply_ufunc with dask='parallelized' consists of "
"multiple chunks, but is also a core dimension. To "
"fix, either rechunk into a single dask array chunk along "
f"this dimension, i.e., ``.chunk(dict(dim=-1))``, or "
"pass ``allow_rechunk=True`` in ``dask_gufunc_kwargs`` "
"but beware that this may significantly increase memory usage."
)
dask_gufunc_kwargs["allow_rechunk"] = True
output_sizes = dask_gufunc_kwargs.pop("output_sizes", {})
if output_sizes:
output_sizes_renamed = {}
for key, value in output_sizes.items():
if key not in signature.all_output_core_dims:
raise ValueError(
f"dimension '{key}' in 'output_sizes' must correspond to output_core_dims"
)
output_sizes_renamed[signature.dims_map[key]] = value
dask_gufunc_kwargs["output_sizes"] = output_sizes_renamed
for key in signature.all_output_core_dims:
if key not in signature.all_input_core_dims and key not in output_sizes:
raise ValueError(
f"dimension '{key}' in 'output_core_dims' needs corresponding (dim, size) in 'output_sizes'"
)
def func(*arrays):
import dask.array as da
res = da.apply_gufunc(
numpy_func,
signature.to_gufunc_string(exclude_dims),
*arrays,
vectorize=vectorize,
output_dtypes=output_dtypes,
**dask_gufunc_kwargs,
)
# todo: covers for https://github.com/dask/dask/pull/6207
# remove when minimal dask version >= 2.17.0
from dask import __version__ as dask_version
if LooseVersion(dask_version) < LooseVersion("2.17.0"):
if signature.num_outputs > 1:
res = tuple(res)
return res
elif dask == "allowed":
pass
else:
raise ValueError(
"unknown setting for dask array handling in "
"apply_ufunc: {}".format(dask)
)
else:
if vectorize:
func = _vectorize(
func, signature, output_dtypes=output_dtypes, exclude_dims=exclude_dims
)
result_data = func(*input_data)
if signature.num_outputs == 1:
result_data = (result_data,)
elif (
not isinstance(result_data, tuple) or len(result_data) != signature.num_outputs
):
raise ValueError(
"applied function does not have the number of "
"outputs specified in the ufunc signature. "
"Result is not a tuple of {} elements: {!r}".format(
signature.num_outputs, result_data
)
)
objs = _all_of_type(args, Variable)
attrs = merge_attrs(
[obj.attrs for obj in objs],
combine_attrs=keep_attrs,
)
output = []
for dims, data in zip(output_dims, result_data):
data = as_compatible_data(data)
if data.ndim != len(dims):
raise ValueError(
"applied function returned data with unexpected "
f"number of dimensions. Received {data.ndim} dimension(s) but "
f"expected {len(dims)} dimensions with names: {dims!r}"
)
var = Variable(dims, data, fastpath=True)
for dim, new_size in var.sizes.items():
if dim in dim_sizes and new_size != dim_sizes[dim]:
raise ValueError(
"size of dimension {!r} on inputs was unexpectedly "
"changed by applied function from {} to {}. Only "
"dimensions specified in ``exclude_dims`` with "
"xarray.apply_ufunc are allowed to change size.".format(
dim, dim_sizes[dim], new_size
)
)
var.attrs = attrs
output.append(var)
if signature.num_outputs == 1:
return output[0]
else:
return tuple(output)
|
5,382 |
def sync_netapi(saltenv="base", extmod_whitelist=None, extmod_blacklist=None):
"""
.. versionadded:: 3003
Sync netapi modules from ``salt://_netapi`` to the master
saltenv : base
The fileserver environment from which to sync. To sync from more than
one environment, pass a comma-separated list.
extmod_whitelist : None
comma-seperated list of modules to sync
extmod_blacklist : None
comma-seperated list of modules to blacklist based on type
CLI Example:
.. code-block:: bash
salt-run saltutil.sync_netapi
"""
return salt.utils.extmods.sync(
__opts__,
"netapi",
saltenv=saltenv,
extmod_whitelist=extmod_whitelist,
extmod_blacklist=extmod_blacklist,
)[0]
|
def sync_netapi(saltenv="base", extmod_whitelist=None, extmod_blacklist=None):
"""
.. versionadded:: 3004
Sync netapi modules from ``salt://_netapi`` to the master
saltenv : base
The fileserver environment from which to sync. To sync from more than
one environment, pass a comma-separated list.
extmod_whitelist : None
comma-seperated list of modules to sync
extmod_blacklist : None
comma-seperated list of modules to blacklist based on type
CLI Example:
.. code-block:: bash
salt-run saltutil.sync_netapi
"""
return salt.utils.extmods.sync(
__opts__,
"netapi",
saltenv=saltenv,
extmod_whitelist=extmod_whitelist,
extmod_blacklist=extmod_blacklist,
)[0]
|
4,298 |
def parse_nedf_header(filename):
"""
Read the header information from the first 10kB of an .nedf file
Parameters
----------
filename : str
Path to the .nedf file
Returns
-------
info : dict
A dictionary with information from the header
dt : numpy.dtype
structure of the binary EEG+accelerometer+trigger data in the file
"""
info = {}
# nedf files have some accelerometer channels sampled at 100Hz and
# several other channels sampled at 500Hz.
# The layout is
# (100HzCh1S1, 100HzCh2S1, 100HzCh3S1),
# ((500HzCh1S1, 500HzCh2S1, …, 500HzChnS1),…,
# (500HzCh1S2, 500HzCh2S2, …, 500HzChnS2), …
# (500HzCh1S5, 500HzCh2S5, …, 500HzChnS5)),
# (100HzCh1S2, 100HzCh2S2, 100HzCh3S2) and so on
# dtype for the binary data block
dt = []
# dtype for a single EEG sample
datadt = []
with open(filename, 'rb') as f:
header = f.read(10240)
headerend = header.find(b'\0')
if headerend == -1:
raise RuntimeError('End of header null not found')
headerxml = ElementTree.fromstring(header[:headerend])
nedfversion = headerxml.findtext('NEDFversion', '')
if nedfversion not in ['1.3', '1.4']:
print('Unexpected NEDFversion, hope this works anyway')
if headerxml.findtext('AdditionalChannelStatus', 'OFF') != 'OFF':
raise RuntimeError('Unknown additional channel, aborting.')
n_acc = int(headerxml.findtext('NumberOfChannelsOfAccelerometer', 0))
if n_acc:
# expect one sample of u16 accelerometer data per block
dt.append(('acc', '>u2', (n_acc,)))
eegset = headerxml.find('EEGSettings')
if eegset is None:
raise RuntimeError('No EEG channels found')
nchantotal = int(eegset.find('TotalNumberOfChannels').text)
info['nchan'] = nchantotal
info['sfreq'] = int(eegset.find('EEGSamplingRate').text)
info['ch_names'] = [e.text for e in eegset.find('EEGMontage')]
# expect nchantotal uint24s
datadt.append(('eeg', 'B', (nchantotal, 3)))
info['units'] = eegset.find('EEGUnits')
if headerxml.find('STIMSettings'):
# 2* -> two stim samples per eeg sample
datadt.append(('stim', 'B', (2, nchantotal, 3)))
if 'AdditionalChannelStatus' in headerxml:
raise RuntimeError('Unexpected AdditionalChannelStatus')
if headerxml.findtext('stepDetails/DeviceClass', '') == 'STARSTIM':
print('Found Starstim, not sure how to handle this')
# Trigger data: 4 bytes in newer versions, 1 byte in older versions
trigger_type = '>i4' if headerxml.findtext('NEDFversion') else 'B'
datadt.append(('trig', trigger_type))
# 5 data samples per block
dt.append(('data', np.dtype(datadt), (5,)))
date = headerxml.findtext('StepDetails/StartDate_firstEEGTimestamp', '')
info['meas_date'] = datetime.datetime.utcfromtimestamp(int(date) / 1000)
return info, np.dtype(dt)
|
def parse_nedf_header(filename):
"""
Read the header information from the first 10kB of an .nedf file
Parameters
----------
filename : str
Path to the .nedf file
Returns
-------
info : dict
A dictionary with header information.
dt : numpy.dtype
structure of the binary EEG+accelerometer+trigger data in the file
"""
info = {}
# nedf files have some accelerometer channels sampled at 100Hz and
# several other channels sampled at 500Hz.
# The layout is
# (100HzCh1S1, 100HzCh2S1, 100HzCh3S1),
# ((500HzCh1S1, 500HzCh2S1, …, 500HzChnS1),…,
# (500HzCh1S2, 500HzCh2S2, …, 500HzChnS2), …
# (500HzCh1S5, 500HzCh2S5, …, 500HzChnS5)),
# (100HzCh1S2, 100HzCh2S2, 100HzCh3S2) and so on
# dtype for the binary data block
dt = []
# dtype for a single EEG sample
datadt = []
with open(filename, 'rb') as f:
header = f.read(10240)
headerend = header.find(b'\0')
if headerend == -1:
raise RuntimeError('End of header null not found')
headerxml = ElementTree.fromstring(header[:headerend])
nedfversion = headerxml.findtext('NEDFversion', '')
if nedfversion not in ['1.3', '1.4']:
print('Unexpected NEDFversion, hope this works anyway')
if headerxml.findtext('AdditionalChannelStatus', 'OFF') != 'OFF':
raise RuntimeError('Unknown additional channel, aborting.')
n_acc = int(headerxml.findtext('NumberOfChannelsOfAccelerometer', 0))
if n_acc:
# expect one sample of u16 accelerometer data per block
dt.append(('acc', '>u2', (n_acc,)))
eegset = headerxml.find('EEGSettings')
if eegset is None:
raise RuntimeError('No EEG channels found')
nchantotal = int(eegset.find('TotalNumberOfChannels').text)
info['nchan'] = nchantotal
info['sfreq'] = int(eegset.find('EEGSamplingRate').text)
info['ch_names'] = [e.text for e in eegset.find('EEGMontage')]
# expect nchantotal uint24s
datadt.append(('eeg', 'B', (nchantotal, 3)))
info['units'] = eegset.find('EEGUnits')
if headerxml.find('STIMSettings'):
# 2* -> two stim samples per eeg sample
datadt.append(('stim', 'B', (2, nchantotal, 3)))
if 'AdditionalChannelStatus' in headerxml:
raise RuntimeError('Unexpected AdditionalChannelStatus')
if headerxml.findtext('stepDetails/DeviceClass', '') == 'STARSTIM':
print('Found Starstim, not sure how to handle this')
# Trigger data: 4 bytes in newer versions, 1 byte in older versions
trigger_type = '>i4' if headerxml.findtext('NEDFversion') else 'B'
datadt.append(('trig', trigger_type))
# 5 data samples per block
dt.append(('data', np.dtype(datadt), (5,)))
date = headerxml.findtext('StepDetails/StartDate_firstEEGTimestamp', '')
info['meas_date'] = datetime.datetime.utcfromtimestamp(int(date) / 1000)
return info, np.dtype(dt)
|
57,650 |
def url_command(client: Client, args: Dict[str, str]) -> Tuple[str, dict, Any]:
"""
Executes URL enrichment against X-Force Exchange.
Args:
client (Client): X-Force client.
args (Dict[str, str]): the arguments for the command.
Returns:
str: human readable presentation of the URL report.
dict: the results to return into Demisto's context.
Any: the raw data from X-Force client (used for debugging).
"""
urls = argToList(args.get('url', ''))
threshold = int(demisto.params().get('url_threshold', DEFAULT_THRESHOLD))
context: Dict[str, Any] = defaultdict(list)
markdown = ''
reports = []
for url in urls:
report = client.url_report(url)
outputs = {'Data': report['url']}
dbot_score = {'Indicator': report['url'], 'Type': 'url', 'Vendor': 'XFE',
'Score': calculate_score(report['score'], threshold)}
if dbot_score['Score'] == 3:
outputs['Malicious'] = {'Vendor': 'XFE'}
context[outputPaths['url']].append(outputs)
context[DBOT_SCORE_KEY].append(dbot_score)
table = {'Score': report['score'],
'Categories': '\n'.join(report['cats'].keys())}
markdown += tableToMarkdown(f'X-Force URL Reputation for: {report["url"]}\n'
f'{XFORCE_URL}/url/{report["url"]}', table, removeNull=True)
reports.append(report)
return markdown, context, reports
|
def url_command(client: Client, args: Dict[str, str]) -> Tuple[str, dict, Any]:
"""
Executes URL enrichment against X-Force Exchange.
Args:
client (Client): X-Force client.
args (Dict[str, str]): the arguments for the command.
Returns:
str: human readable presentation of the URL report.
dict: the results to return into Demisto's context.
Any: the raw data from X-Force client (used for debugging).
"""
urls = argToList(args.get('url', ''))
threshold = int(demisto.params().get('url_threshold', DEFAULT_THRESHOLD))
context: Dict[str, Any] = defaultdict(list)
markdown = ''
reports = []
for url in urls:
report = client.url_report(url)
outputs = {'Data': report['url']}
dbot_score = {'Indicator': report['url'], 'Type': 'url', 'Vendor': 'XFE',
'Score': calculate_score(report['score'], threshold)}
if dbot_score['Score'] == 3:
outputs['Malicious'] = {'Vendor': 'XFE'}
context[outputPaths['url']].append(outputs)
context[DBOT_SCORE_KEY].append(dbot_score)
table = {'Score': report['score'],
'Categories': '\n'.join(report['cats'].keys())}
markdown += tableToMarkdown(f'X-Force URL Reputation for: {report["url"]}\n'
f'{XFORCE_URL}/url/{report["url"]}', table, removeNull=True)
reports.append(report)
return markdown, context, reports
|
50,700 |
def setup_platform(hass, config, add_entities_callback, discovery_info=None):
"""Set up the Plex platform."""
if PLEX_DOMAIN not in hass.data:
hass.data[PLEX_DOMAIN] = False
# Check if already configured
if hass.data[PLEX_DOMAIN]:
return
# get config from plex.conf
file_config = load_json(hass.config.path(PLEX_CONFIG_FILE))
if file_config and config:
# Setup a configured PlexServer
host, host_config = file_config.popitem()
token = host_config["token"]
try:
has_ssl = host_config["ssl"]
except KeyError:
has_ssl = False
try:
verify_ssl = host_config["verify"]
except KeyError:
verify_ssl = True
# Via discovery
elif discovery_info is not None:
# Parse discovery data
host = discovery_info.get("host")
port = discovery_info.get("port")
host = f"{host}:{port}"
_LOGGER.info("Discovered PLEX server: %s", host)
if host in _CONFIGURING:
return
token = None
has_ssl = False
verify_ssl = True
else:
return
setup_plexserver(
host, token, has_ssl, verify_ssl, hass, config, add_entities_callback
)
|
def setup_platform(hass, config, add_entities_callback, discovery_info=None):
"""Set up the Plex platform."""
plex_data = hass.data.setdefault(PLEX_DOMAIN, {})
server_setup = plex_data.setdefault(SERVER_SETUP, False)
if server_setup:
return
hass.data[PLEX_DOMAIN] = False
# Check if already configured
if hass.data[PLEX_DOMAIN]:
return
# get config from plex.conf
file_config = load_json(hass.config.path(PLEX_CONFIG_FILE))
if file_config and config:
# Setup a configured PlexServer
host, host_config = file_config.popitem()
token = host_config["token"]
try:
has_ssl = host_config["ssl"]
except KeyError:
has_ssl = False
try:
verify_ssl = host_config["verify"]
except KeyError:
verify_ssl = True
# Via discovery
elif discovery_info is not None:
# Parse discovery data
host = discovery_info.get("host")
port = discovery_info.get("port")
host = f"{host}:{port}"
_LOGGER.info("Discovered PLEX server: %s", host)
if host in _CONFIGURING:
return
token = None
has_ssl = False
verify_ssl = True
else:
return
setup_plexserver(
host, token, has_ssl, verify_ssl, hass, config, add_entities_callback
)
|
43,975 |
def dipole(hf_file, core=None, active=None, mapping="jordan_wigner", cutoff=1.0e-12, wires=None):
r"""Computes the electric dipole moment operator in the Pauli basis.
The second quantized dipole moment operator :math:`\hat{D}` of a molecule is given by
.. math::
\hat{D} = \sum_{\alpha, \beta} \langle \alpha \vert {\bf r} \vert \beta \rangle
[\hat{c}_{\alpha\uparrow}^\dagger \hat{c}_{\beta\uparrow} +
\hat{c}_{\alpha\downarrow}^\dagger \hat{c}_{\beta\downarrow}] + \hat{D}_\mathrm{n}.
In the equation above, the indices :math:`\alpha, \beta` run over the basis of Hartree-Fock
molecular orbitals, the operators :math:`\hat{c}^\dagger` and :math:`\hat{c}` are the
electron creation and annihilation operators, respectively, and
:math:`\langle \alpha \vert {\bf r} \vert \beta \rangle` denotes
the matrix elements of the position operator :math:`\hat{{\bf r}`. These matrix elements
are calculated as
.. math::
\langle \alpha \vert \hat{{\bf r}} \vert \beta \rangle = \sum_{i, j} C_{\alpha i}^*C_{\beta j}
\langle i \vert {\bf r} \vert j \rangle,
where :math:`\vert i \rangle` is the wave function of the atomic orbitals and
:math:`C_{\alpha i}` and :math:`\langle i \vert \hat{{\bf r}} \vert j \rangle`
are the representations of the molecular orbitals and the operator
:math:`\hat{{\bf r}}` in the atomic basis.
The contribution of the nuclei to the dipole operator is given by
.. math::
\hat{D}_\mathrm{n} = -\sum_{i=1}^{N_\mathrm{atoms}} Z_i {\bf R}_i \hat{I},
where :math:`Z_i` and :math:`{\bf R}_i` are, respectively, the atomic number and the
position vector of the :math:`i`-th atom of the molecule.
Args:
hf_file (str): Absolute path to the hdf5-formatted file with the Hartree-Fock
electronic structure. This file can be generated using the
:func:`~.meanfield` function.
core (list): indices of core orbitals, i.e., the orbitals that are
not correlated in the many-body wave function
active (list): indices of active orbitals, i.e., the orbitals used to
build the correlated many-body wave function
mapping (str): Specifies the transformation to map the fermionic operator to the
Pauli basis. Input values can be ``'jordan_wigner'`` or ``'bravyi_kitaev'``.
cutoff (float): Cutoff value for including the matrix elements
:math:`\langle \alpha \vert \hat{{\bf r}} \vert \beta \rangle`. The matrix elements
with absolute value less than ``cutoff`` are neglected.
wires (Wires, list, tuple, dict): Custom wire mapping used to convert the qubit operator
to an observable measurable in a PennyLane ansatz.
For types Wires/list/tuple, each item in the iterable represents a wire label
corresponding to the qubit number equal to its index.
For type dict, only int-keyed dict (for qubit-to-wire conversion) is accepted.
If None, will use identity map (e.g. 0->0, 1->1, ...).
Returns:
list[pennylane.Hamiltonian]: the qubit observables corresponding to the components
:math:`\hat{D}_x`, :math:`\hat{D}_y` and :math:`\hat{D}_z` of the dipole operator in
atomic units (Bohr radii).
**Example**
>>> dipole_obs = dipole("./h3p.hdf5")
>>> print(dipole_obs)
[<Hamiltonian: terms=19, wires=[0, 1, 2, 3, 4, 5]>,
<Hamiltonian: terms=19, wires=[0, 1, 2, 3, 4, 5]>,
<Hamiltonian: terms=1, wires=[0]>]
>>> print(dipole_obs[0])
(-1.4861475511479285) [Z0]
+ (-1.4861475511479285) [Z1]
+ (-1.0207535180657459) [Z2]
+ (-1.0207535180657459) [Z3]
+ (-0.38409271341166346) [Z4]
+ (-0.38409271341166346) [Z5]
+ (2.9129875652506754) [I0]
+ (-1.0463884953059674) [Y0 Z1 Y2]
+ (-1.0463884953059674) [X0 Z1 X2]
+ (-1.0463884953059674) [Y1 Z2 Y3]
+ (-1.0463884953059674) [X1 Z2 X3]
+ (-0.2949628258407493) [Y2 Z3 Y4]
+ (-0.2949628258407493) [X2 Z3 X4]
+ (-0.2949628258407493) [Y3 Z4 Y5]
+ (-0.2949628258407493) [X3 Z4 X5]
+ (-0.10008920247855208) [Y0 Z1 Z2 Z3 Y4]
+ (-0.10008920247855208) [X0 Z1 Z2 Z3 X4]
+ (-0.10008920247855208) [Y1 Z2 Z3 Z4 Y5]
+ (-0.10008920247855208) [X1 Z2 Z3 Z4 X5]
"""
bohr_angs = 0.529177210903
atomic_numbers = {
"H": 1,
"He": 2,
"Li": 3,
"Be": 4,
"B": 5,
"C": 6,
"N": 7,
"O": 8,
"F": 9,
"Ne": 10,
}
hf = openfermion.MolecularData(filename=hf_file.strip())
if hf.multiplicity != 1:
raise ValueError(
"Currently, this functionality is constrained to closed-shell Hartree-Fock states;"
" got spin multiplicity 2S+1 = {}".format(hf.multiplicity)
)
for i in hf.geometry:
print(i[0])
if i[0] not in atomic_numbers:
raise ValueError(
"Currently, only first- or second-row elements of the periodic table are supported;"
" got element {}".format(i[0])
)
# Load dipole matrix elements in the atomic basis
# pylint: disable=import-outside-toplevel
from pyscf import gto
mol = gto.M(
atom=hf.geometry, basis=hf.basis, charge=hf.charge, spin=0.5 * (hf.multiplicity - 1)
)
dip_ao = mol.intor_symmetric("int1e_r", comp=3).real
# Transform dipole matrix elements to the MO basis
n_orbs = hf.n_orbitals
c_hf = hf.canonical_orbitals
dip_mo = np.zeros((3, n_orbs, n_orbs))
for comp in range(3):
for alpha in range(n_orbs):
for beta in range(alpha + 1):
dip_mo[comp, alpha, beta] = c_hf[alpha] @ dip_ao[comp] @ c_hf[beta]
dip_mo[comp] += dip_mo[comp].T - np.diag(np.diag(dip_mo[comp]))
# Compute the nuclear contribution
dip_n = np.zeros(3)
for comp in range(3):
for i in hf.geometry:
dip_n[comp] -= atomic_numbers[i[0]] * i[1][comp] / bohr_angs
# Build the observable
dip_obs = []
for i in range(3):
fermion_obs = one_particle(dip_mo[i], core=core, active=active, cutoff=cutoff)
dip_obs.append(observable([fermion_obs], init_term=dip_n[i], mapping=mapping, wires=wires))
return dip_obs
|
def dipole(hf_file, core=None, active=None, mapping="jordan_wigner", cutoff=1.0e-12, wires=None):
r"""Computes the electric dipole moment operator in the Pauli basis.
The second quantized dipole moment operator :math:`\hat{D}` of a molecule is given by
.. math::
\hat{D} = \sum_{\alpha, \beta} \langle \alpha \vert {\bf r} \vert \beta \rangle
[\hat{c}_{\alpha\uparrow}^\dagger \hat{c}_{\beta\uparrow} +
\hat{c}_{\alpha\downarrow}^\dagger \hat{c}_{\beta\downarrow}] + \hat{D}_\mathrm{n}.
In the equation above, the indices :math:`\alpha, \beta` run over the basis of Hartree-Fock
molecular orbitals, the operators :math:`\hat{c}^\dagger` and :math:`\hat{c}` are the
electron creation and annihilation operators, respectively, and
:math:`\langle \alpha \vert {\bf r} \vert \beta \rangle` denotes
the matrix elements of the position operator :math:`\hat{{\bf r}}`. These matrix elements
are calculated as
.. math::
\langle \alpha \vert \hat{{\bf r}} \vert \beta \rangle = \sum_{i, j} C_{\alpha i}^*C_{\beta j}
\langle i \vert {\bf r} \vert j \rangle,
where :math:`\vert i \rangle` is the wave function of the atomic orbitals and
:math:`C_{\alpha i}` and :math:`\langle i \vert \hat{{\bf r}} \vert j \rangle`
are the representations of the molecular orbitals and the operator
:math:`\hat{{\bf r}}` in the atomic basis.
The contribution of the nuclei to the dipole operator is given by
.. math::
\hat{D}_\mathrm{n} = -\sum_{i=1}^{N_\mathrm{atoms}} Z_i {\bf R}_i \hat{I},
where :math:`Z_i` and :math:`{\bf R}_i` are, respectively, the atomic number and the
position vector of the :math:`i`-th atom of the molecule.
Args:
hf_file (str): Absolute path to the hdf5-formatted file with the Hartree-Fock
electronic structure. This file can be generated using the
:func:`~.meanfield` function.
core (list): indices of core orbitals, i.e., the orbitals that are
not correlated in the many-body wave function
active (list): indices of active orbitals, i.e., the orbitals used to
build the correlated many-body wave function
mapping (str): Specifies the transformation to map the fermionic operator to the
Pauli basis. Input values can be ``'jordan_wigner'`` or ``'bravyi_kitaev'``.
cutoff (float): Cutoff value for including the matrix elements
:math:`\langle \alpha \vert \hat{{\bf r}} \vert \beta \rangle`. The matrix elements
with absolute value less than ``cutoff`` are neglected.
wires (Wires, list, tuple, dict): Custom wire mapping used to convert the qubit operator
to an observable measurable in a PennyLane ansatz.
For types Wires/list/tuple, each item in the iterable represents a wire label
corresponding to the qubit number equal to its index.
For type dict, only int-keyed dict (for qubit-to-wire conversion) is accepted.
If None, will use identity map (e.g. 0->0, 1->1, ...).
Returns:
list[pennylane.Hamiltonian]: the qubit observables corresponding to the components
:math:`\hat{D}_x`, :math:`\hat{D}_y` and :math:`\hat{D}_z` of the dipole operator in
atomic units (Bohr radii).
**Example**
>>> dipole_obs = dipole("./h3p.hdf5")
>>> print(dipole_obs)
[<Hamiltonian: terms=19, wires=[0, 1, 2, 3, 4, 5]>,
<Hamiltonian: terms=19, wires=[0, 1, 2, 3, 4, 5]>,
<Hamiltonian: terms=1, wires=[0]>]
>>> print(dipole_obs[0])
(-1.4861475511479285) [Z0]
+ (-1.4861475511479285) [Z1]
+ (-1.0207535180657459) [Z2]
+ (-1.0207535180657459) [Z3]
+ (-0.38409271341166346) [Z4]
+ (-0.38409271341166346) [Z5]
+ (2.9129875652506754) [I0]
+ (-1.0463884953059674) [Y0 Z1 Y2]
+ (-1.0463884953059674) [X0 Z1 X2]
+ (-1.0463884953059674) [Y1 Z2 Y3]
+ (-1.0463884953059674) [X1 Z2 X3]
+ (-0.2949628258407493) [Y2 Z3 Y4]
+ (-0.2949628258407493) [X2 Z3 X4]
+ (-0.2949628258407493) [Y3 Z4 Y5]
+ (-0.2949628258407493) [X3 Z4 X5]
+ (-0.10008920247855208) [Y0 Z1 Z2 Z3 Y4]
+ (-0.10008920247855208) [X0 Z1 Z2 Z3 X4]
+ (-0.10008920247855208) [Y1 Z2 Z3 Z4 Y5]
+ (-0.10008920247855208) [X1 Z2 Z3 Z4 X5]
"""
bohr_angs = 0.529177210903
atomic_numbers = {
"H": 1,
"He": 2,
"Li": 3,
"Be": 4,
"B": 5,
"C": 6,
"N": 7,
"O": 8,
"F": 9,
"Ne": 10,
}
hf = openfermion.MolecularData(filename=hf_file.strip())
if hf.multiplicity != 1:
raise ValueError(
"Currently, this functionality is constrained to closed-shell Hartree-Fock states;"
" got spin multiplicity 2S+1 = {}".format(hf.multiplicity)
)
for i in hf.geometry:
print(i[0])
if i[0] not in atomic_numbers:
raise ValueError(
"Currently, only first- or second-row elements of the periodic table are supported;"
" got element {}".format(i[0])
)
# Load dipole matrix elements in the atomic basis
# pylint: disable=import-outside-toplevel
from pyscf import gto
mol = gto.M(
atom=hf.geometry, basis=hf.basis, charge=hf.charge, spin=0.5 * (hf.multiplicity - 1)
)
dip_ao = mol.intor_symmetric("int1e_r", comp=3).real
# Transform dipole matrix elements to the MO basis
n_orbs = hf.n_orbitals
c_hf = hf.canonical_orbitals
dip_mo = np.zeros((3, n_orbs, n_orbs))
for comp in range(3):
for alpha in range(n_orbs):
for beta in range(alpha + 1):
dip_mo[comp, alpha, beta] = c_hf[alpha] @ dip_ao[comp] @ c_hf[beta]
dip_mo[comp] += dip_mo[comp].T - np.diag(np.diag(dip_mo[comp]))
# Compute the nuclear contribution
dip_n = np.zeros(3)
for comp in range(3):
for i in hf.geometry:
dip_n[comp] -= atomic_numbers[i[0]] * i[1][comp] / bohr_angs
# Build the observable
dip_obs = []
for i in range(3):
fermion_obs = one_particle(dip_mo[i], core=core, active=active, cutoff=cutoff)
dip_obs.append(observable([fermion_obs], init_term=dip_n[i], mapping=mapping, wires=wires))
return dip_obs
|
500 |
def get_template_hsm_parts(message_text):
"""The magic string users enter looks like: cc_wa_template:template_name:lang_code:{var1}{var2}{var3}
"""
HsmParts = namedtuple("hsm_parts", "template_name lang_code params")
parts = message_text.split(":", maxsplit=3)
try:
params = re.findall("{(.+?)}+", parts[3])
except IndexError:
params = []
try:
return HsmParts(template_name=parts[1], lang_code=parts[2], params=params)
except IndexError:
raise WhatsAppTemplateStringException
|
def get_template_hsm_parts(message_text):
"""The magic string users enter looks like: cc_wa_template:template_name:lang_code:{var1}{var2}{var3}
"""
HsmParts = namedtuple("hsm_parts", "template_name lang_code params")
_, template_name, lang_code, params = message_text.split(":", maxsplit=3)
try:
params = re.findall("{(.+?)}+", parts[3])
except IndexError:
params = []
try:
return HsmParts(template_name=parts[1], lang_code=parts[2], params=params)
except IndexError:
raise WhatsAppTemplateStringException
|
25,988 |
def load_arguments(self, _):
# Model imports
StorageAccountTypes = self.get_models('StorageAccountTypes')
DiskStorageAccountTypes = self.get_models('DiskStorageAccountTypes,', operation_group='disks')
SnapshotStorageAccountTypes = self.get_models('SnapshotStorageAccountTypes', operation_group='snapshots')
UpgradeMode, CachingTypes, OperatingSystemTypes = self.get_models('UpgradeMode', 'CachingTypes', 'OperatingSystemTypes')
HyperVGenerationTypes, HyperVGeneration = self.get_models('HyperVGenerationTypes', 'HyperVGeneration')
DedicatedHostLicenseTypes = self.get_models('DedicatedHostLicenseTypes')
OrchestrationServiceNames, OrchestrationServiceStateAction = self.get_models('OrchestrationServiceNames', 'OrchestrationServiceStateAction', operation_group='virtual_machine_scale_sets')
RebootSetting, VMGuestPatchClassificationWindows, VMGuestPatchClassificationLinux = self.get_models('VMGuestPatchRebootSetting', 'VMGuestPatchClassificationWindows', 'VMGuestPatchClassificationLinux')
GallerySharingPermissionTypes = self.get_models('GallerySharingPermissionTypes', operation_group='shared_galleries')
ReplicationMode = self.get_models('ReplicationMode', operation_group='gallery_image_versions')
# REUSABLE ARGUMENT DEFINITIONS
name_arg_type = CLIArgumentType(options_list=['--name', '-n'], metavar='NAME')
multi_ids_type = CLIArgumentType(nargs='+')
existing_vm_name = CLIArgumentType(overrides=name_arg_type,
configured_default='vm',
help="The name of the Virtual Machine. You can configure the default using `az configure --defaults vm=<name>`",
completer=get_resource_name_completion_list('Microsoft.Compute/virtualMachines'), id_part='name')
existing_disk_name = CLIArgumentType(overrides=name_arg_type, help='The name of the managed disk', completer=get_resource_name_completion_list('Microsoft.Compute/disks'), id_part='name')
existing_snapshot_name = CLIArgumentType(overrides=name_arg_type, help='The name of the snapshot', completer=get_resource_name_completion_list('Microsoft.Compute/snapshots'), id_part='name')
vmss_name_type = CLIArgumentType(name_arg_type,
configured_default='vmss',
completer=get_resource_name_completion_list('Microsoft.Compute/virtualMachineScaleSets'),
help="Scale set name. You can configure the default using `az configure --defaults vmss=<name>`",
id_part='name')
extension_instance_name_type = CLIArgumentType(help="Name of extension instance, which can be customized. Default: name of the extension.")
image_template_name_type = CLIArgumentType(overrides=name_arg_type, id_part='name')
disk_encryption_set_name = CLIArgumentType(overrides=name_arg_type, help='Name of disk encryption set.', id_part='name')
# StorageAccountTypes renamed to DiskStorageAccountTypes in 2018_06_01 of azure-mgmt-compute
DiskStorageAccountTypes = DiskStorageAccountTypes or StorageAccountTypes
if DiskStorageAccountTypes:
disk_sku = CLIArgumentType(arg_type=get_enum_type(DiskStorageAccountTypes))
else:
# StorageAccountTypes introduced in api version 2016_04_30_preview of Resource.MGMT.Compute package..
# However, 2017-03-09-profile targets version 2016-03-30 of compute package.
disk_sku = CLIArgumentType(arg_type=get_enum_type(['Premium_LRS', 'Standard_LRS']))
if SnapshotStorageAccountTypes:
snapshot_sku = CLIArgumentType(arg_type=get_enum_type(SnapshotStorageAccountTypes))
else:
# SnapshotStorageAccountTypes introduced in api version 2018_04_01 of Resource.MGMT.Compute package..
# However, 2017-03-09-profile targets version 2016-03-30 of compute package.
snapshot_sku = CLIArgumentType(arg_type=get_enum_type(['Premium_LRS', 'Standard_LRS']))
# special case for `network nic scale-set list` command alias
with self.argument_context('network nic scale-set list') as c:
c.argument('virtual_machine_scale_set_name', options_list=['--vmss-name'], completer=get_resource_name_completion_list('Microsoft.Compute/virtualMachineScaleSets'), id_part='name')
HyperVGenerationTypes = HyperVGenerationTypes or HyperVGeneration
if HyperVGenerationTypes:
hyper_v_gen_sku = CLIArgumentType(arg_type=get_enum_type(HyperVGenerationTypes, default="V1"))
else:
hyper_v_gen_sku = CLIArgumentType(arg_type=get_enum_type(["V1", "V2"], default="V1"))
ultra_ssd_enabled_type = CLIArgumentType(
arg_type=get_three_state_flag(), min_api='2018-06-01',
help='Enables or disables the capability to have 1 or more managed data disks with UltraSSD_LRS storage account')
scale_in_policy_type = CLIArgumentType(
nargs='+', arg_type=get_enum_type(self.get_models('VirtualMachineScaleSetScaleInRules')),
help='Specify the scale-in policy (space delimited) that decides which virtual machines are chosen for removal when a Virtual Machine Scale Set is scaled-in.'
)
edge_zone_type = CLIArgumentType(
help='The name of edge zone.',
min_api='2020-12-01',
is_preview=True
)
t_shared_to = self.get_models('SharedToValues', operation_group='shared_galleries')
shared_to_type = CLIArgumentType(
arg_type=get_enum_type(t_shared_to),
help='The query parameter to decide what shared galleries to fetch when doing listing operations. '
'If not specified, list by subscription id.'
)
# region MixedScopes
for scope in ['vm', 'disk', 'snapshot', 'image', 'sig']:
with self.argument_context(scope) as c:
c.argument('tags', tags_type)
for scope in ['disk', 'snapshot']:
with self.argument_context(scope) as c:
c.ignore('source_blob_uri', 'source_disk', 'source_snapshot')
c.argument('source_storage_account_id', help='used when source blob is in a different subscription')
c.argument('size_gb', options_list=['--size-gb', '-z'], help='size in GB. Max size: 4095 GB (certain preview disks can be larger).', type=int)
c.argument('duration_in_seconds', help='Time duration in seconds until the SAS access expires', type=int)
if self.supported_api_version(min_api='2018-09-30', operation_group='disks'):
c.argument('access_level', arg_type=get_enum_type(['Read', 'Write']), default='Read', help='access level')
c.argument('for_upload', arg_type=get_three_state_flag(),
help='Create the {0} for uploading blobs later on through storage commands. Run "az {0} grant-access --access-level Write" to retrieve the {0}\'s SAS token.'.format(scope))
c.argument('hyper_v_generation', arg_type=hyper_v_gen_sku, help='The hypervisor generation of the Virtual Machine. Applicable to OS disks only.')
else:
c.ignore('access_level', 'for_upload', 'hyper_v_generation')
c.argument('encryption_type', min_api='2019-07-01', arg_type=get_enum_type(self.get_models('EncryptionType')),
help='Encryption type. EncryptionAtRestWithPlatformKey: Disk is encrypted with XStore managed key at rest. It is the default encryption type. EncryptionAtRestWithCustomerKey: Disk is encrypted with Customer managed key at rest.')
c.argument('disk_encryption_set', min_api='2019-07-01', help='Name or ID of disk encryption set that is used to encrypt the disk.')
c.argument('location', help='Location. Values from: `az account list-locations`. You can configure the default location using `az configure --defaults location=<location>`. If location is not specified and no default location specified, location will be automatically set as same as the resource group.')
operation_group = 'disks' if scope == 'disk' else 'snapshots'
c.argument('network_access_policy', min_api='2020-05-01', help='Policy for accessing the disk via network.', arg_type=get_enum_type(self.get_models('NetworkAccessPolicy', operation_group=operation_group)))
c.argument('disk_access', min_api='2020-05-01', help='Name or ID of the disk access resource for using private endpoints on disks.')
c.argument('enable_bursting', arg_type=get_three_state_flag(), help='Enable bursting beyond the provisioned performance target of the disk. Bursting is disabled by default, and it does not apply to Ultra disks.')
for scope in ['disk create', 'snapshot create']:
with self.argument_context(scope) as c:
c.argument('source', help='source to create the disk/snapshot from, including unmanaged blob uri, managed disk id or name, or snapshot id or name')
# endregion
# region Disks
with self.argument_context('disk') as c:
c.argument('zone', zone_type, min_api='2017-03-30', options_list=['--zone']) # TODO: --size-gb currently has claimed -z. We can do a breaking change later if we want to.
c.argument('disk_name', existing_disk_name, completer=get_resource_name_completion_list('Microsoft.Compute/disks'))
c.argument('name', arg_type=name_arg_type)
c.argument('sku', arg_type=disk_sku, help='Underlying storage SKU')
c.argument('os_type', arg_type=get_enum_type(OperatingSystemTypes), help='The Operating System type of the Disk.')
c.argument('disk_iops_read_write', type=int, min_api='2018-06-01', help='The number of IOPS allowed for this disk. Only settable for UltraSSD disks. One operation can transfer between 4k and 256k bytes')
c.argument('disk_mbps_read_write', type=int, min_api='2018-06-01', help="The bandwidth allowed for this disk. Only settable for UltraSSD disks. MBps means millions of bytes per second with ISO notation of powers of 10")
c.argument('upload_size_bytes', type=int, min_api='2019-03-01',
help='The size (in bytes) of the contents of the upload including the VHD footer. Min value: 20972032. Max value: 35183298347520')
c.argument('max_shares', type=int, help='The maximum number of VMs that can attach to the disk at the same time. Value greater than one indicates a disk that can be mounted on multiple VMs at the same time')
c.argument('disk_iops_read_only', type=int, help='The total number of IOPS that will be allowed across all VMs mounting the shared disk as ReadOnly. One operation can transfer between 4k and 256k bytes')
c.argument('disk_mbps_read_only', type=int, help='The total throughput (MBps) that will be allowed across all VMs mounting the shared disk as ReadOnly. MBps means millions of bytes per second - MB here uses the ISO notation, of powers of 10')
c.argument('image_reference', help='ID or URN (publisher:offer:sku:version) of the image from which to create a disk')
c.argument('image_reference_lun', type=int, help='If the disk is created from an image\'s data disk, this is an index that indicates which of the data disks in the image to use. For OS disks, this field is null')
c.argument('gallery_image_reference', help='ID of the shared galley image version from which to create a disk')
c.argument('gallery_image_reference_lun', type=int, help='If the disk is created from an image\'s data disk, this is an index that indicates which of the data disks in the image to use. For OS disks, this field is null')
c.argument('logical_sector_size', type=int, help='Logical sector size in bytes for Ultra disks. Supported values are 512 ad 4096. 4096 is the default.')
c.argument('tier', help='Performance tier of the disk (e.g, P4, S10) as described here: https://azure.microsoft.com/pricing/details/managed-disks/. Does not apply to Ultra disks.')
c.argument('edge_zone', edge_zone_type)
c.argument('security_type', choices=['TrustedLaunch'], help='The security type of the VM. Applicable for OS disks only.', min_api='2020-12-01')
c.argument('support_hibernation', arg_type=get_three_state_flag(), help='Indicate the OS on a disk supports hibernation.', min_api='2020-12-01')
# endregion
# region Snapshots
with self.argument_context('snapshot', resource_type=ResourceType.MGMT_COMPUTE, operation_group='snapshots') as c:
c.argument('snapshot_name', existing_snapshot_name, id_part='name', completer=get_resource_name_completion_list('Microsoft.Compute/snapshots'))
c.argument('name', arg_type=name_arg_type)
c.argument('sku', arg_type=snapshot_sku)
c.argument('incremental', arg_type=get_three_state_flag(), min_api='2019-03-01',
help='Whether a snapshot is incremental. Incremental snapshots on the same disk occupy less space than full snapshots and can be diffed')
c.argument('edge_zone', edge_zone_type)
c.argument('copy_start', arg_type=get_three_state_flag(), min_api='2021-04-01',
help='Create snapshot by using a deep copy process, where the resource creation is considered complete only after all data has been copied from the source.')
# endregion
# region Images
with self.argument_context('image') as c:
c.argument('os_type', arg_type=get_enum_type(['Windows', 'Linux']))
c.argument('image_name', arg_type=name_arg_type, id_part='name', completer=get_resource_name_completion_list('Microsoft.Compute/images'))
c.argument('tags', tags_type)
with self.argument_context('image create') as c:
# here we collpase all difference image sources to under 2 common arguments --os-disk-source --data-disk-sources
c.argument('name', arg_type=name_arg_type, help='new image name')
c.argument('source', help='OS disk source from the same region, including a virtual machine ID or name, OS disk blob URI, managed OS disk ID or name, or OS snapshot ID or name')
c.argument('data_disk_sources', nargs='+', help='Space-separated list of data disk sources, including unmanaged blob URI, managed disk ID or name, or snapshot ID or name')
c.argument('zone_resilient', min_api='2017-12-01', arg_type=get_three_state_flag(), help='Specifies whether an image is zone resilient or not. '
'Default is false. Zone resilient images can be created only in regions that provide Zone Redundant Storage')
c.argument('storage_sku', arg_type=disk_sku, help='The SKU of the storage account with which to create the VM image. Unused if source VM is specified.')
c.argument('os_disk_caching', arg_type=get_enum_type(CachingTypes), help="Storage caching type for the image's OS disk.")
c.argument('data_disk_caching', arg_type=get_enum_type(CachingTypes),
help="Storage caching type for the image's data disk.")
c.argument('hyper_v_generation', arg_type=hyper_v_gen_sku, min_api="2019-03-01", help='The hypervisor generation of the Virtual Machine created from the image.')
c.ignore('source_virtual_machine', 'os_blob_uri', 'os_disk', 'os_snapshot', 'data_blob_uris', 'data_disks', 'data_snapshots')
c.argument('edge_zone', edge_zone_type, )
# endregion
# region Image Templates
with self.argument_context('image builder') as c:
ib_output_name_help = "Name of the image builder run output."
c.argument('location', get_location_type(self.cli_ctx))
c.argument('scripts', nargs='+', help="Space-separated list of shell or powershell scripts to customize the image with. Each script must be a publicly accessible URL."
" Infers type of script from file extension ('.sh' or'.ps1') or from source type. More more customizer options and flexibility, see: 'az image template customizer add'")
c.argument('source', options_list=["--image-source", "-i"], help="The base image to customize. Must be a valid platform image URN, platform image alias, Red Hat ISO image URI, managed image name/ID, or shared image version ID.")
c.argument('image_template_name', image_template_name_type, help="The name of the image template.")
c.argument('checksum', help="The SHA256 checksum of the Red Hat ISO image")
c.argument('managed_image_destinations', nargs='+', help='Managed image output distributor information. Space-separated list of key-value pairs. E.g "image_1=westus2 image_2=westus". Each key is the name or resource ID of the managed image to be created. Each value is the location of the image.')
c.argument('shared_image_destinations', nargs='+', help='Shared image gallery (sig) output distributor information. Space-separated list of key-value pairs. E.g "my_gallery_1/image_def_1=eastus,westus my_gallery_2/image_def_2=uksouth,canadaeast,francesouth." '
'Each key is the sig image definition ID or sig gallery name and sig image definition delimited by a "/". Each value is a comma-delimited list of replica locations.')
c.argument('output_name', help=ib_output_name_help)
c.ignore('destinations_lists', 'scripts_list', 'source_dict')
with self.argument_context('image builder create') as c:
ib_source_type = CLIArgumentType(arg_group="Image Source")
ib_customizer_type = CLIArgumentType(arg_group="Customizer")
ib_cutput_type = CLIArgumentType(arg_group="Output")
c.argument('build_timeout', type=int, help="The Maximum duration to wait while building the image template, in minutes. Default is 60.")
c.argument('image_template', help='Local path or URL to an image template file. When using --image-template, all other parameters are ignored except -g and -n. Reference: https://docs.microsoft.com/azure/virtual-machines/linux/image-builder-json')
c.argument('identity', nargs='+', help='List of user assigned identities (name or ID, space delimited) of the image template.')
# VM profile
c.argument('vm_size', help='Size of the virtual machine used to build, customize and capture images. Omit or specify empty string to use the default (Standard_D1_v2)')
c.argument('os_disk_size', type=int, help='Size of the OS disk in GB. Omit or specify 0 to use Azure\'s default OS disk size')
c.argument('vnet', help='Name of VNET to deploy the build virtual machine. You should only specify it when subnet is a name')
c.argument('subnet', help='Name or ID of subnet to deploy the build virtual machine')
# Image Source Arguments
c.argument('source', arg_type=ib_source_type)
c.argument('checksum', arg_type=ib_source_type)
c.argument('', arg_type=ib_source_type)
# Image Customizer Arguments
c.argument('scripts', arg_type=ib_customizer_type)
c.argument('', arg_type=ib_customizer_type)
c.argument('', arg_type=ib_customizer_type)
# Image Output Arguments
c.argument('managed_image_destinations', arg_type=ib_cutput_type)
c.argument('shared_image_destinations', arg_type=ib_cutput_type)
c.argument('output_name', arg_type=ib_cutput_type)
with self.argument_context('image builder output') as c:
ib_sig_regions_help = "Space-separated list of regions to replicate the image version into."
ib_img_location_help = "Location where the customized image will be created."
c.argument('gallery_image_definition', arg_group="Shared Image Gallery", help="Name or ID of the existing SIG image definition to create the customized image version with.")
c.argument('gallery_name', arg_group="Shared Image Gallery", help="Shared image gallery name, if image definition name and not ID was provided.")
c.argument('gallery_replication_regions', arg_group="Shared Image Gallery", nargs='+', help=ib_sig_regions_help)
c.argument('managed_image', arg_group="Managed Image", help="Name or ID of the customized managed image to be created.")
c.argument('managed_image_location', arg_group="Managed Image", help=ib_img_location_help)
with self.argument_context('image builder output add') as c:
ib_artifact_tags_help = "Tags that will be applied to the output artifact once it has been created by the distributor. " + tags_type.settings['help']
ib_artifact_tags_type = CLIArgumentType(overrides=tags_type, help=ib_artifact_tags_help, options_list=["--artifact-tags"])
ib_default_loc_help = " Defaults to resource group's location."
c.argument('output_name', help=ib_output_name_help + " Defaults to the name of the managed image or sig image definition.")
c.argument('gallery_replication_regions', arg_group="Shared Image Gallery", nargs='+', help=ib_sig_regions_help + ib_default_loc_help)
c.argument('managed_image_location', arg_group="Managed Image", help=ib_img_location_help + ib_default_loc_help)
c.argument('is_vhd', arg_group="VHD", help="The output is a VHD distributor.", action='store_true')
c.argument('tags', arg_type=ib_artifact_tags_type)
c.ignore('location')
with self.argument_context('image builder customizer') as c:
ib_win_restart_type = CLIArgumentType(arg_group="Windows Restart")
ib_win_update_type = CLIArgumentType(arg_group="Windows Update")
ib_script_type = CLIArgumentType(arg_group="Shell and Powershell")
ib_powershell_type = CLIArgumentType(arg_group="Powershell")
ib_file_customizer_type = CLIArgumentType(arg_group="File")
c.argument('customizer_name', help="Name of the customizer.")
c.argument('customizer_type', options_list=['--type', '-t'], help="Type of customizer to be added to the image template.", arg_type=get_enum_type(ScriptType))
# Script Args
c.argument('script_url', arg_type=ib_script_type, help="URL of script to customize the image with. The URL must be publicly accessible.")
c.argument('inline_script', arg_type=ib_script_type, nargs='+', help="Space-separated list of inline script lines to customize the image with.")
# Powershell Specific Args
c.argument('valid_exit_codes', options_list=['--exit-codes', '-e'], arg_type=ib_powershell_type, nargs='+', help="Space-separated list of valid exit codes, as integers")
# Windows Restart Specific Args
c.argument('restart_command', arg_type=ib_win_restart_type, help="Command to execute the restart operation.")
c.argument('restart_check_command', arg_type=ib_win_restart_type, help="Command to verify that restart succeeded.")
c.argument('restart_timeout', arg_type=ib_win_restart_type, help="Restart timeout specified as a string consisting of a magnitude and unit, e.g. '5m' (5 minutes) or '2h' (2 hours)", default="5m")
# Windows Update Specific Args
c.argument('search_criteria', arg_type=ib_win_update_type, help='Criteria to search updates. Omit or specify empty string to use the default (search all). Refer to above link for examples and detailed description of this field.')
c.argument('filters', arg_type=ib_win_update_type, nargs='+', help='Space delimited filters to select updates to apply. Omit or specify empty array to use the default (no filter)')
c.argument('update_limit', arg_type=ib_win_update_type, help='Maximum number of updates to apply at a time. Omit or specify 0 to use the default (1000)')
# File Args
c.argument('file_source', arg_type=ib_file_customizer_type, help="The URI of the file to be downloaded into the image. It can be a github link, SAS URI for Azure Storage, etc.")
c.argument('dest_path', arg_type=ib_file_customizer_type, help="The absolute destination path where the file specified in --file-source will be downloaded to in the image")
# endregion
# region AvailabilitySets
with self.argument_context('vm availability-set') as c:
c.argument('availability_set_name', name_arg_type, id_part='name', completer=get_resource_name_completion_list('Microsoft.Compute/availabilitySets'), help='Name of the availability set')
with self.argument_context('vm availability-set create') as c:
c.argument('availability_set_name', name_arg_type, validator=get_default_location_from_resource_group, help='Name of the availability set')
c.argument('platform_update_domain_count', type=int, help='Update Domain count. If unspecified, the server will pick the most optimal number like 5.')
c.argument('platform_fault_domain_count', type=int, help='Fault Domain count.')
c.argument('validate', help='Generate and validate the ARM template without creating any resources.', action='store_true')
c.argument('unmanaged', action='store_true', min_api='2016-04-30-preview', help='contained VMs should use unmanaged disks')
with self.argument_context('vm availability-set update') as c:
if self.supported_api_version(max_api='2016-04-30-preview', operation_group='virtual_machines'):
c.argument('name', name_arg_type, id_part='name', completer=get_resource_name_completion_list('Microsoft.Compute/availabilitySets'), help='Name of the availability set')
c.argument('availability_set_name', options_list=['--availability-set-name'])
# endregion
# region VirtualMachines
with self.argument_context('vm') as c:
c.argument('vm_name', existing_vm_name)
c.argument('size', completer=get_vm_size_completion_list)
c.argument('name', arg_type=name_arg_type)
c.argument('zone', zone_type, min_api='2017-03-30')
c.argument('caching', help='Disk caching policy', arg_type=get_enum_type(CachingTypes))
c.argument('nsg', help='The name to use when creating a new Network Security Group (default) or referencing an existing one. Can also reference an existing NSG by ID or specify "" for none.', arg_group='Network')
c.argument('nsg_rule', help='NSG rule to create when creating a new NSG. Defaults to open ports for allowing RDP on Windows and allowing SSH on Linux.', arg_group='Network', arg_type=get_enum_type(['RDP', 'SSH']))
c.argument('application_security_groups', min_api='2017-09-01', nargs='+', options_list=['--asgs'], help='Space-separated list of existing application security groups to associate with the VM.', arg_group='Network')
c.argument('workspace', is_preview=True, arg_group='Monitor', help='Name or ID of Log Analytics Workspace. If you specify the workspace through its name, the workspace should be in the same resource group with the vm, otherwise a new workspace will be created.')
with self.argument_context('vm capture') as c:
c.argument('overwrite', action='store_true')
with self.argument_context('vm update') as c:
c.argument('os_disk', min_api='2017-12-01', help="Managed OS disk ID or name to swap to")
c.argument('write_accelerator', nargs='*', min_api='2017-12-01',
help="enable/disable disk write accelerator. Use singular value 'true/false' to apply across, or specify individual disks, e.g.'os=true 1=true 2=true' for os disk and data disks with lun of 1 & 2")
c.argument('disk_caching', nargs='*', help="Use singular value to apply across, or specify individual disks, e.g. 'os=ReadWrite 0=None 1=ReadOnly' should enable update os disk and 2 data disks")
c.argument('ultra_ssd_enabled', ultra_ssd_enabled_type)
c.argument('enable_secure_boot', arg_type=get_three_state_flag(), min_api='2020-12-01',
help='Enable secure boot.')
c.argument('enable_vtpm', arg_type=get_three_state_flag(), min_api='2020-12-01',
help='Enable vTPM.')
c.argument('size', help='The VM size to be updated. See https://azure.microsoft.com/pricing/details/virtual-machines/ for size info.', is_preview=True)
with self.argument_context('vm create') as c:
c.argument('name', name_arg_type, validator=_resource_not_exists(self.cli_ctx, 'Microsoft.Compute/virtualMachines'))
c.argument('vm_name', name_arg_type, id_part=None, help='Name of the virtual machine.', completer=None)
c.argument('os_disk_size_gb', type=int, help='the size of the os disk in GB', arg_group='Storage')
c.argument('availability_set', help='Name or ID of an existing availability set to add the VM to. None by default.')
c.argument('vmss', help='Name or ID of an existing virtual machine scale set that the virtual machine should be assigned to. None by default.')
c.argument('nsg', help='The name to use when creating a new Network Security Group (default) or referencing an existing one. Can also reference an existing NSG by ID or specify "" for none (\'""\' in Azure CLI using PowerShell or --% operator).', arg_group='Network')
c.argument('nsg_rule', help='NSG rule to create when creating a new NSG. Defaults to open ports for allowing RDP on Windows and allowing SSH on Linux. NONE represents no NSG rule', arg_group='Network', arg_type=get_enum_type(['RDP', 'SSH', 'NONE']))
c.argument('application_security_groups', resource_type=ResourceType.MGMT_NETWORK, min_api='2017-09-01', nargs='+', options_list=['--asgs'], help='Space-separated list of existing application security groups to associate with the VM.', arg_group='Network', validator=validate_asg_names_or_ids)
c.argument('boot_diagnostics_storage',
help='pre-existing storage account name or its blob uri to capture boot diagnostics. Its sku should be one of Standard_GRS, Standard_LRS and Standard_RAGRS')
c.argument('accelerated_networking', resource_type=ResourceType.MGMT_NETWORK, min_api='2016-09-01', arg_type=get_three_state_flag(), arg_group='Network',
help="enable accelerated networking. Unless specified, CLI will enable it based on machine image and size")
if self.supported_api_version(min_api='2019-03-01', resource_type=ResourceType.MGMT_COMPUTE):
VirtualMachineEvictionPolicyTypes = self.get_models('VirtualMachineEvictionPolicyTypes', resource_type=ResourceType.MGMT_COMPUTE)
c.argument('eviction_policy', resource_type=ResourceType.MGMT_COMPUTE, min_api='2019-03-01',
arg_type=get_enum_type(VirtualMachineEvictionPolicyTypes, default=None),
help="The eviction policy for the Spot priority virtual machine. Default eviction policy is Deallocate for a Spot priority virtual machine")
c.argument('enable_agent', arg_type=get_three_state_flag(), min_api='2018-06-01',
help='Indicates whether virtual machine agent should be provisioned on the virtual machine. When this property is not specified, default behavior is to set it to true. This will ensure that VM Agent is installed on the VM so that extensions can be added to the VM later')
c.argument('enable_auto_update', arg_type=get_three_state_flag(), min_api='2020-06-01',
help='Indicate whether Automatic Updates is enabled for the Windows virtual machine')
c.argument('patch_mode', arg_type=get_enum_type(['AutomaticByOS', 'AutomaticByPlatform', 'Manual', 'ImageDefault']), min_api='2020-12-01',
help='Mode of in-guest patching to IaaS virtual machine. Allowed values for Windows VM: AutomaticByOS, AutomaticByPlatform, Manual. Allowed values for Linux VM: AutomaticByPlatform, ImageDefault. Manual - You control the application of patches to a virtual machine. You do this by applying patches manually inside the VM. In this mode, automatic updates are disabled; the paramater --enable-auto-update must be false. AutomaticByOS - The virtual machine will automatically be updated by the OS. The parameter --enable-auto-update must be true. AutomaticByPlatform - the virtual machine will automatically updated by the OS. ImageDefault - The virtual machine\'s default patching configuration is used. The parameter --enable-agent and --enable-auto-update must be true')
c.argument('ssh_key_name', help='Use it as public key in virtual machine. It should be an existing SSH key resource in Azure.')
c.argument('enable_hotpatching', arg_type=get_three_state_flag(), help='Patch VMs without requiring a reboot. --enable-agent must be set and --patch-mode must be set to AutomaticByPlatform', min_api='2020-12-01')
c.argument('platform_fault_domain', min_api='2020-06-01',
help='Specify the scale set logical fault domain into which the virtual machine will be created. By default, the virtual machine will be automatically assigned to a fault domain that best maintains balance across available fault domains. This is applicable only if the virtualMachineScaleSet property of this virtual machine is set. The virtual machine scale set that is referenced, must have platform fault domain count. This property cannot be updated once the virtual machine is created. Fault domain assignment can be viewed in the virtual machine instance view')
c.argument('count', type=int, is_preview=True,
help='Number of virtual machines to create. Value range is [2, 250], inclusive. Don\'t specify this parameter if you want to create a normal single VM. The VMs are created in parallel. The output of this command is an array of VMs instead of one single VM. Each VM has its own public IP, NIC. VNET and NSG are shared. It is recommended that no existing public IP, NIC, VNET and NSG are in resource group. When --count is specified, --attach-data-disks, --attach-os-disk, --boot-diagnostics-storage, --computer-name, --host, --host-group, --nics, --os-disk-name, --private-ip-address, --public-ip-address, --public-ip-address-dns-name, --storage-account, --storage-container-name, --subnet, --use-unmanaged-disk, --vnet-name are not allowed.')
c.argument('security_type', arg_type=get_enum_type(['TrustedLaunch']), min_api='2020-12-01',
help='Specify if the VM is Trusted Launch enabled. See https://docs.microsoft.com/azure/virtual-machines/trusted-launch.')
c.argument('enable_secure_boot', arg_type=get_three_state_flag(), min_api='2020-12-01',
help='Enable secure boot. It is part of trusted launch.')
c.argument('enable_vtpm', arg_type=get_three_state_flag(), min_api='2020-12-01',
help='Enable vTPM. It is part of trusted launch.')
c.argument('user_data', help='UserData for the VM. It can be passed in as file or string.', completer=FilesCompleter(), type=file_type, min_api='2021-03-01')
with self.argument_context('vm create', arg_group='Storage') as c:
c.argument('attach_os_disk', help='Attach an existing OS disk to the VM. Can use the name or ID of a managed disk or the URI to an unmanaged disk VHD.')
c.argument('attach_data_disks', nargs='+', help='Attach existing data disks to the VM. Can use the name or ID of a managed disk or the URI to an unmanaged disk VHD.')
with self.argument_context('vm create', arg_group='Dedicated Host', min_api='2019-03-01') as c:
c.argument('dedicated_host_group', options_list=['--host-group'], is_preview=True, help="Name or ID of the dedicated host group that the VM will reside in. --host and --host-group can't be used together.")
c.argument('dedicated_host', options_list=['--host'], is_preview=True, help="ID of the dedicated host that the VM will reside in. --host and --host-group can't be used together.")
with self.argument_context('vm update', arg_group='Dedicated Host', min_api='2019-03-01') as c:
c.argument('dedicated_host_group', options_list=['--host-group'], is_preview=True, help="Name or ID of the dedicated host group that the VM will reside in. --host and --host-group can't be used together. You should deallocate the VM before update, and start the VM after update. Please check out help for more examples.")
c.argument('dedicated_host', options_list=['--host'], is_preview=True, help="ID of the dedicated host that the VM will reside in. --host and --host-group can't be used together. You should deallocate the VM before update, and start the VM after update. Please check out help for more examples.")
with self.argument_context('vm open-port') as c:
c.argument('vm_name', name_arg_type, help='The name of the virtual machine to open inbound traffic on.')
c.argument('network_security_group_name', options_list=('--nsg-name',), help='The name of the network security group to create if one does not exist. Ignored if an NSG already exists.', validator=validate_nsg_name)
c.argument('apply_to_subnet', help='Allow inbound traffic on the subnet instead of the NIC', action='store_true')
c.argument('port', help="The port or port range (ex: 80-100) to open inbound traffic to. Use '*' to allow traffic to all ports. Use comma separated values to specify more than one port or port range.")
c.argument('priority', help='Rule priority, between 100 (highest priority) and 4096 (lowest priority). Must be unique for each rule in the collection.', type=int)
for scope in ['vm show', 'vm list']:
with self.argument_context(scope) as c:
c.argument('show_details', action='store_true', options_list=['--show-details', '-d'], help='show public ip address, FQDN, and power states. command will run slow')
for scope in ['vm show', 'vmss show']:
with self.argument_context(scope) as c:
c.argument('include_user_data', action='store_true', options_list=['--include-user-data', '-u'], help='Include the user data properties in the query result.', min_api='2021-03-01')
for scope in ['vm get-instance-view', 'vm wait', 'vmss wait']:
with self.argument_context(scope) as c:
c.ignore('include_user_data')
with self.argument_context('vm diagnostics') as c:
c.argument('vm_name', arg_type=existing_vm_name, options_list=['--vm-name'])
with self.argument_context('vm diagnostics set') as c:
c.argument('storage_account', completer=get_resource_name_completion_list('Microsoft.Storage/storageAccounts'))
with self.argument_context('vm install-patches') as c:
c.argument('maximum_duration', type=str, help='Specify the maximum amount of time that the operation will run. It must be an ISO 8601-compliant duration string such as PT4H (4 hours)')
c.argument('reboot_setting', arg_type=get_enum_type(RebootSetting), help='Define when it is acceptable to reboot a VM during a software update operation.')
c.argument('classifications_to_include_win', nargs='+', arg_type=get_enum_type(VMGuestPatchClassificationWindows), help='Space-separated list of classifications to include for Windows VM.')
c.argument('classifications_to_include_linux', nargs='+', arg_type=get_enum_type(VMGuestPatchClassificationLinux), help='Space-separated list of classifications to include for Linux VM.')
c.argument('kb_numbers_to_include', nargs='+', help='Space-separated list of KBs to include in the patch operation. Applicable to Windows VM only')
c.argument('kb_numbers_to_exclude', nargs='+', help='Space-separated list of KBs to exclude in the patch operation. Applicable to Windows VM only')
c.argument('exclude_kbs_requiring_reboot', arg_type=get_three_state_flag(), help="Filter out KBs that don't have a reboot behavior of 'NeverReboots' when this is set. Applicable to Windows VM only")
c.argument('package_name_masks_to_include', nargs='+', help='Space-separated list of packages to include in the patch operation. Format: packageName_packageVersion. Applicable to Linux VM only')
c.argument('package_name_masks_to_exclude', nargs='+', help='Space-separated list of packages to exclude in the patch operation. Format: packageName_packageVersion. Applicable to Linux VM only')
with self.argument_context('vm disk') as c:
c.argument('vm_name', options_list=['--vm-name'], id_part=None, completer=get_resource_name_completion_list('Microsoft.Compute/virtualMachines'))
c.argument('new', action='store_true', help='create a new disk')
c.argument('sku', arg_type=disk_sku, help='Underlying storage SKU')
c.argument('size_gb', options_list=['--size-gb', '-z'], help='size in GB. Max size: 4095 GB (certain preview disks can be larger).', type=int)
c.argument('lun', type=int, help='0-based logical unit number (LUN). Max value depends on the Virtual Machine size.')
with self.argument_context('vm disk attach') as c:
c.argument('enable_write_accelerator', min_api='2017-12-01', action='store_true', help='enable write accelerator')
c.argument('disk', options_list=['--name', '-n', c.deprecate(target='--disk', redirect='--name', hide=True)],
help="The name or ID of the managed disk", validator=validate_vm_disk, id_part='name',
completer=get_resource_name_completion_list('Microsoft.Compute/disks'))
with self.argument_context('vm disk detach') as c:
c.argument('disk_name', arg_type=name_arg_type, help='The data disk name.')
with self.argument_context('vm encryption enable') as c:
c.argument('encrypt_format_all', action='store_true', help='Encrypts-formats data disks instead of encrypting them. Encrypt-formatting is a lot faster than in-place encryption but wipes out the partition getting encrypt-formatted.')
# Place aad arguments in their own group
aad_arguments = 'Azure Active Directory'
c.argument('aad_client_id', arg_group=aad_arguments)
c.argument('aad_client_secret', arg_group=aad_arguments)
c.argument('aad_client_cert_thumbprint', arg_group=aad_arguments)
with self.argument_context('vm extension') as c:
c.argument('vm_extension_name', name_arg_type, completer=get_resource_name_completion_list('Microsoft.Compute/virtualMachines/extensions'), help='Name of the extension.', id_part='child_name_1')
c.argument('vm_name', arg_type=existing_vm_name, options_list=['--vm-name'], id_part='name')
c.argument('expand', deprecate_info=c.deprecate(expiration='3.0.0', hide=True))
with self.argument_context('vm extension list') as c:
c.argument('vm_name', arg_type=existing_vm_name, options_list=['--vm-name'], id_part=None)
with self.argument_context('vm secret') as c:
c.argument('secrets', multi_ids_type, options_list=['--secrets', '-s'], help='Space-separated list of key vault secret URIs. Perhaps, produced by \'az keyvault secret list-versions --vault-name vaultname -n cert1 --query "[?attributes.enabled].id" -o tsv\'')
c.argument('keyvault', help='Name or ID of the key vault.', validator=validate_keyvault)
c.argument('certificate', help='key vault certificate name or its full secret URL')
c.argument('certificate_store', help='Windows certificate store names. Default: My')
with self.argument_context('vm secret list') as c:
c.argument('vm_name', arg_type=existing_vm_name, id_part=None)
with self.argument_context('vm image') as c:
c.argument('publisher_name', options_list=['--publisher', '-p'], help='image publisher')
c.argument('publisher', options_list=['--publisher', '-p'], help='image publisher')
c.argument('offer', options_list=['--offer', '-f'], help='image offer')
c.argument('plan', help='image billing plan')
c.argument('sku', options_list=['--sku', '-s'], help='image sku')
c.argument('version', help="image sku's version")
c.argument('urn', help="URN, in format of 'publisher:offer:sku:version' or 'publisher:offer:sku:edge_zone:version'. If specified, other argument values can be omitted")
with self.argument_context('vm image list') as c:
c.argument('image_location', get_location_type(self.cli_ctx))
c.argument('edge_zone', edge_zone_type)
with self.argument_context('vm image list-offers') as c:
c.argument('edge_zone', edge_zone_type)
with self.argument_context('vm image list-skus') as c:
c.argument('edge_zone', edge_zone_type)
with self.argument_context('vm image list-publishers') as c:
c.argument('edge_zone', edge_zone_type)
with self.argument_context('vm image show') as c:
c.argument('skus', options_list=['--sku', '-s'])
c.argument('edge_zone', edge_zone_type)
with self.argument_context('vm image terms') as c:
c.argument('urn', help='URN, in the format of \'publisher:offer:sku:version\'. If specified, other argument values can be omitted')
c.argument('publisher', help='Image publisher')
c.argument('offer', help='Image offer')
c.argument('plan', help='Image billing plan')
with self.argument_context('vm nic') as c:
c.argument('vm_name', existing_vm_name, options_list=['--vm-name'], id_part=None)
c.argument('nics', nargs='+', help='Names or IDs of NICs.', validator=validate_vm_nics)
c.argument('primary_nic', help='Name or ID of the primary NIC. If missing, the first NIC in the list will be the primary.')
with self.argument_context('vm nic show') as c:
c.argument('nic', help='NIC name or ID.', validator=validate_vm_nic)
with self.argument_context('vm unmanaged-disk') as c:
c.argument('new', action='store_true', help='Create a new disk.')
c.argument('lun', type=int, help='0-based logical unit number (LUN). Max value depends on the Virtual Machine size.')
c.argument('vhd_uri', help="Virtual hard disk URI. For example: https://mystorage.blob.core.windows.net/vhds/d1.vhd")
with self.argument_context('vm unmanaged-disk attach') as c:
c.argument('disk_name', options_list=['--name', '-n'], help='The data disk name.')
c.argument('size_gb', options_list=['--size-gb', '-z'], help='size in GB. Max size: 4095 GB (certain preview disks can be larger).', type=int)
with self.argument_context('vm unmanaged-disk detach') as c:
c.argument('disk_name', options_list=['--name', '-n'], help='The data disk name.')
for scope in ['vm unmanaged-disk attach', 'vm unmanaged-disk detach']:
with self.argument_context(scope) as c:
c.argument('vm_name', arg_type=existing_vm_name, options_list=['--vm-name'], id_part=None)
with self.argument_context('vm unmanaged-disk list') as c:
c.argument('vm_name', options_list=['--vm-name', '--name', '-n'], arg_type=existing_vm_name, id_part=None)
with self.argument_context('vm user') as c:
c.argument('username', options_list=['--username', '-u'], help='The user name')
c.argument('password', options_list=['--password', '-p'], help='The user password')
with self.argument_context('vm list-skus') as c:
c.argument('size', options_list=['--size', '-s'], help="size name, partial name is accepted")
c.argument('zone', options_list=['--zone', '-z'], arg_type=get_three_state_flag(), help="show skus supporting availability zones")
c.argument('show_all', options_list=['--all'], arg_type=get_three_state_flag(),
help="show all information including vm sizes not available under the current subscription")
c.argument('resource_type', options_list=['--resource-type', '-r'], help='resource types e.g. "availabilitySets", "snapshots", "disks", etc')
with self.argument_context('vm restart') as c:
c.argument('force', action='store_true', help='Force the VM to restart by redeploying it. Use if the VM is unresponsive.')
with self.argument_context('vm host') as c:
c.argument('host_group_name', options_list=['--host-group'], id_part='name', help="Name of the Dedicated Host Group")
c.argument('host_name', name_arg_type, id_part='child_name_1', help="Name of the Dedicated Host")
c.ignore('expand')
with self.argument_context('vm host create') as c:
c.argument('platform_fault_domain', options_list=['--platform-fault-domain', '-d'], type=int,
help="Fault domain of the host within a group. Allowed values: 0, 1, 2")
c.argument('auto_replace_on_failure', options_list=['--auto-replace'], arg_type=get_three_state_flag(),
help="Replace the host automatically if a failure occurs")
c.argument('license_type', arg_type=get_enum_type(DedicatedHostLicenseTypes),
help="The software license type that will be applied to the VMs deployed on the dedicated host.")
c.argument('sku', help="SKU of the dedicated host. Available SKUs: https://azure.microsoft.com/pricing/details/virtual-machines/dedicated-host/")
with self.argument_context('vm host list') as c:
c.argument('host_group_name', id_part=None)
with self.argument_context('vm host group') as c:
c.argument('host_group_name', name_arg_type, id_part='name', help="Name of the Dedicated Host Group")
c.argument('automatic_placement', arg_type=get_three_state_flag(), min_api='2020-06-01',
help='Specify whether virtual machines or virtual machine scale sets can be placed automatically '
'on the dedicated host group. Automatic placement means resources are allocated on dedicated '
'hosts, that are chosen by Azure, under the dedicated host group. The value is defaulted to '
'false when not provided.')
with self.argument_context('vm host group create') as c:
c.argument('platform_fault_domain_count', options_list=["--platform-fault-domain-count", "-c"], type=int,
help="Number of fault domains that the host group can span.")
c.argument('zones', zone_type)
for scope in ["vm host", "vm host group"]:
with self.argument_context("{} create".format(scope)) as c:
location_type = get_location_type(self.cli_ctx)
custom_location_msg = " Otherwise, location will default to the resource group's location"
custom_location_type = CLIArgumentType(overrides=location_type,
help=location_type.settings["help"] + custom_location_msg)
c.argument('location', arg_type=custom_location_type)
# endregion
# region VMSS
scaleset_name_aliases = ['vm_scale_set_name', 'virtual_machine_scale_set_name', 'name']
with self.argument_context('vmss') as c:
c.argument('zones', zones_type, min_api='2017-03-30')
c.argument('instance_id', id_part='child_name_1')
c.argument('instance_ids', multi_ids_type, help='Space-separated list of IDs (ex: 1 2 3 ...) or * for all instances. If not provided, the action will be applied on the scaleset itself')
c.argument('tags', tags_type)
c.argument('caching', help='Disk caching policy', arg_type=get_enum_type(CachingTypes))
for dest in scaleset_name_aliases:
c.argument(dest, vmss_name_type)
c.argument('host_group', min_api='2020-06-01',
help='Name or ID of dedicated host group that the virtual machine scale set resides in')
for scope in ['vmss deallocate', 'vmss delete-instances', 'vmss restart', 'vmss start', 'vmss stop', 'vmss show', 'vmss update-instances', 'vmss simulate-eviction']:
with self.argument_context(scope) as c:
for dest in scaleset_name_aliases:
c.argument(dest, vmss_name_type, id_part=None) # due to instance-ids parameter
with self.argument_context('vmss create', operation_group='virtual_machine_scale_sets') as c:
VirtualMachineEvictionPolicyTypes = self.get_models('VirtualMachineEvictionPolicyTypes', resource_type=ResourceType.MGMT_COMPUTE)
c.argument('name', name_arg_type)
c.argument('nat_backend_port', default=None, help='Backend port to open with NAT rules. Defaults to 22 on Linux and 3389 on Windows.')
c.argument('single_placement_group', arg_type=get_three_state_flag(), help="Limit the scale set to a single placement group."
" See https://docs.microsoft.com/azure/virtual-machine-scale-sets/virtual-machine-scale-sets-placement-groups for details.")
c.argument('platform_fault_domain_count', type=int, help='Fault Domain count for each placement group in the availability zone', min_api='2017-12-01')
c.argument('vmss_name', name_arg_type, id_part=None, help='Name of the virtual machine scale set.')
c.argument('instance_count', help='Number of VMs in the scale set.', type=int)
c.argument('disable_overprovision', help='Overprovision option (see https://azure.microsoft.com/documentation/articles/virtual-machine-scale-sets-overview/ for details).', action='store_true')
c.argument('upgrade_policy_mode', help=None, arg_type=get_enum_type(UpgradeMode))
c.argument('health_probe', help='Probe name from the existing load balancer, mainly used for rolling upgrade or automatic repairs')
c.argument('vm_sku', help='Size of VMs in the scale set. Default to "Standard_DS1_v2". See https://azure.microsoft.com/pricing/details/virtual-machines/ for size info.')
c.argument('nsg', help='Name or ID of an existing Network Security Group.', arg_group='Network')
c.argument('eviction_policy', resource_type=ResourceType.MGMT_COMPUTE, min_api='2017-12-01', arg_type=get_enum_type(VirtualMachineEvictionPolicyTypes, default=None),
help="The eviction policy for virtual machines in a Spot priority scale set. Default eviction policy is Deallocate for a Spot priority scale set")
c.argument('application_security_groups', resource_type=ResourceType.MGMT_COMPUTE, min_api='2018-06-01', nargs='+', options_list=['--asgs'], help='Space-separated list of existing application security groups to associate with the VM.', arg_group='Network', validator=validate_asg_names_or_ids)
c.argument('computer_name_prefix', help='Computer name prefix for all of the virtual machines in the scale set. Computer name prefixes must be 1 to 15 characters long')
c.argument('orchestration_mode', help='Choose how virtual machines are managed by the scale set. In Uniform mode, you define a virtual machine model and Azure will generate identical instances based on that model. In Flexible mode, you manually create and add a virtual machine of any configuration to the scale set or generate identical instances based on virtual machine model defined for the scale set.',
arg_type=get_enum_type(['Uniform', 'Flexible']))
c.argument('scale_in_policy', scale_in_policy_type)
c.argument('automatic_repairs_grace_period', min_api='2018-10-01',
help='The amount of time (in minutes, between 30 and 90) for which automatic repairs are suspended due to a state change on VM.')
c.argument('user_data', help='UserData for the virtual machines in the scale set. It can be passed in as file or string.', completer=FilesCompleter(), type=file_type, min_api='2021-03-01')
c.argument('network_api_version', min_api='2021-03-01',
help="Specify the Microsoft.Network API version used when creating networking resources in the Network "
"Interface Configurations for Virtual Machine Scale Set with orchestration mode 'Flexible'. Default "
"value is 2020-11-01.")
c.argument('enable_spot_restore', arg_type=get_three_state_flag(), min_api='2021-04-01', help='Enable the Spot-Try-Restore feature where evicted VMSS SPOT instances will be tried to be restored opportunistically based on capacity availability and pricing constraints')
c.argument('spot_restore_timeout', min_api='2021-04-01', help='Timeout value expressed as an ISO 8601 time duration after which the platform will not try to restore the VMSS SPOT instances')
c.argument('enable_agent', arg_type=get_three_state_flag(), min_api='2018-06-01',
help='Indicate whether virtual machine agent should be provisioned on the virtual machine. When this property is not specified, default behavior is to set it to true. This will ensure that VM Agent is installed on the VM so that extensions can be added to the VM later')
c.argument('enable_auto_update', arg_type=get_three_state_flag(), min_api='2020-06-01',
help='Indicate whether Automatic Updates is enabled for the Windows virtual machine')
c.argument('patch_mode', arg_type=get_enum_type(['AutomaticByOS', 'AutomaticByPlatform', 'Manual', 'ImageDefault']), min_api='2020-12-01',
help='Mode of in-guest patching to IaaS virtual machine. Allowed values for Windows VM: AutomaticByOS, AutomaticByPlatform, Manual. Allowed values for Linux VM: AutomaticByPlatform, ImageDefault. Manual - You control the application of patches to a virtual machine. You do this by applying patches manually inside the VM. In this mode, automatic updates are disabled; the paramater --enable-auto-update must be false. AutomaticByOS - The virtual machine will automatically be updated by the OS. The parameter --enable-auto-update must be true. AutomaticByPlatform - the virtual machine will automatically updated by the OS. ImageDefault - The virtual machine\'s default patching configuration is used. The parameter --enable-agent and --enable-auto-update must be true')
with self.argument_context('vmss create', arg_group='Network Balancer') as c:
LoadBalancerSkuName = self.get_models('LoadBalancerSkuName', resource_type=ResourceType.MGMT_NETWORK)
c.argument('application_gateway', help='Name to use when creating a new application gateway (default) or referencing an existing one. Can also reference an existing application gateway by ID or specify "" for none.', options_list=['--app-gateway'])
c.argument('app_gateway_capacity', help='The number of instances to use when creating a new application gateway.')
c.argument('app_gateway_sku', help='SKU when creating a new application gateway.')
c.argument('app_gateway_subnet_address_prefix', help='The subnet IP address prefix to use when creating a new application gateway in CIDR format.')
c.argument('backend_pool_name', help='Name to use for the backend pool when creating a new load balancer or application gateway.')
c.argument('backend_port', help='When creating a new load balancer, backend port to open with NAT rules (Defaults to 22 on Linux and 3389 on Windows). When creating an application gateway, the backend port to use for the backend HTTP settings.', type=int)
c.argument('load_balancer', help='Name to use when creating a new load balancer (default) or referencing an existing one. Can also reference an existing load balancer by ID or specify "" for none.', options_list=['--load-balancer', '--lb'])
c.argument('load_balancer_sku', resource_type=ResourceType.MGMT_NETWORK, min_api='2017-08-01', options_list=['--lb-sku'], arg_type=get_enum_type(LoadBalancerSkuName),
help="Sku of the Load Balancer to create. Default to 'Standard' when single placement group is turned off; otherwise, default to 'Basic'. The public IP is supported to be created on edge zone only when it is 'Standard'")
c.argument('nat_pool_name', help='Name to use for the NAT pool when creating a new load balancer.', options_list=['--lb-nat-pool-name', '--nat-pool-name'])
with self.argument_context('vmss create', min_api='2017-03-30', arg_group='Network') as c:
c.argument('public_ip_per_vm', action='store_true', help="Each VM instance will have a public ip. For security, you can use '--nsg' to apply appropriate rules")
c.argument('vm_domain_name', help="domain name of VM instances, once configured, the FQDN is `vm<vm-index>.<vm-domain-name>.<..rest..>`")
c.argument('dns_servers', nargs='+', help="space-separated IP addresses of DNS servers, e.g. 10.0.0.5 10.0.0.6")
c.argument('accelerated_networking', arg_type=get_three_state_flag(),
help="enable accelerated networking. Unless specified, CLI will enable it based on machine image and size")
with self.argument_context('vmss update') as c:
protection_policy_type = CLIArgumentType(overrides=get_three_state_flag(), arg_group="Protection Policy", min_api='2019-03-01')
c.argument('protect_from_scale_in', arg_type=protection_policy_type, help="Protect the VM instance from scale-in operations.")
c.argument('protect_from_scale_set_actions', arg_type=protection_policy_type, help="Protect the VM instance from scale set actions (including scale-in).")
c.argument('enable_terminate_notification', min_api='2019-03-01', arg_type=get_three_state_flag(),
help='Enable terminate notification')
c.argument('ultra_ssd_enabled', ultra_ssd_enabled_type)
c.argument('scale_in_policy', scale_in_policy_type)
c.argument('user_data', help='UserData for the virtual machines in the scale set. It can be passed in as file or string. If empty string is passed in, the existing value will be deleted.', completer=FilesCompleter(), type=file_type, min_api='2021-03-01')
c.argument('enable_spot_restore', arg_type=get_three_state_flag(), min_api='2021-04-01',
help='Enable the Spot-Try-Restore feature where evicted VMSS SPOT instances will be tried to be restored opportunistically based on capacity availability and pricing constraints')
c.argument('spot_restore_timeout', min_api='2021-04-01',
help='Timeout value expressed as an ISO 8601 time duration after which the platform will not try to restore the VMSS SPOT instances')
c.argument('vm_sku', help='Size of VMs in the scale set. Default to "Standard_DS1_v2". See https://azure.microsoft.com/pricing/details/virtual-machines/ for size info.', is_preview=True)
with self.argument_context('vmss update', min_api='2018-10-01', arg_group='Automatic Repairs') as c:
c.argument('enable_automatic_repairs', arg_type=get_three_state_flag(), help='Enable automatic repairs')
c.argument(
'automatic_repairs_grace_period',
help='The amount of time (in minutes, between 30 and 90) for which automatic repairs are suspended due to a state change on VM.'
)
for scope in ['vmss create', 'vmss update']:
with self.argument_context(scope) as c:
c.argument('terminate_notification_time', min_api='2019-03-01',
help='Length of time (in minutes, between 5 and 15) a notification to be sent to the VM on the instance metadata server till the VM gets deleted')
c.argument('max_batch_instance_percent', type=int, min_api='2020-12-01',
help='The maximum percent of total virtual machine instances that will be upgraded simultaneously by the rolling upgrade in one batch. Default: 20%')
c.argument('max_unhealthy_instance_percent', type=int, min_api='2020-12-01',
help='The maximum percentage of the total virtual machine instances in the scale set that can be simultaneously unhealthy. Default: 20%')
c.argument('max_unhealthy_upgraded_instance_percent', type=int, min_api='2020-12-01',
help='The maximum percentage of upgraded virtual machine instances that can be found to be in an unhealthy state. Default: 20%')
c.argument('pause_time_between_batches', min_api='2020-12-01',
help='The wait time between completing the update for all virtual machines in one batch and starting the next batch. Default: 0 seconds')
c.argument('enable_cross_zone_upgrade', arg_type=get_three_state_flag(), min_api='2020-12-01',
help='Set this Boolean property will allow VMSS to ignore AZ boundaries when constructing upgrade batches, and only consider Update Domain and maxBatchInstancePercent to determine the batch size')
c.argument('prioritize_unhealthy_instances', arg_type=get_three_state_flag(), min_api='2020-12-01',
help='Set this Boolean property will lead to all unhealthy instances in a scale set getting upgraded before any healthy instances')
for scope, help_prefix in [('vmss update', 'Update the'), ('vmss wait', 'Wait on the')]:
with self.argument_context(scope) as c:
c.argument('instance_id', id_part='child_name_1', help="{0} VM instance with this ID. If missing, {0} VMSS.".format(help_prefix))
for scope in ['vmss update-instances', 'vmss delete-instances']:
with self.argument_context(scope) as c:
c.argument('instance_ids', multi_ids_type, help='Space-separated list of IDs (ex: 1 2 3 ...) or * for all instances.')
with self.argument_context('vmss diagnostics') as c:
c.argument('vmss_name', id_part=None, help='Scale set name')
with self.argument_context('vmss disk') as c:
options_list = ['--vmss-name'] + [c.deprecate(target=opt, redirect='--vmss-name', hide=True)for opt in name_arg_type.settings['options_list']]
new_vmss_name_type = CLIArgumentType(overrides=vmss_name_type, options_list=options_list)
c.argument('lun', type=int, help='0-based logical unit number (LUN). Max value depends on the Virtual Machine instance size.')
c.argument('size_gb', options_list=['--size-gb', '-z'], help='size in GB. Max size: 4095 GB (certain preview disks can be larger).', type=int)
c.argument('vmss_name', new_vmss_name_type, completer=get_resource_name_completion_list('Microsoft.Compute/virtualMachineScaleSets'))
c.argument('disk', validator=validate_vmss_disk, help='existing disk name or ID to attach or detach from VM instances',
min_api='2017-12-01', completer=get_resource_name_completion_list('Microsoft.Compute/disks'))
c.argument('instance_id', help='Scale set VM instance id', min_api='2017-12-01')
c.argument('sku', arg_type=disk_sku, help='Underlying storage SKU')
with self.argument_context('vmss encryption') as c:
c.argument('vmss_name', vmss_name_type, completer=get_resource_name_completion_list('Microsoft.Compute/virtualMachineScaleSets'))
with self.argument_context('vmss extension') as c:
c.argument('extension_name', name_arg_type, help='Name of the extension.')
c.argument('vmss_name', vmss_name_type, options_list=['--vmss-name'], id_part=None)
with self.argument_context('vmss nic') as c:
c.argument('virtual_machine_scale_set_name', options_list=['--vmss-name'], help='Scale set name.', completer=get_resource_name_completion_list('Microsoft.Compute/virtualMachineScaleSets'), id_part='name')
c.argument('virtualmachine_index', options_list=['--instance-id'], id_part='child_name_1')
c.argument('network_interface_name', options_list=['--name', '-n'], metavar='NIC_NAME', help='The network interface (NIC).', completer=get_resource_name_completion_list('Microsoft.Network/networkInterfaces'), id_part='child_name_2')
with self.argument_context('vmss nic list') as c:
c.argument('virtual_machine_scale_set_name', arg_type=vmss_name_type, options_list=['--vmss-name'], id_part=None)
with self.argument_context('vmss set-orchestration-service-state') as c:
c.argument('service_name', arg_type=get_enum_type(OrchestrationServiceNames), help='The name of the orchestration service.')
c.argument('action', arg_type=get_enum_type(OrchestrationServiceStateAction), help='The action to be performed.')
# endregion
# region VM & VMSS Shared
for scope in ['vm', 'vmss']:
with self.argument_context(scope) as c:
c.argument('no_auto_upgrade',
options_list=['--no-auto-upgrade-minor-version', c.deprecate(target='--no-auto-upgrade', redirect='--no-auto-upgrade-minor-version')],
arg_type=get_three_state_flag(),
help='If set, the extension service will not automatically pick or upgrade to the latest minor version, even if the extension is redeployed.')
with self.argument_context('{} run-command'.format(scope)) as c:
c.argument('command_id', completer=get_vm_run_command_completion_list, help="The command id. Use 'az {} run-command list' to get the list".format(scope))
if scope == 'vmss':
c.argument('vmss_name', vmss_name_type)
with self.argument_context('{} run-command invoke'.format(scope)) as c:
c.argument('parameters', nargs='+', help="space-separated parameters in the format of '[name=]value'")
c.argument('scripts', nargs='+', help="Space-separated script lines. Use @{file} to load script from a file")
with self.argument_context('{} stop'.format(scope)) as c:
c.argument('skip_shutdown', action='store_true', help='Skip shutdown and power-off immediately.', min_api='2019-03-01')
for scope in ['vm identity assign', 'vmss identity assign']:
with self.argument_context(scope) as c:
c.argument('assign_identity', options_list=['--identities'], nargs='*', help="Space-separated identities to assign. Use '{0}' to refer to the system assigned identity. Default: '{0}'".format(MSI_LOCAL_ID))
c.argument('vm_name', existing_vm_name)
c.argument('vmss_name', vmss_name_type)
for scope in ['vm identity remove', 'vmss identity remove']:
with self.argument_context(scope) as c:
c.argument('identities', nargs='+', help="Space-separated identities to remove. Use '{0}' to refer to the system assigned identity. Default: '{0}'".format(MSI_LOCAL_ID))
c.argument('vm_name', existing_vm_name)
c.argument('vmss_name', vmss_name_type)
for scope in ['vm identity show', 'vmss identity show']:
with self.argument_context(scope) as c:
c.argument('vm_name', existing_vm_name)
c.argument('vmss_name', vmss_name_type)
for scope in ['vm application set', 'vmss application set']:
with self.argument_context(scope) as c:
c.argument('vm', existing_vm_name)
c.argument('vmss_name', vmss_name_type)
c.argument('application_version_ids', options_list=['--app-version-ids'], nargs='*', help="Space-separated application version ids to set to VM.")
c.argument('order_applications', action='store_true', help='Whether set order index at each gallery applications, the order index starts from 1.')
c.argument('application_configuration_overrides', options_list=['--app-config-overrides'], nargs='*',
help='Space-separated application configuration overrides for each application version ids. '
'It should have the same number of items as the application version ids. Null is available for a application '
'which does not have a configuration override.')
for scope in ['vm application list', 'vmss application list']:
with self.argument_context(scope) as c:
c.argument('vm_name', options_list=['--vm-name', '--name', '-n'], arg_type=existing_vm_name, id_part=None)
c.argument('vmss_name', vmss_name_type, id_part=None)
for scope in ['vm create', 'vmss create']:
with self.argument_context(scope) as c:
c.argument('location', get_location_type(self.cli_ctx), help='Location in which to create VM and related resources. If default location is not configured, will default to the resource group\'s location')
c.argument('tags', tags_type)
c.argument('no_wait', help='Do not wait for the long-running operation to finish.')
c.argument('validate', options_list=['--validate'], help='Generate and validate the ARM template without creating any resources.', action='store_true')
c.argument('size', help='The VM size to be created. See https://azure.microsoft.com/pricing/details/virtual-machines/ for size info.')
c.argument('image', completer=get_urn_aliases_completion_list)
c.argument('custom_data', help='Custom init script file or text (cloud-init, cloud-config, etc..)', completer=FilesCompleter(), type=file_type)
c.argument('secrets', multi_ids_type, help='One or many Key Vault secrets as JSON strings or files via `@{path}` containing `[{ "sourceVault": { "id": "value" }, "vaultCertificates": [{ "certificateUrl": "value", "certificateStore": "cert store name (only on windows)"}] }]`', type=file_type, completer=FilesCompleter())
c.argument('assign_identity', nargs='*', arg_group='Managed Service Identity', help="accept system or user assigned identities separated by spaces. Use '[system]' to refer system assigned identity, or a resource id to refer user assigned identity. Check out help for more examples")
c.ignore('aux_subscriptions')
c.argument('edge_zone', edge_zone_type)
with self.argument_context(scope, arg_group='Authentication') as c:
c.argument('generate_ssh_keys', action='store_true', help='Generate SSH public and private key files if missing. The keys will be stored in the ~/.ssh directory')
c.argument('admin_username', help='Username for the VM. Default value is current username of OS. If the default value is system reserved, then default value will be set to azureuser. Please refer to https://docs.microsoft.com/rest/api/compute/virtualmachines/createorupdate#osprofile to get a full list of reserved values.')
c.argument('admin_password', help="Password for the VM if authentication type is 'Password'.")
c.argument('ssh_key_value', options_list=['--ssh-key-values'], completer=FilesCompleter(), type=file_type, nargs='+')
c.argument('ssh_dest_key_path', help='Destination file path on the VM for the SSH key. If the file already exists, the specified key(s) are appended to the file. Destination path for SSH public keys is currently limited to its default value "/home/username/.ssh/authorized_keys" due to a known issue in Linux provisioning agent.')
c.argument('authentication_type', help='Type of authentication to use with the VM. Defaults to password for Windows and SSH public key for Linux. "all" enables both ssh and password authentication. ', arg_type=get_enum_type(['ssh', 'password', 'all']))
with self.argument_context(scope, arg_group='Storage') as c:
if DiskStorageAccountTypes:
allowed_values = ", ".join([sku.value for sku in DiskStorageAccountTypes])
else:
allowed_values = ", ".join(['Premium_LRS', 'Standard_LRS'])
usage = 'Usage: [--storage-sku SKU | --storage-sku ID=SKU ID=SKU ID=SKU...], where each ID is "os" or a 0-indexed lun.'
allowed_values = 'Allowed values: {}.'.format(allowed_values)
storage_sku_help = 'The SKU of the storage account with which to persist VM. Use a singular sku that would be applied across all disks, ' \
'or specify individual disks. {} {}'.format(usage, allowed_values)
c.argument('os_disk_name', help='The name of the new VM OS disk.')
c.argument('os_type', help='Type of OS installed on a custom VHD. Do not use when specifying an URN or URN alias.', arg_type=get_enum_type(['windows', 'linux']))
c.argument('storage_account', help="Only applicable when used with `--use-unmanaged-disk`. The name to use when creating a new storage account or referencing an existing one. If omitted, an appropriate storage account in the same resource group and location will be used, or a new one will be created.")
c.argument('storage_sku', nargs='+', help=storage_sku_help)
c.argument('storage_container_name', help="Only applicable when used with `--use-unmanaged-disk`. Name of the storage container for the VM OS disk. Default: vhds")
c.ignore('os_publisher', 'os_offer', 'os_sku', 'os_version', 'storage_profile')
c.argument('use_unmanaged_disk', action='store_true', help='Do not use managed disk to persist VM')
c.argument('os_disk_size_gb', type=int, help='OS disk size in GB to create.')
c.argument('data_disk_sizes_gb', nargs='+', type=int, help='space-separated empty managed data disk sizes in GB to create')
c.ignore('disk_info', 'storage_account_type', 'public_ip_address_type', 'nsg_type', 'nic_type', 'vnet_type', 'load_balancer_type', 'app_gateway_type')
c.argument('os_caching', options_list=[self.deprecate(target='--storage-caching', redirect='--os-disk-caching', hide=True), '--os-disk-caching'], help='Storage caching type for the VM OS disk. Default: ReadWrite', arg_type=get_enum_type(CachingTypes))
c.argument('data_caching', options_list=['--data-disk-caching'], nargs='+',
help="storage caching type for data disk(s), including 'None', 'ReadOnly', 'ReadWrite', etc. Use a singular value to apply on all disks, or use `<lun>=<vaule1> <lun>=<value2>` to configure individual disk")
c.argument('ultra_ssd_enabled', ultra_ssd_enabled_type)
c.argument('ephemeral_os_disk', arg_type=get_three_state_flag(), min_api='2018-06-01',
help='Allows you to create an OS disk directly on the host node, providing local disk performance and faster VM/VMSS reimage time.', is_preview=True)
c.argument('os_disk_encryption_set', min_api='2019-07-01', help='Name or ID of disk encryption set for OS disk.')
c.argument('data_disk_encryption_sets', nargs='+', min_api='2019-07-01',
help='Names or IDs (space delimited) of disk encryption sets for data disks.')
c.argument('data_disk_iops', min_api='2019-07-01', nargs='+', type=int, help='Specify the Read-Write IOPS (space delimited) for the managed disk. Should be used only when StorageAccountType is UltraSSD_LRS. If not specified, a default value would be assigned based on diskSizeGB.')
c.argument('data_disk_mbps', min_api='2019-07-01', nargs='+', type=int, help='Specify the bandwidth in MB per second (space delimited) for the managed disk. Should be used only when StorageAccountType is UltraSSD_LRS. If not specified, a default value would be assigned based on diskSizeGB.')
c.argument('specialized', arg_type=get_three_state_flag(), help='Indicate whether the source image is specialized.')
c.argument('encryption_at_host', arg_type=get_three_state_flag(), help='Enable Host Encryption for the VM or VMSS. This will enable the encryption for all the disks including Resource/Temp disk at host itself.')
c.argument('os_disk_delete_option', arg_type=get_enum_type(self.get_models('DiskDeleteOptionTypes')), min_api='2021-03-01',
help='Specify the behavior of the managed disk when the VM gets deleted i.e whether the managed disk is deleted or detached.')
c.argument('data_disk_delete_option', options_list=['--data-disk-delete-option', self.deprecate(target='--data-delete-option', redirect='--data-disk-delete-option', hide=True)],
nargs='+', min_api='2021-03-01',
help='Specify whether data disk should be deleted or detached upon VM deletion.')
with self.argument_context(scope, arg_group='Network') as c:
c.argument('vnet_name', help='Name of the virtual network when creating a new one or referencing an existing one.')
c.argument('vnet_address_prefix', help='The IP address prefix to use when creating a new VNet in CIDR format.')
c.argument('subnet', help='The name of the subnet when creating a new VNet or referencing an existing one. Can also reference an existing subnet by ID. If both vnet-name and subnet are omitted, an appropriate VNet and subnet will be selected automatically, or a new one will be created.')
c.argument('subnet_address_prefix', help='The subnet IP address prefix to use when creating a new VNet in CIDR format.')
c.argument('nics', nargs='+', help='Names or IDs of existing NICs to attach to the VM. The first NIC will be designated as primary. If omitted, a new NIC will be created. If an existing NIC is specified, do not specify subnet, VNet, public IP or NSG.')
c.argument('private_ip_address', help='Static private IP address (e.g. 10.0.0.5).')
c.argument('public_ip_address', help='Name of the public IP address when creating one (default) or referencing an existing one. Can also reference an existing public IP by ID or specify "" for None (\'""\' in Azure CLI using PowerShell or --% operator).')
c.argument('public_ip_address_allocation', help=None, default=None, arg_type=get_enum_type(['dynamic', 'static']))
c.argument('public_ip_address_dns_name', help='Globally unique DNS name for a newly created public IP.')
if self.supported_api_version(min_api='2017-08-01', resource_type=ResourceType.MGMT_NETWORK):
PublicIPAddressSkuName = self.get_models('PublicIPAddressSkuName', resource_type=ResourceType.MGMT_NETWORK)
c.argument('public_ip_sku', help='Public IP SKU. It is set to Basic by default. The public IP is supported to be created on edge zone only when it is \'Standard\'',
default=None, arg_type=get_enum_type(PublicIPAddressSkuName))
c.argument('nic_delete_option', nargs='+', min_api='2021-03-01',
help='Specify what happens to the network interface when the VM is deleted. Use a singular '
'value to apply on all resources, or use <Name>=<Value> to configure '
'the delete behavior for individual resources. Possible options are Delete and Detach.')
with self.argument_context(scope, arg_group='Marketplace Image Plan') as c:
c.argument('plan_name', help='plan name')
c.argument('plan_product', help='plan product')
c.argument('plan_publisher', help='plan publisher')
c.argument('plan_promotion_code', help='plan promotion code')
for scope in ['vm create', 'vmss create', 'vm identity assign', 'vmss identity assign']:
with self.argument_context(scope) as c:
arg_group = 'Managed Service Identity' if scope.split()[-1] == 'create' else None
c.argument('identity_scope', options_list=['--scope'], arg_group=arg_group, help="Scope that the system assigned identity can access")
c.argument('identity_role', options_list=['--role'], arg_group=arg_group, help="Role name or id the system assigned identity will have")
c.ignore('identity_role_id')
with self.argument_context('vm auto-shutdown') as c:
c.argument('off', action='store_true', help='Turn off auto-shutdown for VM. Configuration will be cleared.')
c.argument('email', help='The email recipient to send notifications to (can be a list of semi-colon separated email addresses)')
c.argument('time', help='The UTC time of day the schedule will occur every day. Format: hhmm. Example: 1730')
c.argument('webhook', help='The webhook URL to which the notification will be sent')
c.argument('location', validator=get_default_location_from_resource_group)
for scope in ['vm diagnostics', 'vmss diagnostics']:
with self.argument_context(scope) as c:
c.argument('version', help='version of the diagnostics extension. Will use the latest if not specfied')
c.argument('settings', help='json string or a file path, which defines data to be collected.', type=validate_file_or_dict, completer=FilesCompleter())
c.argument('protected_settings', help='json string or a file path containing private configurations such as storage account keys, etc.', type=validate_file_or_dict, completer=FilesCompleter())
c.argument('is_windows_os', action='store_true', help='for Windows VMs')
for scope in ['vm encryption', 'vmss encryption']:
with self.argument_context(scope) as c:
c.argument('volume_type', help='Type of volume that the encryption operation is performed on', arg_type=get_enum_type(['DATA', 'OS', 'ALL']))
c.argument('force', action='store_true', help='continue by ignoring client side validation errors')
c.argument('disk_encryption_keyvault', help='Name or ID of the key vault where the generated encryption key will be placed.')
c.argument('key_encryption_key', help='Key vault key name or URL used to encrypt the disk encryption key.')
c.argument('key_encryption_keyvault', help='Name or ID of the key vault containing the key encryption key used to encrypt the disk encryption key. If missing, CLI will use `--disk-encryption-keyvault`.')
for scope in ['vm extension', 'vmss extension']:
with self.argument_context(scope) as c:
c.argument('publisher', help='The name of the extension publisher.')
c.argument('settings', type=validate_file_or_dict, help='Extension settings in JSON format. A JSON file path is also accepted.')
c.argument('protected_settings', type=validate_file_or_dict, help='Protected settings in JSON format for sensitive information like credentials. A JSON file path is also accepted.')
c.argument('version', help='The version of the extension. To pin extension version to this value, please specify --no-auto-upgrade-minor-version.')
c.argument('enable_auto_upgrade', arg_type=get_three_state_flag(),
help='Indicate the extension should be automatically upgraded by the platform if there is a newer version of the extension available.')
with self.argument_context('vm extension set') as c:
c.argument('vm_extension_name', name_arg_type,
completer=get_resource_name_completion_list('Microsoft.Compute/virtualMachines/extensions'),
help='Name of the extension.', id_part=None)
c.argument('force_update', action='store_true', help='force to update even if the extension configuration has not changed.')
c.argument('extension_instance_name', extension_instance_name_type)
with self.argument_context('vmss extension set', min_api='2017-12-01') as c:
c.argument('force_update', action='store_true', help='force to update even if the extension configuration has not changed.')
c.argument('extension_instance_name', extension_instance_name_type)
c.argument('provision_after_extensions', nargs='+', help='Space-separated list of extension names after which this extension should be provisioned. These extensions must already be set on the vm.')
for scope in ['vm extension image', 'vmss extension image']:
with self.argument_context(scope) as c:
c.argument('image_location', options_list=['--location', '-l'], help='Image location.')
c.argument('name', help='Image name', id_part=None)
c.argument('publisher_name', options_list=['--publisher', '-p'], help='Image publisher name')
c.argument('type', options_list=['--name', '-n'], help='Name of the extension')
c.argument('latest', action='store_true', help='Show the latest version only.')
c.argument('version', help='Extension version')
c.argument('orderby', help="the $orderby odata query option")
c.argument('top', help='the $top odata query option')
for scope in ['vm create', 'vm update', 'vmss create', 'vmss update']:
with self.argument_context(scope) as c:
license_msg = "Specifies that the Windows image or disk was licensed on-premises. " \
"To enable Azure Hybrid Benefit for Windows Server, use 'Windows_Server'. " \
"To enable Multitenant Hosting Rights for Windows 10, use 'Windows_Client'. " \
"For more information see the Azure Windows VM online docs."
c.argument('license_type', help=license_msg, arg_type=get_enum_type(['Windows_Server', 'Windows_Client', 'RHEL_BYOS', 'SLES_BYOS', 'None']))
c.argument('priority', resource_type=ResourceType.MGMT_COMPUTE, min_api='2019-03-01',
arg_type=get_enum_type(self.get_models('VirtualMachinePriorityTypes'), default=None),
help="Priority. Use 'Spot' to run short-lived workloads in a cost-effective way. 'Low' enum will be deprecated in the future. Please use 'Spot' to deploy Azure spot VM and/or VMSS. Default to Regular.")
c.argument('max_price', min_api='2019-03-01', type=float, is_preview=True,
help='The maximum price (in US Dollars) you are willing to pay for a Spot VM/VMSS. -1 indicates that the Spot VM/VMSS should not be evicted for price reasons')
c.argument('capacity_reservation_group', options_list=['--capacity-reservation-group', '--crg'],
help='The ID or name of the capacity reservation group that is used to allocate. Pass in "None" to disassociate the capacity reservation group. Please note that if you want to delete a VM/VMSS that has been associated with capacity reservation group, you need to disassociate the capacity reservation group first.',
min_api='2021-04-01', is_preview=True)
with self.argument_context('vm update') as c:
c.argument('license_type', help=license_msg, arg_type=get_enum_type(
['Windows_Server', 'Windows_Client', 'RHEL_BYOS', 'SLES_BYOS', 'RHEL_ELS_6', 'None']))
c.argument('user_data', help='UserData for the VM. It can be passed in as file or string. If empty string is passed in, the existing value will be deleted.', completer=FilesCompleter(), type=file_type, min_api='2021-03-01')
with self.argument_context('vmss create') as c:
c.argument('priority', resource_type=ResourceType.MGMT_COMPUTE, min_api='2017-12-01',
arg_type=get_enum_type(self.get_models('VirtualMachinePriorityTypes'), default=None),
help="Priority. Use 'Spot' to run short-lived workloads in a cost-effective way. 'Low' enum will be deprecated in the future. Please use 'Spot' to deploy Azure spot VM and/or VMSS. Default to Regular.")
with self.argument_context('sig') as c:
c.argument('gallery_name', options_list=['--gallery-name', '-r'], help='gallery name')
c.argument('gallery_image_name', options_list=['--gallery-image-definition', '-i'], help='gallery image definition')
c.argument('gallery_image_version', options_list=['--gallery-image-version', '-e'], help='gallery image version')
for scope in ['sig show', 'sig image-definition show', 'sig image-definition delete']:
with self.argument_context(scope) as c:
c.argument('gallery_name', options_list=['--gallery-name', '-r'], id_part='name', help='gallery name')
c.argument('gallery_image_name', options_list=['--gallery-image-definition', '-i'], id_part='child_name_1', help='gallery image definition')
with self.argument_context('sig list-shared') as c:
c.argument('location', arg_type=get_location_type(self.cli_ctx))
c.argument('shared_to', shared_to_type)
with self.argument_context('sig show-shared') as c:
c.argument('location', arg_type=get_location_type(self.cli_ctx), id_part='name')
c.argument('gallery_unique_name', type=str, help='The unique name of the Shared Gallery.',
id_part='child_name_1')
for scope in ['sig share add', 'sig share remove']:
with self.argument_context(scope) as c:
c.argument('gallery_name', type=str, help='The name of the Shared Image Gallery.', id_part='name')
c.argument('subscription_ids', nargs='+', help='A list of subscription ids to share the gallery.')
c.argument('tenant_ids', nargs='+', help='A list of tenant ids to share the gallery.')
with self.argument_context('sig share add') as c:
c.argument('op_type', default='Add', deprecate_info=c.deprecate(hide=True),
help='distinguish add operation and remove operation')
with self.argument_context('sig share remove') as c:
c.argument('op_type', default='Remove', deprecate_info=c.deprecate(hide=True),
help='distinguish add operation and remove operation')
with self.argument_context('sig share reset') as c:
c.argument('gallery_name', type=str, help='The name of the Shared Image Gallery.', id_part='name')
with self.argument_context('sig image-definition create') as c:
c.argument('offer', options_list=['--offer', '-f'], help='image offer')
c.argument('sku', options_list=['--sku', '-s'], help='image sku')
c.argument('publisher', options_list=['--publisher', '-p'], help='image publisher')
c.argument('os_type', arg_type=get_enum_type(['Windows', 'Linux']), help='the type of the OS that is included in the disk if creating a VM from user-image or a specialized VHD')
c.argument('os_state', arg_type=get_enum_type(self.get_models('OperatingSystemStateTypes')), help="This property allows the user to specify whether the virtual machines created under this image are 'Generalized' or 'Specialized'.")
c.argument('hyper_v_generation', arg_type=get_enum_type(self.get_models('HyperVGenerationTypes')), help='The hypervisor generation of the Virtual Machine. Applicable to OS disks only.')
c.argument('minimum_cpu_core', type=int, arg_group='Recommendation', help='minimum cpu cores')
c.argument('maximum_cpu_core', type=int, arg_group='Recommendation', help='maximum cpu cores')
c.argument('minimum_memory', type=int, arg_group='Recommendation', help='minimum memory in MB')
c.argument('maximum_memory', type=int, arg_group='Recommendation', help='maximum memory in MB')
c.argument('plan_publisher', help='plan publisher', arg_group='Purchase plan')
c.argument('plan_name', help='plan name', arg_group='Purchase plan')
c.argument('plan_product', help='plan product', arg_group='Purchase plan')
c.argument('eula', help='The Eula agreement for the gallery image')
c.argument('privacy_statement_uri', help='The privacy statement uri')
c.argument('release_note_uri', help='The release note uri')
c.argument('end_of_life_date', help="the end of life date, e.g. '2020-12-31'")
c.argument('disallowed_disk_types', nargs='*', help='disk types which would not work with the image, e.g., Standard_LRS')
c.argument('features', help='A list of gallery image features. E.g. "IsSecureBootSupported=true IsMeasuredBootSupported=false"')
with self.argument_context('sig image-definition list-shared') as c:
c.argument('location', arg_type=get_location_type(self.cli_ctx), id_part='name')
c.argument('gallery_unique_name', type=str, help='The unique name of the Shared Gallery.',
id_part='child_name_1')
c.argument('shared_to', shared_to_type)
with self.argument_context('sig image-definition show-shared') as c:
c.argument('location', arg_type=get_location_type(self.cli_ctx), id_part='name')
c.argument('gallery_unique_name', type=str, help='The unique name of the Shared Gallery.',
id_part='child_name_1')
c.argument('gallery_image_name', options_list=['--gallery-image-definition', '-i'], type=str, help='The name '
'of the Shared Gallery Image Definition from which the Image Versions are to be listed.',
id_part='child_name_2')
with self.argument_context('sig create') as c:
c.argument('description', help='the description of the gallery')
c.argument('permissions', arg_type=get_enum_type(GallerySharingPermissionTypes), arg_group='Sharing Profile',
min_api='2020-09-30', is_experimental=True,
help='This property allows you to specify the permission of sharing gallery.')
c.argument('soft_delete', arg_type=get_three_state_flag(), min_api='2021-03-01', is_preview=True,
help='Enable soft-deletion for resources in this gallery, '
'allowing them to be recovered within retention time.')
with self.argument_context('sig update') as c:
c.ignore('gallery')
c.argument('permissions', arg_type=get_enum_type(GallerySharingPermissionTypes), arg_group='Sharing Profile',
min_api='2020-09-30', is_experimental=True,
help='This property allows you to specify the permission of sharing gallery.')
c.argument('soft_delete', arg_type=get_three_state_flag(), min_api='2021-03-01', is_preview=True,
help='Enable soft-deletion for resources in this gallery, '
'allowing them to be recovered within retention time.')
with self.argument_context('sig image-definition create') as c:
c.argument('description', help='the description of the gallery image definition')
with self.argument_context('sig image-definition update') as c:
c.ignore('gallery_image')
with self.argument_context('sig image-version') as c:
deprecated_option = c.deprecate(target='--gallery-image-version-name', redirect='--gallery-image-version', hide=True, expiration="3.0.0")
c.argument('gallery_image_version_name', options_list=['--gallery-image-version', '-e', deprecated_option],
help='Gallery image version in semantic version pattern. The allowed characters are digit and period. Digits must be within the range of a 32-bit integer, e.g. `<MajorVersion>.<MinorVersion>.<Patch>`')
with self.argument_context('sig image-version create', resource_type=ResourceType.MGMT_COMPUTE, operation_group='gallery_image_versions') as c:
c.argument('gallery_image_version', options_list=['--gallery-image-version', '-e'],
help='Gallery image version in semantic version pattern. The allowed characters are digit and period. Digits must be within the range of a 32-bit integer, e.g. `<MajorVersion>.<MinorVersion>.<Patch>`')
c.argument('description', help='the description of the gallery image version')
c.argument('managed_image', help='image name(if in the same resource group) or resource id')
c.argument('os_snapshot', help='Name or ID of OS disk snapshot')
c.argument('data_snapshots', nargs='+', help='Names or IDs (space-delimited) of data disk snapshots')
c.argument('data_snapshot_luns', nargs='+', help='Logical unit numbers (space-delimited) of data disk snapshots')
c.argument('exclude_from_latest', arg_type=get_three_state_flag(), help='The flag means that if it is set to true, people deploying VMs with version omitted will not use this version.')
c.argument('version', help='image version')
c.argument('end_of_life_date', help="the end of life date, e.g. '2020-12-31'")
c.argument('storage_account_type', help="The default storage account type to be used per region. To set regional storage account types, use --target-regions",
arg_type=get_enum_type(["Standard_LRS", "Standard_ZRS", "Premium_LRS"]), min_api='2019-03-01')
c.argument('target_region_encryption', nargs='+',
help='Space-separated list of customer managed keys for encrypting the OS and data disks in the gallery artifact for each region. Format for each region: `<os_des>,<lun1>,<lun1_des>,<lun2>,<lun2_des>`. Use "null" as a placeholder.')
c.argument('os_vhd_uri', help='Source VHD URI of OS disk')
c.argument('os_vhd_storage_account', help='Name or ID of storage account of source VHD URI of OS disk')
c.argument('data_vhds_uris', nargs='+', help='Source VHD URIs (space-delimited) of data disks')
c.argument('data_vhds_luns', nargs='+', help='Logical unit numbers (space-delimited) of source VHD URIs of data disks')
c.argument('data_vhds_storage_accounts', options_list=['--data-vhds-storage-accounts', '--data-vhds-sa'], nargs='+', help='Names or IDs (space-delimited) of storage accounts of source VHD URIs of data disks')
c.argument('replication_mode', min_api='2021-07-01', arg_type=get_enum_type(ReplicationMode), help='Optional parameter which specifies the mode to be used for replication. This property is not updatable.')
with self.argument_context('sig image-version list-shared') as c:
c.argument('location', arg_type=get_location_type(self.cli_ctx), id_part='name')
c.argument('gallery_unique_name', type=str, help='The unique name of the Shared Gallery.',
id_part='child_name_1')
c.argument('gallery_image_name', options_list=['--gallery-image-definition', '-i'], type=str, help='The name '
'of the Shared Gallery Image Definition from which the Image Versions are to be listed.',
id_part='child_name_2')
c.argument('shared_to', shared_to_type)
with self.argument_context('sig image-version show') as c:
c.argument('expand', help="The expand expression to apply on the operation, e.g. 'ReplicationStatus'")
with self.argument_context('sig image-version show-shared') as c:
c.argument('location', arg_type=get_location_type(self.cli_ctx), id_part='name')
c.argument('gallery_unique_name', type=str, help='The unique name of the Shared Gallery.',
id_part='child_name_1')
c.argument('gallery_image_name', options_list=['--gallery-image-definition', '-i'], type=str, help='The name '
'of the Shared Gallery Image Definition from which the Image Versions are to be listed.',
id_part='child_name_2')
c.argument('gallery_image_version_name', options_list=['--gallery-image-version', '-e'], type=str, help='The '
'name of the gallery image version to be created. Needs to follow semantic version name pattern: '
'The allowed characters are digit and period. Digits must be within the range of a 32-bit integer. '
'Format: <MajorVersion>.<MinorVersion>.<Patch>', id_part='child_name_3')
for scope in ['sig image-version create', 'sig image-version update']:
with self.argument_context(scope) as c:
c.argument('target_regions', nargs='*', validator=process_gallery_image_version_namespace,
help='Space-separated list of regions and their replica counts. Use `<region>[=<replica count>][=<storage account type>]` to optionally set the replica count and/or storage account type for each region. '
'If a replica count is not specified, the default replica count will be used. If a storage account type is not specified, the default storage account type will be used')
c.argument('replica_count', help='The default number of replicas to be created per region. To set regional replication counts, use --target-regions', type=int)
# endregion
# region Proximity Placement Group
with self.argument_context('ppg', min_api='2018-04-01') as c:
c.argument('proximity_placement_group_name', arg_type=name_arg_type, help="The name of the proximity placement group.")
with self.argument_context('ppg create', min_api='2018-04-01') as c:
c.argument('ppg_type', options_list=['--type', '-t'], help="The type of the proximity placement group. Allowed values: Standard.")
c.argument('tags', tags_type)
with self.argument_context('ppg show', min_api='2019-07-01') as c:
c.argument('include_colocation_status', action='store_true', help='Enable fetching the colocation status of all the resources in the proximity placement group.')
for scope, item in [('vm create', 'VM'), ('vmss create', 'VMSS'),
('vm availability-set create', 'availability set'),
('vm update', 'VM'), ('vmss update', 'VMSS'),
('vm availability-set update', 'availability set')]:
with self.argument_context(scope, min_api='2018-04-01') as c:
c.argument('proximity_placement_group', options_list=['--ppg'], help="The name or ID of the proximity placement group the {} should be associated with.".format(item),
validator=_validate_proximity_placement_group) # only availability set does not have a command level validator, so this should be added.
# endregion
# region VM Monitor
with self.argument_context('vm monitor log show') as c:
c.argument('analytics_query', options_list=['--analytics-query', '-q'], help="Query to execute over Log Analytics data.")
c.argument('timespan', help="Timespan over which to query. Defaults to querying all available data.")
with self.argument_context('vm monitor metrics') as c:
c.argument('metricnamespace', options_list=['--namespace'],
help='Namespace to query metric definitions for.')
with self.argument_context('vm monitor metrics tail') as c:
from azure.mgmt.monitor.models import AggregationType
c.extra('resource_group_name', required=True)
c.argument('resource', arg_type=existing_vm_name, help='Name or ID of a virtual machine', validator=validate_vm_name_for_monitor_metrics, id_part=None)
c.argument('metadata', action='store_true')
c.argument('dimension', nargs='*', validator=validate_metric_dimension)
c.argument('aggregation', arg_type=get_enum_type(t for t in AggregationType if t.name != 'none'), nargs='*')
c.argument('metrics', nargs='*')
c.argument('orderby',
help='Aggregation to use for sorting results and the direction of the sort. Only one order can be specificed. Examples: sum asc')
c.argument('top', help='Max number of records to retrieve. Valid only if --filter used.')
c.argument('filters', options_list=['--filter'])
c.argument('metric_namespace', options_list=['--namespace'])
with self.argument_context('vm monitor metrics tail', arg_group='Time') as c:
c.argument('start_time', arg_type=get_datetime_type(help='Start time of the query.'))
c.argument('end_time', arg_type=get_datetime_type(help='End time of the query. Defaults to the current time.'))
c.argument('offset', type=get_period_type(as_timedelta=True))
c.argument('interval', arg_group='Time', type=get_period_type())
with self.argument_context('vm monitor metrics list-definitions') as c:
c.extra('resource_group_name', required=True)
c.argument('resource_uri', arg_type=existing_vm_name, help='Name or ID of a virtual machine', validator=validate_vm_name_for_monitor_metrics, id_part=None)
# endregion
# region disk encryption set
with self.argument_context('disk-encryption-set') as c:
c.argument('disk_encryption_set_name', disk_encryption_set_name)
c.argument('key_url', help='URL pointing to a key or secret in KeyVault.')
c.argument('source_vault', help='Name or ID of the KeyVault containing the key or secret.')
c.argument('encryption_type', arg_type=get_enum_type(['EncryptionAtRestWithPlatformKey', 'EncryptionAtRestWithCustomerKey', 'EncryptionAtRestWithPlatformAndCustomerKeys']),
help='The type of key used to encrypt the data of the disk. EncryptionAtRestWithPlatformKey: Disk is encrypted at rest with Platform managed key. It is the default encryption type. EncryptionAtRestWithCustomerKey: Disk is encrypted at rest with Customer managed key that can be changed and revoked by a customer. EncryptionAtRestWithPlatformAndCustomerKeys: Disk is encrypted at rest with 2 layers of encryption. One of the keys is Customer managed and the other key is Platform managed.')
c.argument('location', validator=get_default_location_from_resource_group)
c.argument('tags', tags_type)
c.argument('enable_auto_key_rotation', arg_type=get_three_state_flag(), min_api='2020-12-01',
options_list=['--enable-auto-key-rotation', '--auto-rotation'],
help='Enable automatic rotation of keys.')
# endregion
# region DiskAccess
with self.argument_context('disk-access', resource_type=ResourceType.MGMT_COMPUTE, operation_group='disk_accesses') as c:
c.argument('disk_access_name', arg_type=name_arg_type, help='Name of the disk access resource.', id_part='name')
c.argument('location', validator=get_default_location_from_resource_group)
c.argument('tags', tags_type)
# endRegion
with self.argument_context('capacity reservation group') as c:
c.argument('location', arg_type=get_location_type(self.cli_ctx), validator=get_default_location_from_resource_group)
c.argument('capacity_reservation_group_name', options_list=['--capacity-reservation-group', '-n'],
help='The name of the capacity reservation group.')
c.argument('tags', tags_type)
with self.argument_context('capacity reservation group create') as c:
c.argument('zones', zones_type, help='Availability Zones to use for this capacity reservation group. If not provided, the group supports only regional resources in the region. If provided, enforces each capacity reservation in the group to be in one of the zones.')
with self.argument_context('capacity reservation group show') as c:
c.argument('instance_view', action='store_true', options_list=['--instance-view', '-i'], help='Retrieve the list of instance views of the capacity reservations under the capacity reservation group which is a snapshot of the runtime properties of a capacity reservation that is managed by the platform and can change outside of control plane operations.')
with self.argument_context('capacity reservation group list') as c:
c.argument('vm_instance', action='store_true', help='Retrieve the Virtual Machine Instance which are associated to capacity reservation group in the response.')
c.argument('vmss_instance', action='store_true', help='Retrieve the ScaleSet VM Instance which are associated to capacity reservation group in the response.')
with self.argument_context('capacity reservation') as c:
c.argument('location', arg_type=get_location_type(self.cli_ctx), validator=get_default_location_from_resource_group)
c.argument('capacity_reservation_group_name', options_list=['--capacity-reservation-group', '-c'],
help='The name of the capacity reservation group.')
c.argument('capacity_reservation_name', options_list=['--capacity-reservation-name', '-n'],
help='The name of the capacity reservation.')
c.argument('capacity', type=int, help='Specify the number of virtual machines in the scale set.')
c.argument('tags', tags_type)
with self.argument_context('capacity reservation create') as c:
c.argument('zone', zone_type, help='Availability Zone to use for this capacity reservation. The zone has to be single value and also should be part for the list of zones specified during the capacity reservation group creation. If not provided, the reservation supports only non-zonal deployments. If provided, enforces VM/VMSS using this capacity reservation to be in same zone.')
c.argument('sku_name', options_list=['--sku', '-s'], required=True, help='The SKU of the resource for which capacity needs be reserved. Currently VM Skus with the capability called "CapacityReservationSupported" set to true are supported. Refer to List Microsoft.Compute SKUs in a region (https://docs.microsoft.com/rest/api/compute/resourceskus/list) for supported values.')
with self.argument_context('capacity reservation show') as c:
c.argument('instance_view', action='store_true', options_list=['--instance-view', '-i'], help='Retrieve a snapshot of the runtime properties of the capacity reservation that is managed by the platform and can change outside of control plane operations.')
|
def load_arguments(self, _):
# Model imports
StorageAccountTypes = self.get_models('StorageAccountTypes')
DiskStorageAccountTypes = self.get_models('DiskStorageAccountTypes,', operation_group='disks')
SnapshotStorageAccountTypes = self.get_models('SnapshotStorageAccountTypes', operation_group='snapshots')
UpgradeMode, CachingTypes, OperatingSystemTypes = self.get_models('UpgradeMode', 'CachingTypes', 'OperatingSystemTypes')
HyperVGenerationTypes, HyperVGeneration = self.get_models('HyperVGenerationTypes', 'HyperVGeneration')
DedicatedHostLicenseTypes = self.get_models('DedicatedHostLicenseTypes')
OrchestrationServiceNames, OrchestrationServiceStateAction = self.get_models('OrchestrationServiceNames', 'OrchestrationServiceStateAction', operation_group='virtual_machine_scale_sets')
RebootSetting, VMGuestPatchClassificationWindows, VMGuestPatchClassificationLinux = self.get_models('VMGuestPatchRebootSetting', 'VMGuestPatchClassificationWindows', 'VMGuestPatchClassificationLinux')
GallerySharingPermissionTypes = self.get_models('GallerySharingPermissionTypes', operation_group='shared_galleries')
ReplicationMode = self.get_models('ReplicationMode', operation_group='gallery_image_versions')
# REUSABLE ARGUMENT DEFINITIONS
name_arg_type = CLIArgumentType(options_list=['--name', '-n'], metavar='NAME')
multi_ids_type = CLIArgumentType(nargs='+')
existing_vm_name = CLIArgumentType(overrides=name_arg_type,
configured_default='vm',
help="The name of the Virtual Machine. You can configure the default using `az configure --defaults vm=<name>`",
completer=get_resource_name_completion_list('Microsoft.Compute/virtualMachines'), id_part='name')
existing_disk_name = CLIArgumentType(overrides=name_arg_type, help='The name of the managed disk', completer=get_resource_name_completion_list('Microsoft.Compute/disks'), id_part='name')
existing_snapshot_name = CLIArgumentType(overrides=name_arg_type, help='The name of the snapshot', completer=get_resource_name_completion_list('Microsoft.Compute/snapshots'), id_part='name')
vmss_name_type = CLIArgumentType(name_arg_type,
configured_default='vmss',
completer=get_resource_name_completion_list('Microsoft.Compute/virtualMachineScaleSets'),
help="Scale set name. You can configure the default using `az configure --defaults vmss=<name>`",
id_part='name')
extension_instance_name_type = CLIArgumentType(help="Name of extension instance, which can be customized. Default: name of the extension.")
image_template_name_type = CLIArgumentType(overrides=name_arg_type, id_part='name')
disk_encryption_set_name = CLIArgumentType(overrides=name_arg_type, help='Name of disk encryption set.', id_part='name')
# StorageAccountTypes renamed to DiskStorageAccountTypes in 2018_06_01 of azure-mgmt-compute
DiskStorageAccountTypes = DiskStorageAccountTypes or StorageAccountTypes
if DiskStorageAccountTypes:
disk_sku = CLIArgumentType(arg_type=get_enum_type(DiskStorageAccountTypes))
else:
# StorageAccountTypes introduced in api version 2016_04_30_preview of Resource.MGMT.Compute package..
# However, 2017-03-09-profile targets version 2016-03-30 of compute package.
disk_sku = CLIArgumentType(arg_type=get_enum_type(['Premium_LRS', 'Standard_LRS']))
if SnapshotStorageAccountTypes:
snapshot_sku = CLIArgumentType(arg_type=get_enum_type(SnapshotStorageAccountTypes))
else:
# SnapshotStorageAccountTypes introduced in api version 2018_04_01 of Resource.MGMT.Compute package..
# However, 2017-03-09-profile targets version 2016-03-30 of compute package.
snapshot_sku = CLIArgumentType(arg_type=get_enum_type(['Premium_LRS', 'Standard_LRS']))
# special case for `network nic scale-set list` command alias
with self.argument_context('network nic scale-set list') as c:
c.argument('virtual_machine_scale_set_name', options_list=['--vmss-name'], completer=get_resource_name_completion_list('Microsoft.Compute/virtualMachineScaleSets'), id_part='name')
HyperVGenerationTypes = HyperVGenerationTypes or HyperVGeneration
if HyperVGenerationTypes:
hyper_v_gen_sku = CLIArgumentType(arg_type=get_enum_type(HyperVGenerationTypes, default="V1"))
else:
hyper_v_gen_sku = CLIArgumentType(arg_type=get_enum_type(["V1", "V2"], default="V1"))
ultra_ssd_enabled_type = CLIArgumentType(
arg_type=get_three_state_flag(), min_api='2018-06-01',
help='Enables or disables the capability to have 1 or more managed data disks with UltraSSD_LRS storage account')
scale_in_policy_type = CLIArgumentType(
nargs='+', arg_type=get_enum_type(self.get_models('VirtualMachineScaleSetScaleInRules')),
help='Specify the scale-in policy (space delimited) that decides which virtual machines are chosen for removal when a Virtual Machine Scale Set is scaled-in.'
)
edge_zone_type = CLIArgumentType(
help='The name of edge zone.',
min_api='2020-12-01',
is_preview=True
)
t_shared_to = self.get_models('SharedToValues', operation_group='shared_galleries')
shared_to_type = CLIArgumentType(
arg_type=get_enum_type(t_shared_to),
help='The query parameter to decide what shared galleries to fetch when doing listing operations. '
'If not specified, list by subscription id.'
)
# region MixedScopes
for scope in ['vm', 'disk', 'snapshot', 'image', 'sig']:
with self.argument_context(scope) as c:
c.argument('tags', tags_type)
for scope in ['disk', 'snapshot']:
with self.argument_context(scope) as c:
c.ignore('source_blob_uri', 'source_disk', 'source_snapshot')
c.argument('source_storage_account_id', help='used when source blob is in a different subscription')
c.argument('size_gb', options_list=['--size-gb', '-z'], help='size in GB. Max size: 4095 GB (certain preview disks can be larger).', type=int)
c.argument('duration_in_seconds', help='Time duration in seconds until the SAS access expires', type=int)
if self.supported_api_version(min_api='2018-09-30', operation_group='disks'):
c.argument('access_level', arg_type=get_enum_type(['Read', 'Write']), default='Read', help='access level')
c.argument('for_upload', arg_type=get_three_state_flag(),
help='Create the {0} for uploading blobs later on through storage commands. Run "az {0} grant-access --access-level Write" to retrieve the {0}\'s SAS token.'.format(scope))
c.argument('hyper_v_generation', arg_type=hyper_v_gen_sku, help='The hypervisor generation of the Virtual Machine. Applicable to OS disks only.')
else:
c.ignore('access_level', 'for_upload', 'hyper_v_generation')
c.argument('encryption_type', min_api='2019-07-01', arg_type=get_enum_type(self.get_models('EncryptionType')),
help='Encryption type. EncryptionAtRestWithPlatformKey: Disk is encrypted with XStore managed key at rest. It is the default encryption type. EncryptionAtRestWithCustomerKey: Disk is encrypted with Customer managed key at rest.')
c.argument('disk_encryption_set', min_api='2019-07-01', help='Name or ID of disk encryption set that is used to encrypt the disk.')
c.argument('location', help='Location. Values from: `az account list-locations`. You can configure the default location using `az configure --defaults location=<location>`. If location is not specified and no default location specified, location will be automatically set as same as the resource group.')
operation_group = 'disks' if scope == 'disk' else 'snapshots'
c.argument('network_access_policy', min_api='2020-05-01', help='Policy for accessing the disk via network.', arg_type=get_enum_type(self.get_models('NetworkAccessPolicy', operation_group=operation_group)))
c.argument('disk_access', min_api='2020-05-01', help='Name or ID of the disk access resource for using private endpoints on disks.')
c.argument('enable_bursting', arg_type=get_three_state_flag(), help='Enable bursting beyond the provisioned performance target of the disk. Bursting is disabled by default, and it does not apply to Ultra disks.')
for scope in ['disk create', 'snapshot create']:
with self.argument_context(scope) as c:
c.argument('source', help='source to create the disk/snapshot from, including unmanaged blob uri, managed disk id or name, or snapshot id or name')
# endregion
# region Disks
with self.argument_context('disk') as c:
c.argument('zone', zone_type, min_api='2017-03-30', options_list=['--zone']) # TODO: --size-gb currently has claimed -z. We can do a breaking change later if we want to.
c.argument('disk_name', existing_disk_name, completer=get_resource_name_completion_list('Microsoft.Compute/disks'))
c.argument('name', arg_type=name_arg_type)
c.argument('sku', arg_type=disk_sku, help='Underlying storage SKU')
c.argument('os_type', arg_type=get_enum_type(OperatingSystemTypes), help='The Operating System type of the Disk.')
c.argument('disk_iops_read_write', type=int, min_api='2018-06-01', help='The number of IOPS allowed for this disk. Only settable for UltraSSD disks. One operation can transfer between 4k and 256k bytes')
c.argument('disk_mbps_read_write', type=int, min_api='2018-06-01', help="The bandwidth allowed for this disk. Only settable for UltraSSD disks. MBps means millions of bytes per second with ISO notation of powers of 10")
c.argument('upload_size_bytes', type=int, min_api='2019-03-01',
help='The size (in bytes) of the contents of the upload including the VHD footer. Min value: 20972032. Max value: 35183298347520')
c.argument('max_shares', type=int, help='The maximum number of VMs that can attach to the disk at the same time. Value greater than one indicates a disk that can be mounted on multiple VMs at the same time')
c.argument('disk_iops_read_only', type=int, help='The total number of IOPS that will be allowed across all VMs mounting the shared disk as ReadOnly. One operation can transfer between 4k and 256k bytes')
c.argument('disk_mbps_read_only', type=int, help='The total throughput (MBps) that will be allowed across all VMs mounting the shared disk as ReadOnly. MBps means millions of bytes per second - MB here uses the ISO notation, of powers of 10')
c.argument('image_reference', help='ID or URN (publisher:offer:sku:version) of the image from which to create a disk')
c.argument('image_reference_lun', type=int, help='If the disk is created from an image\'s data disk, this is an index that indicates which of the data disks in the image to use. For OS disks, this field is null')
c.argument('gallery_image_reference', help='ID of the shared galley image version from which to create a disk')
c.argument('gallery_image_reference_lun', type=int, help='If the disk is created from an image\'s data disk, this is an index that indicates which of the data disks in the image to use. For OS disks, this field is null')
c.argument('logical_sector_size', type=int, help='Logical sector size in bytes for Ultra disks. Supported values are 512 ad 4096. 4096 is the default.')
c.argument('tier', help='Performance tier of the disk (e.g, P4, S10) as described here: https://azure.microsoft.com/pricing/details/managed-disks/. Does not apply to Ultra disks.')
c.argument('edge_zone', edge_zone_type)
c.argument('security_type', choices=['TrustedLaunch'], help='The security type of the VM. Applicable for OS disks only.', min_api='2020-12-01')
c.argument('support_hibernation', arg_type=get_three_state_flag(), help='Indicate the OS on a disk supports hibernation.', min_api='2020-12-01')
# endregion
# region Snapshots
with self.argument_context('snapshot', resource_type=ResourceType.MGMT_COMPUTE, operation_group='snapshots') as c:
c.argument('snapshot_name', existing_snapshot_name, id_part='name', completer=get_resource_name_completion_list('Microsoft.Compute/snapshots'))
c.argument('name', arg_type=name_arg_type)
c.argument('sku', arg_type=snapshot_sku)
c.argument('incremental', arg_type=get_three_state_flag(), min_api='2019-03-01',
help='Whether a snapshot is incremental. Incremental snapshots on the same disk occupy less space than full snapshots and can be diffed')
c.argument('edge_zone', edge_zone_type)
c.argument('copy_start', arg_type=get_three_state_flag(), min_api='2021-04-01',
help='Create snapshot by using a deep copy process, where the resource creation is considered complete only after all data has been copied from the source.')
# endregion
# region Images
with self.argument_context('image') as c:
c.argument('os_type', arg_type=get_enum_type(['Windows', 'Linux']))
c.argument('image_name', arg_type=name_arg_type, id_part='name', completer=get_resource_name_completion_list('Microsoft.Compute/images'))
c.argument('tags', tags_type)
with self.argument_context('image create') as c:
# here we collpase all difference image sources to under 2 common arguments --os-disk-source --data-disk-sources
c.argument('name', arg_type=name_arg_type, help='new image name')
c.argument('source', help='OS disk source from the same region, including a virtual machine ID or name, OS disk blob URI, managed OS disk ID or name, or OS snapshot ID or name')
c.argument('data_disk_sources', nargs='+', help='Space-separated list of data disk sources, including unmanaged blob URI, managed disk ID or name, or snapshot ID or name')
c.argument('zone_resilient', min_api='2017-12-01', arg_type=get_three_state_flag(), help='Specifies whether an image is zone resilient or not. '
'Default is false. Zone resilient images can be created only in regions that provide Zone Redundant Storage')
c.argument('storage_sku', arg_type=disk_sku, help='The SKU of the storage account with which to create the VM image. Unused if source VM is specified.')
c.argument('os_disk_caching', arg_type=get_enum_type(CachingTypes), help="Storage caching type for the image's OS disk.")
c.argument('data_disk_caching', arg_type=get_enum_type(CachingTypes),
help="Storage caching type for the image's data disk.")
c.argument('hyper_v_generation', arg_type=hyper_v_gen_sku, min_api="2019-03-01", help='The hypervisor generation of the Virtual Machine created from the image.')
c.ignore('source_virtual_machine', 'os_blob_uri', 'os_disk', 'os_snapshot', 'data_blob_uris', 'data_disks', 'data_snapshots')
c.argument('edge_zone', edge_zone_type, )
# endregion
# region Image Templates
with self.argument_context('image builder') as c:
ib_output_name_help = "Name of the image builder run output."
c.argument('location', get_location_type(self.cli_ctx))
c.argument('scripts', nargs='+', help="Space-separated list of shell or powershell scripts to customize the image with. Each script must be a publicly accessible URL."
" Infers type of script from file extension ('.sh' or'.ps1') or from source type. More more customizer options and flexibility, see: 'az image template customizer add'")
c.argument('source', options_list=["--image-source", "-i"], help="The base image to customize. Must be a valid platform image URN, platform image alias, Red Hat ISO image URI, managed image name/ID, or shared image version ID.")
c.argument('image_template_name', image_template_name_type, help="The name of the image template.")
c.argument('checksum', help="The SHA256 checksum of the Red Hat ISO image")
c.argument('managed_image_destinations', nargs='+', help='Managed image output distributor information. Space-separated list of key-value pairs. E.g "image_1=westus2 image_2=westus". Each key is the name or resource ID of the managed image to be created. Each value is the location of the image.')
c.argument('shared_image_destinations', nargs='+', help='Shared image gallery (sig) output distributor information. Space-separated list of key-value pairs. E.g "my_gallery_1/image_def_1=eastus,westus my_gallery_2/image_def_2=uksouth,canadaeast,francesouth." '
'Each key is the sig image definition ID or sig gallery name and sig image definition delimited by a "/". Each value is a comma-delimited list of replica locations.')
c.argument('output_name', help=ib_output_name_help)
c.ignore('destinations_lists', 'scripts_list', 'source_dict')
with self.argument_context('image builder create') as c:
ib_source_type = CLIArgumentType(arg_group="Image Source")
ib_customizer_type = CLIArgumentType(arg_group="Customizer")
ib_cutput_type = CLIArgumentType(arg_group="Output")
c.argument('build_timeout', type=int, help="The Maximum duration to wait while building the image template, in minutes. Default is 60.")
c.argument('image_template', help='Local path or URL to an image template file. When using --image-template, all other parameters are ignored except -g and -n. Reference: https://docs.microsoft.com/azure/virtual-machines/linux/image-builder-json')
c.argument('identity', nargs='+', help='List of user assigned identities (name or ID, space delimited) of the image template.')
# VM profile
c.argument('vm_size', help='Size of the virtual machine used to build, customize and capture images. Omit or specify empty string to use the default (Standard_D1_v2)')
c.argument('os_disk_size', type=int, help='Size of the OS disk in GB. Omit or specify 0 to use Azure\'s default OS disk size')
c.argument('vnet', help='Name of VNET to deploy the build virtual machine. You should only specify it when subnet is a name')
c.argument('subnet', help='Name or ID of subnet to deploy the build virtual machine')
# Image Source Arguments
c.argument('source', arg_type=ib_source_type)
c.argument('checksum', arg_type=ib_source_type)
c.argument('', arg_type=ib_source_type)
# Image Customizer Arguments
c.argument('scripts', arg_type=ib_customizer_type)
c.argument('', arg_type=ib_customizer_type)
c.argument('', arg_type=ib_customizer_type)
# Image Output Arguments
c.argument('managed_image_destinations', arg_type=ib_cutput_type)
c.argument('shared_image_destinations', arg_type=ib_cutput_type)
c.argument('output_name', arg_type=ib_cutput_type)
with self.argument_context('image builder output') as c:
ib_sig_regions_help = "Space-separated list of regions to replicate the image version into."
ib_img_location_help = "Location where the customized image will be created."
c.argument('gallery_image_definition', arg_group="Shared Image Gallery", help="Name or ID of the existing SIG image definition to create the customized image version with.")
c.argument('gallery_name', arg_group="Shared Image Gallery", help="Shared image gallery name, if image definition name and not ID was provided.")
c.argument('gallery_replication_regions', arg_group="Shared Image Gallery", nargs='+', help=ib_sig_regions_help)
c.argument('managed_image', arg_group="Managed Image", help="Name or ID of the customized managed image to be created.")
c.argument('managed_image_location', arg_group="Managed Image", help=ib_img_location_help)
with self.argument_context('image builder output add') as c:
ib_artifact_tags_help = "Tags that will be applied to the output artifact once it has been created by the distributor. " + tags_type.settings['help']
ib_artifact_tags_type = CLIArgumentType(overrides=tags_type, help=ib_artifact_tags_help, options_list=["--artifact-tags"])
ib_default_loc_help = " Defaults to resource group's location."
c.argument('output_name', help=ib_output_name_help + " Defaults to the name of the managed image or sig image definition.")
c.argument('gallery_replication_regions', arg_group="Shared Image Gallery", nargs='+', help=ib_sig_regions_help + ib_default_loc_help)
c.argument('managed_image_location', arg_group="Managed Image", help=ib_img_location_help + ib_default_loc_help)
c.argument('is_vhd', arg_group="VHD", help="The output is a VHD distributor.", action='store_true')
c.argument('tags', arg_type=ib_artifact_tags_type)
c.ignore('location')
with self.argument_context('image builder customizer') as c:
ib_win_restart_type = CLIArgumentType(arg_group="Windows Restart")
ib_win_update_type = CLIArgumentType(arg_group="Windows Update")
ib_script_type = CLIArgumentType(arg_group="Shell and Powershell")
ib_powershell_type = CLIArgumentType(arg_group="Powershell")
ib_file_customizer_type = CLIArgumentType(arg_group="File")
c.argument('customizer_name', help="Name of the customizer.")
c.argument('customizer_type', options_list=['--type', '-t'], help="Type of customizer to be added to the image template.", arg_type=get_enum_type(ScriptType))
# Script Args
c.argument('script_url', arg_type=ib_script_type, help="URL of script to customize the image with. The URL must be publicly accessible.")
c.argument('inline_script', arg_type=ib_script_type, nargs='+', help="Space-separated list of inline script lines to customize the image with.")
# Powershell Specific Args
c.argument('valid_exit_codes', options_list=['--exit-codes', '-e'], arg_type=ib_powershell_type, nargs='+', help="Space-separated list of valid exit codes, as integers")
# Windows Restart Specific Args
c.argument('restart_command', arg_type=ib_win_restart_type, help="Command to execute the restart operation.")
c.argument('restart_check_command', arg_type=ib_win_restart_type, help="Command to verify that restart succeeded.")
c.argument('restart_timeout', arg_type=ib_win_restart_type, help="Restart timeout specified as a string consisting of a magnitude and unit, e.g. '5m' (5 minutes) or '2h' (2 hours)", default="5m")
# Windows Update Specific Args
c.argument('search_criteria', arg_type=ib_win_update_type, help='Criteria to search updates. Omit or specify empty string to use the default (search all). Refer to above link for examples and detailed description of this field.')
c.argument('filters', arg_type=ib_win_update_type, nargs='+', help='Space delimited filters to select updates to apply. Omit or specify empty array to use the default (no filter)')
c.argument('update_limit', arg_type=ib_win_update_type, help='Maximum number of updates to apply at a time. Omit or specify 0 to use the default (1000)')
# File Args
c.argument('file_source', arg_type=ib_file_customizer_type, help="The URI of the file to be downloaded into the image. It can be a github link, SAS URI for Azure Storage, etc.")
c.argument('dest_path', arg_type=ib_file_customizer_type, help="The absolute destination path where the file specified in --file-source will be downloaded to in the image")
# endregion
# region AvailabilitySets
with self.argument_context('vm availability-set') as c:
c.argument('availability_set_name', name_arg_type, id_part='name', completer=get_resource_name_completion_list('Microsoft.Compute/availabilitySets'), help='Name of the availability set')
with self.argument_context('vm availability-set create') as c:
c.argument('availability_set_name', name_arg_type, validator=get_default_location_from_resource_group, help='Name of the availability set')
c.argument('platform_update_domain_count', type=int, help='Update Domain count. If unspecified, the server will pick the most optimal number like 5.')
c.argument('platform_fault_domain_count', type=int, help='Fault Domain count.')
c.argument('validate', help='Generate and validate the ARM template without creating any resources.', action='store_true')
c.argument('unmanaged', action='store_true', min_api='2016-04-30-preview', help='contained VMs should use unmanaged disks')
with self.argument_context('vm availability-set update') as c:
if self.supported_api_version(max_api='2016-04-30-preview', operation_group='virtual_machines'):
c.argument('name', name_arg_type, id_part='name', completer=get_resource_name_completion_list('Microsoft.Compute/availabilitySets'), help='Name of the availability set')
c.argument('availability_set_name', options_list=['--availability-set-name'])
# endregion
# region VirtualMachines
with self.argument_context('vm') as c:
c.argument('vm_name', existing_vm_name)
c.argument('size', completer=get_vm_size_completion_list)
c.argument('name', arg_type=name_arg_type)
c.argument('zone', zone_type, min_api='2017-03-30')
c.argument('caching', help='Disk caching policy', arg_type=get_enum_type(CachingTypes))
c.argument('nsg', help='The name to use when creating a new Network Security Group (default) or referencing an existing one. Can also reference an existing NSG by ID or specify "" for none.', arg_group='Network')
c.argument('nsg_rule', help='NSG rule to create when creating a new NSG. Defaults to open ports for allowing RDP on Windows and allowing SSH on Linux.', arg_group='Network', arg_type=get_enum_type(['RDP', 'SSH']))
c.argument('application_security_groups', min_api='2017-09-01', nargs='+', options_list=['--asgs'], help='Space-separated list of existing application security groups to associate with the VM.', arg_group='Network')
c.argument('workspace', is_preview=True, arg_group='Monitor', help='Name or ID of Log Analytics Workspace. If you specify the workspace through its name, the workspace should be in the same resource group with the vm, otherwise a new workspace will be created.')
with self.argument_context('vm capture') as c:
c.argument('overwrite', action='store_true')
with self.argument_context('vm update') as c:
c.argument('os_disk', min_api='2017-12-01', help="Managed OS disk ID or name to swap to")
c.argument('write_accelerator', nargs='*', min_api='2017-12-01',
help="enable/disable disk write accelerator. Use singular value 'true/false' to apply across, or specify individual disks, e.g.'os=true 1=true 2=true' for os disk and data disks with lun of 1 & 2")
c.argument('disk_caching', nargs='*', help="Use singular value to apply across, or specify individual disks, e.g. 'os=ReadWrite 0=None 1=ReadOnly' should enable update os disk and 2 data disks")
c.argument('ultra_ssd_enabled', ultra_ssd_enabled_type)
c.argument('enable_secure_boot', arg_type=get_three_state_flag(), min_api='2020-12-01',
help='Enable secure boot.')
c.argument('enable_vtpm', arg_type=get_three_state_flag(), min_api='2020-12-01',
help='Enable vTPM.')
c.argument('size', help='The VM size to be updated. See https://azure.microsoft.com/pricing/details/virtual-machines/ for size info.', is_preview=True)
with self.argument_context('vm create') as c:
c.argument('name', name_arg_type, validator=_resource_not_exists(self.cli_ctx, 'Microsoft.Compute/virtualMachines'))
c.argument('vm_name', name_arg_type, id_part=None, help='Name of the virtual machine.', completer=None)
c.argument('os_disk_size_gb', type=int, help='the size of the os disk in GB', arg_group='Storage')
c.argument('availability_set', help='Name or ID of an existing availability set to add the VM to. None by default.')
c.argument('vmss', help='Name or ID of an existing virtual machine scale set that the virtual machine should be assigned to. None by default.')
c.argument('nsg', help='The name to use when creating a new Network Security Group (default) or referencing an existing one. Can also reference an existing NSG by ID or specify "" for none (\'""\' in Azure CLI using PowerShell or --% operator).', arg_group='Network')
c.argument('nsg_rule', help='NSG rule to create when creating a new NSG. Defaults to open ports for allowing RDP on Windows and allowing SSH on Linux. NONE represents no NSG rule', arg_group='Network', arg_type=get_enum_type(['RDP', 'SSH', 'NONE']))
c.argument('application_security_groups', resource_type=ResourceType.MGMT_NETWORK, min_api='2017-09-01', nargs='+', options_list=['--asgs'], help='Space-separated list of existing application security groups to associate with the VM.', arg_group='Network', validator=validate_asg_names_or_ids)
c.argument('boot_diagnostics_storage',
help='pre-existing storage account name or its blob uri to capture boot diagnostics. Its sku should be one of Standard_GRS, Standard_LRS and Standard_RAGRS')
c.argument('accelerated_networking', resource_type=ResourceType.MGMT_NETWORK, min_api='2016-09-01', arg_type=get_three_state_flag(), arg_group='Network',
help="enable accelerated networking. Unless specified, CLI will enable it based on machine image and size")
if self.supported_api_version(min_api='2019-03-01', resource_type=ResourceType.MGMT_COMPUTE):
VirtualMachineEvictionPolicyTypes = self.get_models('VirtualMachineEvictionPolicyTypes', resource_type=ResourceType.MGMT_COMPUTE)
c.argument('eviction_policy', resource_type=ResourceType.MGMT_COMPUTE, min_api='2019-03-01',
arg_type=get_enum_type(VirtualMachineEvictionPolicyTypes, default=None),
help="The eviction policy for the Spot priority virtual machine. Default eviction policy is Deallocate for a Spot priority virtual machine")
c.argument('enable_agent', arg_type=get_three_state_flag(), min_api='2018-06-01',
help='Indicates whether virtual machine agent should be provisioned on the virtual machine. When this property is not specified, default behavior is to set it to true. This will ensure that VM Agent is installed on the VM so that extensions can be added to the VM later')
c.argument('enable_auto_update', arg_type=get_three_state_flag(), min_api='2020-06-01',
help='Indicate whether Automatic Updates is enabled for the Windows virtual machine')
c.argument('patch_mode', arg_type=get_enum_type(['AutomaticByOS', 'AutomaticByPlatform', 'Manual', 'ImageDefault']), min_api='2020-12-01',
help='Mode of in-guest patching to IaaS virtual machine. Allowed values for Windows VM: AutomaticByOS, AutomaticByPlatform, Manual. Allowed values for Linux VM: AutomaticByPlatform, ImageDefault. Manual - You control the application of patches to a virtual machine. You do this by applying patches manually inside the VM. In this mode, automatic updates are disabled; the paramater --enable-auto-update must be false. AutomaticByOS - The virtual machine will automatically be updated by the OS. The parameter --enable-auto-update must be true. AutomaticByPlatform - the virtual machine will automatically updated by the OS. ImageDefault - The virtual machine\'s default patching configuration is used. The parameter --enable-agent and --enable-auto-update must be true')
c.argument('ssh_key_name', help='Use it as public key in virtual machine. It should be an existing SSH key resource in Azure.')
c.argument('enable_hotpatching', arg_type=get_three_state_flag(), help='Patch VMs without requiring a reboot. --enable-agent must be set and --patch-mode must be set to AutomaticByPlatform', min_api='2020-12-01')
c.argument('platform_fault_domain', min_api='2020-06-01',
help='Specify the scale set logical fault domain into which the virtual machine will be created. By default, the virtual machine will be automatically assigned to a fault domain that best maintains balance across available fault domains. This is applicable only if the virtualMachineScaleSet property of this virtual machine is set. The virtual machine scale set that is referenced, must have platform fault domain count. This property cannot be updated once the virtual machine is created. Fault domain assignment can be viewed in the virtual machine instance view')
c.argument('count', type=int, is_preview=True,
help='Number of virtual machines to create. Value range is [2, 250], inclusive. Don\'t specify this parameter if you want to create a normal single VM. The VMs are created in parallel. The output of this command is an array of VMs instead of one single VM. Each VM has its own public IP, NIC. VNET and NSG are shared. It is recommended that no existing public IP, NIC, VNET and NSG are in resource group. When --count is specified, --attach-data-disks, --attach-os-disk, --boot-diagnostics-storage, --computer-name, --host, --host-group, --nics, --os-disk-name, --private-ip-address, --public-ip-address, --public-ip-address-dns-name, --storage-account, --storage-container-name, --subnet, --use-unmanaged-disk, --vnet-name are not allowed.')
c.argument('security_type', arg_type=get_enum_type(['TrustedLaunch']), min_api='2020-12-01',
help='Specify if the VM is Trusted Launch enabled. See https://docs.microsoft.com/azure/virtual-machines/trusted-launch.')
c.argument('enable_secure_boot', arg_type=get_three_state_flag(), min_api='2020-12-01',
help='Enable secure boot. It is part of trusted launch.')
c.argument('enable_vtpm', arg_type=get_three_state_flag(), min_api='2020-12-01',
help='Enable vTPM. It is part of trusted launch.')
c.argument('user_data', help='UserData for the VM. It can be passed in as file or string.', completer=FilesCompleter(), type=file_type, min_api='2021-03-01')
with self.argument_context('vm create', arg_group='Storage') as c:
c.argument('attach_os_disk', help='Attach an existing OS disk to the VM. Can use the name or ID of a managed disk or the URI to an unmanaged disk VHD.')
c.argument('attach_data_disks', nargs='+', help='Attach existing data disks to the VM. Can use the name or ID of a managed disk or the URI to an unmanaged disk VHD.')
with self.argument_context('vm create', arg_group='Dedicated Host', min_api='2019-03-01') as c:
c.argument('dedicated_host_group', options_list=['--host-group'], is_preview=True, help="Name or ID of the dedicated host group that the VM will reside in. --host and --host-group can't be used together.")
c.argument('dedicated_host', options_list=['--host'], is_preview=True, help="ID of the dedicated host that the VM will reside in. --host and --host-group can't be used together.")
with self.argument_context('vm update', arg_group='Dedicated Host', min_api='2019-03-01') as c:
c.argument('dedicated_host_group', options_list=['--host-group'], is_preview=True, help="Name or ID of the dedicated host group that the VM will reside in. --host and --host-group can't be used together. You should deallocate the VM before update, and start the VM after update. Please check out help for more examples.")
c.argument('dedicated_host', options_list=['--host'], is_preview=True, help="ID of the dedicated host that the VM will reside in. --host and --host-group can't be used together. You should deallocate the VM before update, and start the VM after update. Please check out help for more examples.")
with self.argument_context('vm open-port') as c:
c.argument('vm_name', name_arg_type, help='The name of the virtual machine to open inbound traffic on.')
c.argument('network_security_group_name', options_list=('--nsg-name',), help='The name of the network security group to create if one does not exist. Ignored if an NSG already exists.', validator=validate_nsg_name)
c.argument('apply_to_subnet', help='Allow inbound traffic on the subnet instead of the NIC', action='store_true')
c.argument('port', help="The port or port range (ex: 80-100) to open inbound traffic to. Use '*' to allow traffic to all ports. Use comma separated values to specify more than one port or port range.")
c.argument('priority', help='Rule priority, between 100 (highest priority) and 4096 (lowest priority). Must be unique for each rule in the collection.', type=int)
for scope in ['vm show', 'vm list']:
with self.argument_context(scope) as c:
c.argument('show_details', action='store_true', options_list=['--show-details', '-d'], help='show public ip address, FQDN, and power states. command will run slow')
for scope in ['vm show', 'vmss show']:
with self.argument_context(scope) as c:
c.argument('include_user_data', action='store_true', options_list=['--include-user-data', '-u'], help='Include the user data properties in the query result.', min_api='2021-03-01')
for scope in ['vm get-instance-view', 'vm wait', 'vmss wait']:
with self.argument_context(scope) as c:
c.ignore('include_user_data')
with self.argument_context('vm diagnostics') as c:
c.argument('vm_name', arg_type=existing_vm_name, options_list=['--vm-name'])
with self.argument_context('vm diagnostics set') as c:
c.argument('storage_account', completer=get_resource_name_completion_list('Microsoft.Storage/storageAccounts'))
with self.argument_context('vm install-patches') as c:
c.argument('maximum_duration', type=str, help='Specify the maximum amount of time that the operation will run. It must be an ISO 8601-compliant duration string such as PT4H (4 hours)')
c.argument('reboot_setting', arg_type=get_enum_type(RebootSetting), help='Define when it is acceptable to reboot a VM during a software update operation.')
c.argument('classifications_to_include_win', nargs='+', arg_type=get_enum_type(VMGuestPatchClassificationWindows), help='Space-separated list of classifications to include for Windows VM.')
c.argument('classifications_to_include_linux', nargs='+', arg_type=get_enum_type(VMGuestPatchClassificationLinux), help='Space-separated list of classifications to include for Linux VM.')
c.argument('kb_numbers_to_include', nargs='+', help='Space-separated list of KBs to include in the patch operation. Applicable to Windows VM only')
c.argument('kb_numbers_to_exclude', nargs='+', help='Space-separated list of KBs to exclude in the patch operation. Applicable to Windows VM only')
c.argument('exclude_kbs_requiring_reboot', arg_type=get_three_state_flag(), help="Filter out KBs that don't have a reboot behavior of 'NeverReboots' when this is set. Applicable to Windows VM only")
c.argument('package_name_masks_to_include', nargs='+', help='Space-separated list of packages to include in the patch operation. Format: packageName_packageVersion. Applicable to Linux VM only')
c.argument('package_name_masks_to_exclude', nargs='+', help='Space-separated list of packages to exclude in the patch operation. Format: packageName_packageVersion. Applicable to Linux VM only')
with self.argument_context('vm disk') as c:
c.argument('vm_name', options_list=['--vm-name'], id_part=None, completer=get_resource_name_completion_list('Microsoft.Compute/virtualMachines'))
c.argument('new', action='store_true', help='create a new disk')
c.argument('sku', arg_type=disk_sku, help='Underlying storage SKU')
c.argument('size_gb', options_list=['--size-gb', '-z'], help='size in GB. Max size: 4095 GB (certain preview disks can be larger).', type=int)
c.argument('lun', type=int, help='0-based logical unit number (LUN). Max value depends on the Virtual Machine size.')
with self.argument_context('vm disk attach') as c:
c.argument('enable_write_accelerator', min_api='2017-12-01', action='store_true', help='enable write accelerator')
c.argument('disk', options_list=['--name', '-n', c.deprecate(target='--disk', redirect='--name', hide=True)],
help="The name or ID of the managed disk", validator=validate_vm_disk, id_part='name',
completer=get_resource_name_completion_list('Microsoft.Compute/disks'))
with self.argument_context('vm disk detach') as c:
c.argument('disk_name', arg_type=name_arg_type, help='The data disk name.')
with self.argument_context('vm encryption enable') as c:
c.argument('encrypt_format_all', action='store_true', help='Encrypts-formats data disks instead of encrypting them. Encrypt-formatting is a lot faster than in-place encryption but wipes out the partition getting encrypt-formatted.')
# Place aad arguments in their own group
aad_arguments = 'Azure Active Directory'
c.argument('aad_client_id', arg_group=aad_arguments)
c.argument('aad_client_secret', arg_group=aad_arguments)
c.argument('aad_client_cert_thumbprint', arg_group=aad_arguments)
with self.argument_context('vm extension') as c:
c.argument('vm_extension_name', name_arg_type, completer=get_resource_name_completion_list('Microsoft.Compute/virtualMachines/extensions'), help='Name of the extension.', id_part='child_name_1')
c.argument('vm_name', arg_type=existing_vm_name, options_list=['--vm-name'], id_part='name')
c.argument('expand', deprecate_info=c.deprecate(expiration='3.0.0', hide=True))
with self.argument_context('vm extension list') as c:
c.argument('vm_name', arg_type=existing_vm_name, options_list=['--vm-name'], id_part=None)
with self.argument_context('vm secret') as c:
c.argument('secrets', multi_ids_type, options_list=['--secrets', '-s'], help='Space-separated list of key vault secret URIs. Perhaps, produced by \'az keyvault secret list-versions --vault-name vaultname -n cert1 --query "[?attributes.enabled].id" -o tsv\'')
c.argument('keyvault', help='Name or ID of the key vault.', validator=validate_keyvault)
c.argument('certificate', help='key vault certificate name or its full secret URL')
c.argument('certificate_store', help='Windows certificate store names. Default: My')
with self.argument_context('vm secret list') as c:
c.argument('vm_name', arg_type=existing_vm_name, id_part=None)
with self.argument_context('vm image') as c:
c.argument('publisher_name', options_list=['--publisher', '-p'], help='image publisher')
c.argument('publisher', options_list=['--publisher', '-p'], help='image publisher')
c.argument('offer', options_list=['--offer', '-f'], help='image offer')
c.argument('plan', help='image billing plan')
c.argument('sku', options_list=['--sku', '-s'], help='image sku')
c.argument('version', help="image sku's version")
c.argument('urn', help="URN, in format of 'publisher:offer:sku:version' or 'publisher:offer:sku:edge_zone:version'. If specified, other argument values can be omitted")
with self.argument_context('vm image list') as c:
c.argument('image_location', get_location_type(self.cli_ctx))
c.argument('edge_zone', edge_zone_type)
with self.argument_context('vm image list-offers') as c:
c.argument('edge_zone', edge_zone_type)
with self.argument_context('vm image list-skus') as c:
c.argument('edge_zone', edge_zone_type)
with self.argument_context('vm image list-publishers') as c:
c.argument('edge_zone', edge_zone_type)
with self.argument_context('vm image show') as c:
c.argument('skus', options_list=['--sku', '-s'])
c.argument('edge_zone', edge_zone_type)
with self.argument_context('vm image terms') as c:
c.argument('urn', help='URN, in the format of \'publisher:offer:sku:version\'. If specified, other argument values can be omitted')
c.argument('publisher', help='Image publisher')
c.argument('offer', help='Image offer')
c.argument('plan', help='Image billing plan')
with self.argument_context('vm nic') as c:
c.argument('vm_name', existing_vm_name, options_list=['--vm-name'], id_part=None)
c.argument('nics', nargs='+', help='Names or IDs of NICs.', validator=validate_vm_nics)
c.argument('primary_nic', help='Name or ID of the primary NIC. If missing, the first NIC in the list will be the primary.')
with self.argument_context('vm nic show') as c:
c.argument('nic', help='NIC name or ID.', validator=validate_vm_nic)
with self.argument_context('vm unmanaged-disk') as c:
c.argument('new', action='store_true', help='Create a new disk.')
c.argument('lun', type=int, help='0-based logical unit number (LUN). Max value depends on the Virtual Machine size.')
c.argument('vhd_uri', help="Virtual hard disk URI. For example: https://mystorage.blob.core.windows.net/vhds/d1.vhd")
with self.argument_context('vm unmanaged-disk attach') as c:
c.argument('disk_name', options_list=['--name', '-n'], help='The data disk name.')
c.argument('size_gb', options_list=['--size-gb', '-z'], help='size in GB. Max size: 4095 GB (certain preview disks can be larger).', type=int)
with self.argument_context('vm unmanaged-disk detach') as c:
c.argument('disk_name', options_list=['--name', '-n'], help='The data disk name.')
for scope in ['vm unmanaged-disk attach', 'vm unmanaged-disk detach']:
with self.argument_context(scope) as c:
c.argument('vm_name', arg_type=existing_vm_name, options_list=['--vm-name'], id_part=None)
with self.argument_context('vm unmanaged-disk list') as c:
c.argument('vm_name', options_list=['--vm-name', '--name', '-n'], arg_type=existing_vm_name, id_part=None)
with self.argument_context('vm user') as c:
c.argument('username', options_list=['--username', '-u'], help='The user name')
c.argument('password', options_list=['--password', '-p'], help='The user password')
with self.argument_context('vm list-skus') as c:
c.argument('size', options_list=['--size', '-s'], help="size name, partial name is accepted")
c.argument('zone', options_list=['--zone', '-z'], arg_type=get_three_state_flag(), help="show skus supporting availability zones")
c.argument('show_all', options_list=['--all'], arg_type=get_three_state_flag(),
help="show all information including vm sizes not available under the current subscription")
c.argument('resource_type', options_list=['--resource-type', '-r'], help='resource types e.g. "availabilitySets", "snapshots", "disks", etc')
with self.argument_context('vm restart') as c:
c.argument('force', action='store_true', help='Force the VM to restart by redeploying it. Use if the VM is unresponsive.')
with self.argument_context('vm host') as c:
c.argument('host_group_name', options_list=['--host-group'], id_part='name', help="Name of the Dedicated Host Group")
c.argument('host_name', name_arg_type, id_part='child_name_1', help="Name of the Dedicated Host")
c.ignore('expand')
with self.argument_context('vm host create') as c:
c.argument('platform_fault_domain', options_list=['--platform-fault-domain', '-d'], type=int,
help="Fault domain of the host within a group. Allowed values: 0, 1, 2")
c.argument('auto_replace_on_failure', options_list=['--auto-replace'], arg_type=get_three_state_flag(),
help="Replace the host automatically if a failure occurs")
c.argument('license_type', arg_type=get_enum_type(DedicatedHostLicenseTypes),
help="The software license type that will be applied to the VMs deployed on the dedicated host.")
c.argument('sku', help="SKU of the dedicated host. Available SKUs: https://azure.microsoft.com/pricing/details/virtual-machines/dedicated-host/")
with self.argument_context('vm host list') as c:
c.argument('host_group_name', id_part=None)
with self.argument_context('vm host group') as c:
c.argument('host_group_name', name_arg_type, id_part='name', help="Name of the Dedicated Host Group")
c.argument('automatic_placement', arg_type=get_three_state_flag(), min_api='2020-06-01',
help='Specify whether virtual machines or virtual machine scale sets can be placed automatically '
'on the dedicated host group. Automatic placement means resources are allocated on dedicated '
'hosts, that are chosen by Azure, under the dedicated host group. The value is defaulted to '
'false when not provided.')
with self.argument_context('vm host group create') as c:
c.argument('platform_fault_domain_count', options_list=["--platform-fault-domain-count", "-c"], type=int,
help="Number of fault domains that the host group can span.")
c.argument('zones', zone_type)
for scope in ["vm host", "vm host group"]:
with self.argument_context("{} create".format(scope)) as c:
location_type = get_location_type(self.cli_ctx)
custom_location_msg = " Otherwise, location will default to the resource group's location"
custom_location_type = CLIArgumentType(overrides=location_type,
help=location_type.settings["help"] + custom_location_msg)
c.argument('location', arg_type=custom_location_type)
# endregion
# region VMSS
scaleset_name_aliases = ['vm_scale_set_name', 'virtual_machine_scale_set_name', 'name']
with self.argument_context('vmss') as c:
c.argument('zones', zones_type, min_api='2017-03-30')
c.argument('instance_id', id_part='child_name_1')
c.argument('instance_ids', multi_ids_type, help='Space-separated list of IDs (ex: 1 2 3 ...) or * for all instances. If not provided, the action will be applied on the scaleset itself')
c.argument('tags', tags_type)
c.argument('caching', help='Disk caching policy', arg_type=get_enum_type(CachingTypes))
for dest in scaleset_name_aliases:
c.argument(dest, vmss_name_type)
c.argument('host_group', min_api='2020-06-01',
help='Name or ID of dedicated host group that the virtual machine scale set resides in')
for scope in ['vmss deallocate', 'vmss delete-instances', 'vmss restart', 'vmss start', 'vmss stop', 'vmss show', 'vmss update-instances', 'vmss simulate-eviction']:
with self.argument_context(scope) as c:
for dest in scaleset_name_aliases:
c.argument(dest, vmss_name_type, id_part=None) # due to instance-ids parameter
with self.argument_context('vmss create', operation_group='virtual_machine_scale_sets') as c:
VirtualMachineEvictionPolicyTypes = self.get_models('VirtualMachineEvictionPolicyTypes', resource_type=ResourceType.MGMT_COMPUTE)
c.argument('name', name_arg_type)
c.argument('nat_backend_port', default=None, help='Backend port to open with NAT rules. Defaults to 22 on Linux and 3389 on Windows.')
c.argument('single_placement_group', arg_type=get_three_state_flag(), help="Limit the scale set to a single placement group."
" See https://docs.microsoft.com/azure/virtual-machine-scale-sets/virtual-machine-scale-sets-placement-groups for details.")
c.argument('platform_fault_domain_count', type=int, help='Fault Domain count for each placement group in the availability zone', min_api='2017-12-01')
c.argument('vmss_name', name_arg_type, id_part=None, help='Name of the virtual machine scale set.')
c.argument('instance_count', help='Number of VMs in the scale set.', type=int)
c.argument('disable_overprovision', help='Overprovision option (see https://azure.microsoft.com/documentation/articles/virtual-machine-scale-sets-overview/ for details).', action='store_true')
c.argument('upgrade_policy_mode', help=None, arg_type=get_enum_type(UpgradeMode))
c.argument('health_probe', help='Probe name from the existing load balancer, mainly used for rolling upgrade or automatic repairs')
c.argument('vm_sku', help='Size of VMs in the scale set. Default to "Standard_DS1_v2". See https://azure.microsoft.com/pricing/details/virtual-machines/ for size info.')
c.argument('nsg', help='Name or ID of an existing Network Security Group.', arg_group='Network')
c.argument('eviction_policy', resource_type=ResourceType.MGMT_COMPUTE, min_api='2017-12-01', arg_type=get_enum_type(VirtualMachineEvictionPolicyTypes, default=None),
help="The eviction policy for virtual machines in a Spot priority scale set. Default eviction policy is Deallocate for a Spot priority scale set")
c.argument('application_security_groups', resource_type=ResourceType.MGMT_COMPUTE, min_api='2018-06-01', nargs='+', options_list=['--asgs'], help='Space-separated list of existing application security groups to associate with the VM.', arg_group='Network', validator=validate_asg_names_or_ids)
c.argument('computer_name_prefix', help='Computer name prefix for all of the virtual machines in the scale set. Computer name prefixes must be 1 to 15 characters long')
c.argument('orchestration_mode', help='Choose how virtual machines are managed by the scale set. In Uniform mode, you define a virtual machine model and Azure will generate identical instances based on that model. In Flexible mode, you manually create and add a virtual machine of any configuration to the scale set or generate identical instances based on virtual machine model defined for the scale set.',
arg_type=get_enum_type(['Uniform', 'Flexible']))
c.argument('scale_in_policy', scale_in_policy_type)
c.argument('automatic_repairs_grace_period', min_api='2018-10-01',
help='The amount of time (in minutes, between 30 and 90) for which automatic repairs are suspended due to a state change on VM.')
c.argument('user_data', help='UserData for the virtual machines in the scale set. It can be passed in as file or string.', completer=FilesCompleter(), type=file_type, min_api='2021-03-01')
c.argument('network_api_version', min_api='2021-03-01',
help="Specify the Microsoft.Network API version used when creating networking resources in the Network "
"Interface Configurations for Virtual Machine Scale Set with orchestration mode 'Flexible'. Default "
"value is 2020-11-01.")
c.argument('enable_spot_restore', arg_type=get_three_state_flag(), min_api='2021-04-01', help='Enable the Spot-Try-Restore feature where evicted VMSS SPOT instances will be tried to be restored opportunistically based on capacity availability and pricing constraints')
c.argument('spot_restore_timeout', min_api='2021-04-01', help='Timeout value expressed as an ISO 8601 time duration after which the platform will not try to restore the VMSS SPOT instances')
c.argument('enable_agent', arg_type=get_three_state_flag(), min_api='2018-06-01',
help='Indicate whether virtual machine agent should be provisioned on the virtual machine. When this property is not specified, default behavior is to set it to true. This will ensure that VM Agent is installed on the VM so that extensions can be added to the VM later')
c.argument('enable_auto_update', arg_type=get_three_state_flag(), min_api='2020-06-01',
help='Indicate whether Automatic Updates is enabled for the Windows virtual machine')
c.argument('patch_mode', arg_type=get_enum_type(['AutomaticByOS', 'AutomaticByPlatform', 'Manual', 'ImageDefault']), min_api='2020-12-01',
help='Mode of in-guest patching to IaaS virtual machine. Allowed values for Windows VM: AutomaticByOS, AutomaticByPlatform, Manual. Allowed values for Linux VM: AutomaticByPlatform, ImageDefault. Manual - You control the application of patches to a virtual machine. You do this by applying patches manually inside the VM. In this mode, automatic updates are disabled; the paramater --enable-auto-update must be false. AutomaticByOS - The virtual machine will automatically be updated by the OS. The parameter --enable-auto-update must be true. AutomaticByPlatform - the virtual machine will automatically updated by the OS. ImageDefault - The virtual machine\'s default patching configuration is used. The parameter --enable-agent and --enable-auto-update must be true')
with self.argument_context('vmss create', arg_group='Network Balancer') as c:
LoadBalancerSkuName = self.get_models('LoadBalancerSkuName', resource_type=ResourceType.MGMT_NETWORK)
c.argument('application_gateway', help='Name to use when creating a new application gateway (default) or referencing an existing one. Can also reference an existing application gateway by ID or specify "" for none.', options_list=['--app-gateway'])
c.argument('app_gateway_capacity', help='The number of instances to use when creating a new application gateway.')
c.argument('app_gateway_sku', help='SKU when creating a new application gateway.')
c.argument('app_gateway_subnet_address_prefix', help='The subnet IP address prefix to use when creating a new application gateway in CIDR format.')
c.argument('backend_pool_name', help='Name to use for the backend pool when creating a new load balancer or application gateway.')
c.argument('backend_port', help='When creating a new load balancer, backend port to open with NAT rules (Defaults to 22 on Linux and 3389 on Windows). When creating an application gateway, the backend port to use for the backend HTTP settings.', type=int)
c.argument('load_balancer', help='Name to use when creating a new load balancer (default) or referencing an existing one. Can also reference an existing load balancer by ID or specify "" for none.', options_list=['--load-balancer', '--lb'])
c.argument('load_balancer_sku', resource_type=ResourceType.MGMT_NETWORK, min_api='2017-08-01', options_list=['--lb-sku'], arg_type=get_enum_type(LoadBalancerSkuName),
help="Sku of the Load Balancer to create. Default to 'Standard' when single placement group is turned off; otherwise, default to 'Basic'. The public IP is supported to be created on edge zone only when it is 'Standard'")
c.argument('nat_pool_name', help='Name to use for the NAT pool when creating a new load balancer.', options_list=['--lb-nat-pool-name', '--nat-pool-name'])
with self.argument_context('vmss create', min_api='2017-03-30', arg_group='Network') as c:
c.argument('public_ip_per_vm', action='store_true', help="Each VM instance will have a public ip. For security, you can use '--nsg' to apply appropriate rules")
c.argument('vm_domain_name', help="domain name of VM instances, once configured, the FQDN is `vm<vm-index>.<vm-domain-name>.<..rest..>`")
c.argument('dns_servers', nargs='+', help="space-separated IP addresses of DNS servers, e.g. 10.0.0.5 10.0.0.6")
c.argument('accelerated_networking', arg_type=get_three_state_flag(),
help="enable accelerated networking. Unless specified, CLI will enable it based on machine image and size")
with self.argument_context('vmss update') as c:
protection_policy_type = CLIArgumentType(overrides=get_three_state_flag(), arg_group="Protection Policy", min_api='2019-03-01')
c.argument('protect_from_scale_in', arg_type=protection_policy_type, help="Protect the VM instance from scale-in operations.")
c.argument('protect_from_scale_set_actions', arg_type=protection_policy_type, help="Protect the VM instance from scale set actions (including scale-in).")
c.argument('enable_terminate_notification', min_api='2019-03-01', arg_type=get_three_state_flag(),
help='Enable terminate notification')
c.argument('ultra_ssd_enabled', ultra_ssd_enabled_type)
c.argument('scale_in_policy', scale_in_policy_type)
c.argument('user_data', help='UserData for the virtual machines in the scale set. It can be passed in as file or string. If empty string is passed in, the existing value will be deleted.', completer=FilesCompleter(), type=file_type, min_api='2021-03-01')
c.argument('enable_spot_restore', arg_type=get_three_state_flag(), min_api='2021-04-01',
help='Enable the Spot-Try-Restore feature where evicted VMSS SPOT instances will be tried to be restored opportunistically based on capacity availability and pricing constraints')
c.argument('spot_restore_timeout', min_api='2021-04-01',
help='Timeout value expressed as an ISO 8601 time duration after which the platform will not try to restore the VMSS SPOT instances')
c.argument('vm_sku', help='Size of VM instances in the scale set. Default to "Standard_DS1_v2". See https://azure.microsoft.com/pricing/details/virtual-machines/ for size info.', is_preview=True)
with self.argument_context('vmss update', min_api='2018-10-01', arg_group='Automatic Repairs') as c:
c.argument('enable_automatic_repairs', arg_type=get_three_state_flag(), help='Enable automatic repairs')
c.argument(
'automatic_repairs_grace_period',
help='The amount of time (in minutes, between 30 and 90) for which automatic repairs are suspended due to a state change on VM.'
)
for scope in ['vmss create', 'vmss update']:
with self.argument_context(scope) as c:
c.argument('terminate_notification_time', min_api='2019-03-01',
help='Length of time (in minutes, between 5 and 15) a notification to be sent to the VM on the instance metadata server till the VM gets deleted')
c.argument('max_batch_instance_percent', type=int, min_api='2020-12-01',
help='The maximum percent of total virtual machine instances that will be upgraded simultaneously by the rolling upgrade in one batch. Default: 20%')
c.argument('max_unhealthy_instance_percent', type=int, min_api='2020-12-01',
help='The maximum percentage of the total virtual machine instances in the scale set that can be simultaneously unhealthy. Default: 20%')
c.argument('max_unhealthy_upgraded_instance_percent', type=int, min_api='2020-12-01',
help='The maximum percentage of upgraded virtual machine instances that can be found to be in an unhealthy state. Default: 20%')
c.argument('pause_time_between_batches', min_api='2020-12-01',
help='The wait time between completing the update for all virtual machines in one batch and starting the next batch. Default: 0 seconds')
c.argument('enable_cross_zone_upgrade', arg_type=get_three_state_flag(), min_api='2020-12-01',
help='Set this Boolean property will allow VMSS to ignore AZ boundaries when constructing upgrade batches, and only consider Update Domain and maxBatchInstancePercent to determine the batch size')
c.argument('prioritize_unhealthy_instances', arg_type=get_three_state_flag(), min_api='2020-12-01',
help='Set this Boolean property will lead to all unhealthy instances in a scale set getting upgraded before any healthy instances')
for scope, help_prefix in [('vmss update', 'Update the'), ('vmss wait', 'Wait on the')]:
with self.argument_context(scope) as c:
c.argument('instance_id', id_part='child_name_1', help="{0} VM instance with this ID. If missing, {0} VMSS.".format(help_prefix))
for scope in ['vmss update-instances', 'vmss delete-instances']:
with self.argument_context(scope) as c:
c.argument('instance_ids', multi_ids_type, help='Space-separated list of IDs (ex: 1 2 3 ...) or * for all instances.')
with self.argument_context('vmss diagnostics') as c:
c.argument('vmss_name', id_part=None, help='Scale set name')
with self.argument_context('vmss disk') as c:
options_list = ['--vmss-name'] + [c.deprecate(target=opt, redirect='--vmss-name', hide=True)for opt in name_arg_type.settings['options_list']]
new_vmss_name_type = CLIArgumentType(overrides=vmss_name_type, options_list=options_list)
c.argument('lun', type=int, help='0-based logical unit number (LUN). Max value depends on the Virtual Machine instance size.')
c.argument('size_gb', options_list=['--size-gb', '-z'], help='size in GB. Max size: 4095 GB (certain preview disks can be larger).', type=int)
c.argument('vmss_name', new_vmss_name_type, completer=get_resource_name_completion_list('Microsoft.Compute/virtualMachineScaleSets'))
c.argument('disk', validator=validate_vmss_disk, help='existing disk name or ID to attach or detach from VM instances',
min_api='2017-12-01', completer=get_resource_name_completion_list('Microsoft.Compute/disks'))
c.argument('instance_id', help='Scale set VM instance id', min_api='2017-12-01')
c.argument('sku', arg_type=disk_sku, help='Underlying storage SKU')
with self.argument_context('vmss encryption') as c:
c.argument('vmss_name', vmss_name_type, completer=get_resource_name_completion_list('Microsoft.Compute/virtualMachineScaleSets'))
with self.argument_context('vmss extension') as c:
c.argument('extension_name', name_arg_type, help='Name of the extension.')
c.argument('vmss_name', vmss_name_type, options_list=['--vmss-name'], id_part=None)
with self.argument_context('vmss nic') as c:
c.argument('virtual_machine_scale_set_name', options_list=['--vmss-name'], help='Scale set name.', completer=get_resource_name_completion_list('Microsoft.Compute/virtualMachineScaleSets'), id_part='name')
c.argument('virtualmachine_index', options_list=['--instance-id'], id_part='child_name_1')
c.argument('network_interface_name', options_list=['--name', '-n'], metavar='NIC_NAME', help='The network interface (NIC).', completer=get_resource_name_completion_list('Microsoft.Network/networkInterfaces'), id_part='child_name_2')
with self.argument_context('vmss nic list') as c:
c.argument('virtual_machine_scale_set_name', arg_type=vmss_name_type, options_list=['--vmss-name'], id_part=None)
with self.argument_context('vmss set-orchestration-service-state') as c:
c.argument('service_name', arg_type=get_enum_type(OrchestrationServiceNames), help='The name of the orchestration service.')
c.argument('action', arg_type=get_enum_type(OrchestrationServiceStateAction), help='The action to be performed.')
# endregion
# region VM & VMSS Shared
for scope in ['vm', 'vmss']:
with self.argument_context(scope) as c:
c.argument('no_auto_upgrade',
options_list=['--no-auto-upgrade-minor-version', c.deprecate(target='--no-auto-upgrade', redirect='--no-auto-upgrade-minor-version')],
arg_type=get_three_state_flag(),
help='If set, the extension service will not automatically pick or upgrade to the latest minor version, even if the extension is redeployed.')
with self.argument_context('{} run-command'.format(scope)) as c:
c.argument('command_id', completer=get_vm_run_command_completion_list, help="The command id. Use 'az {} run-command list' to get the list".format(scope))
if scope == 'vmss':
c.argument('vmss_name', vmss_name_type)
with self.argument_context('{} run-command invoke'.format(scope)) as c:
c.argument('parameters', nargs='+', help="space-separated parameters in the format of '[name=]value'")
c.argument('scripts', nargs='+', help="Space-separated script lines. Use @{file} to load script from a file")
with self.argument_context('{} stop'.format(scope)) as c:
c.argument('skip_shutdown', action='store_true', help='Skip shutdown and power-off immediately.', min_api='2019-03-01')
for scope in ['vm identity assign', 'vmss identity assign']:
with self.argument_context(scope) as c:
c.argument('assign_identity', options_list=['--identities'], nargs='*', help="Space-separated identities to assign. Use '{0}' to refer to the system assigned identity. Default: '{0}'".format(MSI_LOCAL_ID))
c.argument('vm_name', existing_vm_name)
c.argument('vmss_name', vmss_name_type)
for scope in ['vm identity remove', 'vmss identity remove']:
with self.argument_context(scope) as c:
c.argument('identities', nargs='+', help="Space-separated identities to remove. Use '{0}' to refer to the system assigned identity. Default: '{0}'".format(MSI_LOCAL_ID))
c.argument('vm_name', existing_vm_name)
c.argument('vmss_name', vmss_name_type)
for scope in ['vm identity show', 'vmss identity show']:
with self.argument_context(scope) as c:
c.argument('vm_name', existing_vm_name)
c.argument('vmss_name', vmss_name_type)
for scope in ['vm application set', 'vmss application set']:
with self.argument_context(scope) as c:
c.argument('vm', existing_vm_name)
c.argument('vmss_name', vmss_name_type)
c.argument('application_version_ids', options_list=['--app-version-ids'], nargs='*', help="Space-separated application version ids to set to VM.")
c.argument('order_applications', action='store_true', help='Whether set order index at each gallery applications, the order index starts from 1.')
c.argument('application_configuration_overrides', options_list=['--app-config-overrides'], nargs='*',
help='Space-separated application configuration overrides for each application version ids. '
'It should have the same number of items as the application version ids. Null is available for a application '
'which does not have a configuration override.')
for scope in ['vm application list', 'vmss application list']:
with self.argument_context(scope) as c:
c.argument('vm_name', options_list=['--vm-name', '--name', '-n'], arg_type=existing_vm_name, id_part=None)
c.argument('vmss_name', vmss_name_type, id_part=None)
for scope in ['vm create', 'vmss create']:
with self.argument_context(scope) as c:
c.argument('location', get_location_type(self.cli_ctx), help='Location in which to create VM and related resources. If default location is not configured, will default to the resource group\'s location')
c.argument('tags', tags_type)
c.argument('no_wait', help='Do not wait for the long-running operation to finish.')
c.argument('validate', options_list=['--validate'], help='Generate and validate the ARM template without creating any resources.', action='store_true')
c.argument('size', help='The VM size to be created. See https://azure.microsoft.com/pricing/details/virtual-machines/ for size info.')
c.argument('image', completer=get_urn_aliases_completion_list)
c.argument('custom_data', help='Custom init script file or text (cloud-init, cloud-config, etc..)', completer=FilesCompleter(), type=file_type)
c.argument('secrets', multi_ids_type, help='One or many Key Vault secrets as JSON strings or files via `@{path}` containing `[{ "sourceVault": { "id": "value" }, "vaultCertificates": [{ "certificateUrl": "value", "certificateStore": "cert store name (only on windows)"}] }]`', type=file_type, completer=FilesCompleter())
c.argument('assign_identity', nargs='*', arg_group='Managed Service Identity', help="accept system or user assigned identities separated by spaces. Use '[system]' to refer system assigned identity, or a resource id to refer user assigned identity. Check out help for more examples")
c.ignore('aux_subscriptions')
c.argument('edge_zone', edge_zone_type)
with self.argument_context(scope, arg_group='Authentication') as c:
c.argument('generate_ssh_keys', action='store_true', help='Generate SSH public and private key files if missing. The keys will be stored in the ~/.ssh directory')
c.argument('admin_username', help='Username for the VM. Default value is current username of OS. If the default value is system reserved, then default value will be set to azureuser. Please refer to https://docs.microsoft.com/rest/api/compute/virtualmachines/createorupdate#osprofile to get a full list of reserved values.')
c.argument('admin_password', help="Password for the VM if authentication type is 'Password'.")
c.argument('ssh_key_value', options_list=['--ssh-key-values'], completer=FilesCompleter(), type=file_type, nargs='+')
c.argument('ssh_dest_key_path', help='Destination file path on the VM for the SSH key. If the file already exists, the specified key(s) are appended to the file. Destination path for SSH public keys is currently limited to its default value "/home/username/.ssh/authorized_keys" due to a known issue in Linux provisioning agent.')
c.argument('authentication_type', help='Type of authentication to use with the VM. Defaults to password for Windows and SSH public key for Linux. "all" enables both ssh and password authentication. ', arg_type=get_enum_type(['ssh', 'password', 'all']))
with self.argument_context(scope, arg_group='Storage') as c:
if DiskStorageAccountTypes:
allowed_values = ", ".join([sku.value for sku in DiskStorageAccountTypes])
else:
allowed_values = ", ".join(['Premium_LRS', 'Standard_LRS'])
usage = 'Usage: [--storage-sku SKU | --storage-sku ID=SKU ID=SKU ID=SKU...], where each ID is "os" or a 0-indexed lun.'
allowed_values = 'Allowed values: {}.'.format(allowed_values)
storage_sku_help = 'The SKU of the storage account with which to persist VM. Use a singular sku that would be applied across all disks, ' \
'or specify individual disks. {} {}'.format(usage, allowed_values)
c.argument('os_disk_name', help='The name of the new VM OS disk.')
c.argument('os_type', help='Type of OS installed on a custom VHD. Do not use when specifying an URN or URN alias.', arg_type=get_enum_type(['windows', 'linux']))
c.argument('storage_account', help="Only applicable when used with `--use-unmanaged-disk`. The name to use when creating a new storage account or referencing an existing one. If omitted, an appropriate storage account in the same resource group and location will be used, or a new one will be created.")
c.argument('storage_sku', nargs='+', help=storage_sku_help)
c.argument('storage_container_name', help="Only applicable when used with `--use-unmanaged-disk`. Name of the storage container for the VM OS disk. Default: vhds")
c.ignore('os_publisher', 'os_offer', 'os_sku', 'os_version', 'storage_profile')
c.argument('use_unmanaged_disk', action='store_true', help='Do not use managed disk to persist VM')
c.argument('os_disk_size_gb', type=int, help='OS disk size in GB to create.')
c.argument('data_disk_sizes_gb', nargs='+', type=int, help='space-separated empty managed data disk sizes in GB to create')
c.ignore('disk_info', 'storage_account_type', 'public_ip_address_type', 'nsg_type', 'nic_type', 'vnet_type', 'load_balancer_type', 'app_gateway_type')
c.argument('os_caching', options_list=[self.deprecate(target='--storage-caching', redirect='--os-disk-caching', hide=True), '--os-disk-caching'], help='Storage caching type for the VM OS disk. Default: ReadWrite', arg_type=get_enum_type(CachingTypes))
c.argument('data_caching', options_list=['--data-disk-caching'], nargs='+',
help="storage caching type for data disk(s), including 'None', 'ReadOnly', 'ReadWrite', etc. Use a singular value to apply on all disks, or use `<lun>=<vaule1> <lun>=<value2>` to configure individual disk")
c.argument('ultra_ssd_enabled', ultra_ssd_enabled_type)
c.argument('ephemeral_os_disk', arg_type=get_three_state_flag(), min_api='2018-06-01',
help='Allows you to create an OS disk directly on the host node, providing local disk performance and faster VM/VMSS reimage time.', is_preview=True)
c.argument('os_disk_encryption_set', min_api='2019-07-01', help='Name or ID of disk encryption set for OS disk.')
c.argument('data_disk_encryption_sets', nargs='+', min_api='2019-07-01',
help='Names or IDs (space delimited) of disk encryption sets for data disks.')
c.argument('data_disk_iops', min_api='2019-07-01', nargs='+', type=int, help='Specify the Read-Write IOPS (space delimited) for the managed disk. Should be used only when StorageAccountType is UltraSSD_LRS. If not specified, a default value would be assigned based on diskSizeGB.')
c.argument('data_disk_mbps', min_api='2019-07-01', nargs='+', type=int, help='Specify the bandwidth in MB per second (space delimited) for the managed disk. Should be used only when StorageAccountType is UltraSSD_LRS. If not specified, a default value would be assigned based on diskSizeGB.')
c.argument('specialized', arg_type=get_three_state_flag(), help='Indicate whether the source image is specialized.')
c.argument('encryption_at_host', arg_type=get_three_state_flag(), help='Enable Host Encryption for the VM or VMSS. This will enable the encryption for all the disks including Resource/Temp disk at host itself.')
c.argument('os_disk_delete_option', arg_type=get_enum_type(self.get_models('DiskDeleteOptionTypes')), min_api='2021-03-01',
help='Specify the behavior of the managed disk when the VM gets deleted i.e whether the managed disk is deleted or detached.')
c.argument('data_disk_delete_option', options_list=['--data-disk-delete-option', self.deprecate(target='--data-delete-option', redirect='--data-disk-delete-option', hide=True)],
nargs='+', min_api='2021-03-01',
help='Specify whether data disk should be deleted or detached upon VM deletion.')
with self.argument_context(scope, arg_group='Network') as c:
c.argument('vnet_name', help='Name of the virtual network when creating a new one or referencing an existing one.')
c.argument('vnet_address_prefix', help='The IP address prefix to use when creating a new VNet in CIDR format.')
c.argument('subnet', help='The name of the subnet when creating a new VNet or referencing an existing one. Can also reference an existing subnet by ID. If both vnet-name and subnet are omitted, an appropriate VNet and subnet will be selected automatically, or a new one will be created.')
c.argument('subnet_address_prefix', help='The subnet IP address prefix to use when creating a new VNet in CIDR format.')
c.argument('nics', nargs='+', help='Names or IDs of existing NICs to attach to the VM. The first NIC will be designated as primary. If omitted, a new NIC will be created. If an existing NIC is specified, do not specify subnet, VNet, public IP or NSG.')
c.argument('private_ip_address', help='Static private IP address (e.g. 10.0.0.5).')
c.argument('public_ip_address', help='Name of the public IP address when creating one (default) or referencing an existing one. Can also reference an existing public IP by ID or specify "" for None (\'""\' in Azure CLI using PowerShell or --% operator).')
c.argument('public_ip_address_allocation', help=None, default=None, arg_type=get_enum_type(['dynamic', 'static']))
c.argument('public_ip_address_dns_name', help='Globally unique DNS name for a newly created public IP.')
if self.supported_api_version(min_api='2017-08-01', resource_type=ResourceType.MGMT_NETWORK):
PublicIPAddressSkuName = self.get_models('PublicIPAddressSkuName', resource_type=ResourceType.MGMT_NETWORK)
c.argument('public_ip_sku', help='Public IP SKU. It is set to Basic by default. The public IP is supported to be created on edge zone only when it is \'Standard\'',
default=None, arg_type=get_enum_type(PublicIPAddressSkuName))
c.argument('nic_delete_option', nargs='+', min_api='2021-03-01',
help='Specify what happens to the network interface when the VM is deleted. Use a singular '
'value to apply on all resources, or use <Name>=<Value> to configure '
'the delete behavior for individual resources. Possible options are Delete and Detach.')
with self.argument_context(scope, arg_group='Marketplace Image Plan') as c:
c.argument('plan_name', help='plan name')
c.argument('plan_product', help='plan product')
c.argument('plan_publisher', help='plan publisher')
c.argument('plan_promotion_code', help='plan promotion code')
for scope in ['vm create', 'vmss create', 'vm identity assign', 'vmss identity assign']:
with self.argument_context(scope) as c:
arg_group = 'Managed Service Identity' if scope.split()[-1] == 'create' else None
c.argument('identity_scope', options_list=['--scope'], arg_group=arg_group, help="Scope that the system assigned identity can access")
c.argument('identity_role', options_list=['--role'], arg_group=arg_group, help="Role name or id the system assigned identity will have")
c.ignore('identity_role_id')
with self.argument_context('vm auto-shutdown') as c:
c.argument('off', action='store_true', help='Turn off auto-shutdown for VM. Configuration will be cleared.')
c.argument('email', help='The email recipient to send notifications to (can be a list of semi-colon separated email addresses)')
c.argument('time', help='The UTC time of day the schedule will occur every day. Format: hhmm. Example: 1730')
c.argument('webhook', help='The webhook URL to which the notification will be sent')
c.argument('location', validator=get_default_location_from_resource_group)
for scope in ['vm diagnostics', 'vmss diagnostics']:
with self.argument_context(scope) as c:
c.argument('version', help='version of the diagnostics extension. Will use the latest if not specfied')
c.argument('settings', help='json string or a file path, which defines data to be collected.', type=validate_file_or_dict, completer=FilesCompleter())
c.argument('protected_settings', help='json string or a file path containing private configurations such as storage account keys, etc.', type=validate_file_or_dict, completer=FilesCompleter())
c.argument('is_windows_os', action='store_true', help='for Windows VMs')
for scope in ['vm encryption', 'vmss encryption']:
with self.argument_context(scope) as c:
c.argument('volume_type', help='Type of volume that the encryption operation is performed on', arg_type=get_enum_type(['DATA', 'OS', 'ALL']))
c.argument('force', action='store_true', help='continue by ignoring client side validation errors')
c.argument('disk_encryption_keyvault', help='Name or ID of the key vault where the generated encryption key will be placed.')
c.argument('key_encryption_key', help='Key vault key name or URL used to encrypt the disk encryption key.')
c.argument('key_encryption_keyvault', help='Name or ID of the key vault containing the key encryption key used to encrypt the disk encryption key. If missing, CLI will use `--disk-encryption-keyvault`.')
for scope in ['vm extension', 'vmss extension']:
with self.argument_context(scope) as c:
c.argument('publisher', help='The name of the extension publisher.')
c.argument('settings', type=validate_file_or_dict, help='Extension settings in JSON format. A JSON file path is also accepted.')
c.argument('protected_settings', type=validate_file_or_dict, help='Protected settings in JSON format for sensitive information like credentials. A JSON file path is also accepted.')
c.argument('version', help='The version of the extension. To pin extension version to this value, please specify --no-auto-upgrade-minor-version.')
c.argument('enable_auto_upgrade', arg_type=get_three_state_flag(),
help='Indicate the extension should be automatically upgraded by the platform if there is a newer version of the extension available.')
with self.argument_context('vm extension set') as c:
c.argument('vm_extension_name', name_arg_type,
completer=get_resource_name_completion_list('Microsoft.Compute/virtualMachines/extensions'),
help='Name of the extension.', id_part=None)
c.argument('force_update', action='store_true', help='force to update even if the extension configuration has not changed.')
c.argument('extension_instance_name', extension_instance_name_type)
with self.argument_context('vmss extension set', min_api='2017-12-01') as c:
c.argument('force_update', action='store_true', help='force to update even if the extension configuration has not changed.')
c.argument('extension_instance_name', extension_instance_name_type)
c.argument('provision_after_extensions', nargs='+', help='Space-separated list of extension names after which this extension should be provisioned. These extensions must already be set on the vm.')
for scope in ['vm extension image', 'vmss extension image']:
with self.argument_context(scope) as c:
c.argument('image_location', options_list=['--location', '-l'], help='Image location.')
c.argument('name', help='Image name', id_part=None)
c.argument('publisher_name', options_list=['--publisher', '-p'], help='Image publisher name')
c.argument('type', options_list=['--name', '-n'], help='Name of the extension')
c.argument('latest', action='store_true', help='Show the latest version only.')
c.argument('version', help='Extension version')
c.argument('orderby', help="the $orderby odata query option")
c.argument('top', help='the $top odata query option')
for scope in ['vm create', 'vm update', 'vmss create', 'vmss update']:
with self.argument_context(scope) as c:
license_msg = "Specifies that the Windows image or disk was licensed on-premises. " \
"To enable Azure Hybrid Benefit for Windows Server, use 'Windows_Server'. " \
"To enable Multitenant Hosting Rights for Windows 10, use 'Windows_Client'. " \
"For more information see the Azure Windows VM online docs."
c.argument('license_type', help=license_msg, arg_type=get_enum_type(['Windows_Server', 'Windows_Client', 'RHEL_BYOS', 'SLES_BYOS', 'None']))
c.argument('priority', resource_type=ResourceType.MGMT_COMPUTE, min_api='2019-03-01',
arg_type=get_enum_type(self.get_models('VirtualMachinePriorityTypes'), default=None),
help="Priority. Use 'Spot' to run short-lived workloads in a cost-effective way. 'Low' enum will be deprecated in the future. Please use 'Spot' to deploy Azure spot VM and/or VMSS. Default to Regular.")
c.argument('max_price', min_api='2019-03-01', type=float, is_preview=True,
help='The maximum price (in US Dollars) you are willing to pay for a Spot VM/VMSS. -1 indicates that the Spot VM/VMSS should not be evicted for price reasons')
c.argument('capacity_reservation_group', options_list=['--capacity-reservation-group', '--crg'],
help='The ID or name of the capacity reservation group that is used to allocate. Pass in "None" to disassociate the capacity reservation group. Please note that if you want to delete a VM/VMSS that has been associated with capacity reservation group, you need to disassociate the capacity reservation group first.',
min_api='2021-04-01', is_preview=True)
with self.argument_context('vm update') as c:
c.argument('license_type', help=license_msg, arg_type=get_enum_type(
['Windows_Server', 'Windows_Client', 'RHEL_BYOS', 'SLES_BYOS', 'RHEL_ELS_6', 'None']))
c.argument('user_data', help='UserData for the VM. It can be passed in as file or string. If empty string is passed in, the existing value will be deleted.', completer=FilesCompleter(), type=file_type, min_api='2021-03-01')
with self.argument_context('vmss create') as c:
c.argument('priority', resource_type=ResourceType.MGMT_COMPUTE, min_api='2017-12-01',
arg_type=get_enum_type(self.get_models('VirtualMachinePriorityTypes'), default=None),
help="Priority. Use 'Spot' to run short-lived workloads in a cost-effective way. 'Low' enum will be deprecated in the future. Please use 'Spot' to deploy Azure spot VM and/or VMSS. Default to Regular.")
with self.argument_context('sig') as c:
c.argument('gallery_name', options_list=['--gallery-name', '-r'], help='gallery name')
c.argument('gallery_image_name', options_list=['--gallery-image-definition', '-i'], help='gallery image definition')
c.argument('gallery_image_version', options_list=['--gallery-image-version', '-e'], help='gallery image version')
for scope in ['sig show', 'sig image-definition show', 'sig image-definition delete']:
with self.argument_context(scope) as c:
c.argument('gallery_name', options_list=['--gallery-name', '-r'], id_part='name', help='gallery name')
c.argument('gallery_image_name', options_list=['--gallery-image-definition', '-i'], id_part='child_name_1', help='gallery image definition')
with self.argument_context('sig list-shared') as c:
c.argument('location', arg_type=get_location_type(self.cli_ctx))
c.argument('shared_to', shared_to_type)
with self.argument_context('sig show-shared') as c:
c.argument('location', arg_type=get_location_type(self.cli_ctx), id_part='name')
c.argument('gallery_unique_name', type=str, help='The unique name of the Shared Gallery.',
id_part='child_name_1')
for scope in ['sig share add', 'sig share remove']:
with self.argument_context(scope) as c:
c.argument('gallery_name', type=str, help='The name of the Shared Image Gallery.', id_part='name')
c.argument('subscription_ids', nargs='+', help='A list of subscription ids to share the gallery.')
c.argument('tenant_ids', nargs='+', help='A list of tenant ids to share the gallery.')
with self.argument_context('sig share add') as c:
c.argument('op_type', default='Add', deprecate_info=c.deprecate(hide=True),
help='distinguish add operation and remove operation')
with self.argument_context('sig share remove') as c:
c.argument('op_type', default='Remove', deprecate_info=c.deprecate(hide=True),
help='distinguish add operation and remove operation')
with self.argument_context('sig share reset') as c:
c.argument('gallery_name', type=str, help='The name of the Shared Image Gallery.', id_part='name')
with self.argument_context('sig image-definition create') as c:
c.argument('offer', options_list=['--offer', '-f'], help='image offer')
c.argument('sku', options_list=['--sku', '-s'], help='image sku')
c.argument('publisher', options_list=['--publisher', '-p'], help='image publisher')
c.argument('os_type', arg_type=get_enum_type(['Windows', 'Linux']), help='the type of the OS that is included in the disk if creating a VM from user-image or a specialized VHD')
c.argument('os_state', arg_type=get_enum_type(self.get_models('OperatingSystemStateTypes')), help="This property allows the user to specify whether the virtual machines created under this image are 'Generalized' or 'Specialized'.")
c.argument('hyper_v_generation', arg_type=get_enum_type(self.get_models('HyperVGenerationTypes')), help='The hypervisor generation of the Virtual Machine. Applicable to OS disks only.')
c.argument('minimum_cpu_core', type=int, arg_group='Recommendation', help='minimum cpu cores')
c.argument('maximum_cpu_core', type=int, arg_group='Recommendation', help='maximum cpu cores')
c.argument('minimum_memory', type=int, arg_group='Recommendation', help='minimum memory in MB')
c.argument('maximum_memory', type=int, arg_group='Recommendation', help='maximum memory in MB')
c.argument('plan_publisher', help='plan publisher', arg_group='Purchase plan')
c.argument('plan_name', help='plan name', arg_group='Purchase plan')
c.argument('plan_product', help='plan product', arg_group='Purchase plan')
c.argument('eula', help='The Eula agreement for the gallery image')
c.argument('privacy_statement_uri', help='The privacy statement uri')
c.argument('release_note_uri', help='The release note uri')
c.argument('end_of_life_date', help="the end of life date, e.g. '2020-12-31'")
c.argument('disallowed_disk_types', nargs='*', help='disk types which would not work with the image, e.g., Standard_LRS')
c.argument('features', help='A list of gallery image features. E.g. "IsSecureBootSupported=true IsMeasuredBootSupported=false"')
with self.argument_context('sig image-definition list-shared') as c:
c.argument('location', arg_type=get_location_type(self.cli_ctx), id_part='name')
c.argument('gallery_unique_name', type=str, help='The unique name of the Shared Gallery.',
id_part='child_name_1')
c.argument('shared_to', shared_to_type)
with self.argument_context('sig image-definition show-shared') as c:
c.argument('location', arg_type=get_location_type(self.cli_ctx), id_part='name')
c.argument('gallery_unique_name', type=str, help='The unique name of the Shared Gallery.',
id_part='child_name_1')
c.argument('gallery_image_name', options_list=['--gallery-image-definition', '-i'], type=str, help='The name '
'of the Shared Gallery Image Definition from which the Image Versions are to be listed.',
id_part='child_name_2')
with self.argument_context('sig create') as c:
c.argument('description', help='the description of the gallery')
c.argument('permissions', arg_type=get_enum_type(GallerySharingPermissionTypes), arg_group='Sharing Profile',
min_api='2020-09-30', is_experimental=True,
help='This property allows you to specify the permission of sharing gallery.')
c.argument('soft_delete', arg_type=get_three_state_flag(), min_api='2021-03-01', is_preview=True,
help='Enable soft-deletion for resources in this gallery, '
'allowing them to be recovered within retention time.')
with self.argument_context('sig update') as c:
c.ignore('gallery')
c.argument('permissions', arg_type=get_enum_type(GallerySharingPermissionTypes), arg_group='Sharing Profile',
min_api='2020-09-30', is_experimental=True,
help='This property allows you to specify the permission of sharing gallery.')
c.argument('soft_delete', arg_type=get_three_state_flag(), min_api='2021-03-01', is_preview=True,
help='Enable soft-deletion for resources in this gallery, '
'allowing them to be recovered within retention time.')
with self.argument_context('sig image-definition create') as c:
c.argument('description', help='the description of the gallery image definition')
with self.argument_context('sig image-definition update') as c:
c.ignore('gallery_image')
with self.argument_context('sig image-version') as c:
deprecated_option = c.deprecate(target='--gallery-image-version-name', redirect='--gallery-image-version', hide=True, expiration="3.0.0")
c.argument('gallery_image_version_name', options_list=['--gallery-image-version', '-e', deprecated_option],
help='Gallery image version in semantic version pattern. The allowed characters are digit and period. Digits must be within the range of a 32-bit integer, e.g. `<MajorVersion>.<MinorVersion>.<Patch>`')
with self.argument_context('sig image-version create', resource_type=ResourceType.MGMT_COMPUTE, operation_group='gallery_image_versions') as c:
c.argument('gallery_image_version', options_list=['--gallery-image-version', '-e'],
help='Gallery image version in semantic version pattern. The allowed characters are digit and period. Digits must be within the range of a 32-bit integer, e.g. `<MajorVersion>.<MinorVersion>.<Patch>`')
c.argument('description', help='the description of the gallery image version')
c.argument('managed_image', help='image name(if in the same resource group) or resource id')
c.argument('os_snapshot', help='Name or ID of OS disk snapshot')
c.argument('data_snapshots', nargs='+', help='Names or IDs (space-delimited) of data disk snapshots')
c.argument('data_snapshot_luns', nargs='+', help='Logical unit numbers (space-delimited) of data disk snapshots')
c.argument('exclude_from_latest', arg_type=get_three_state_flag(), help='The flag means that if it is set to true, people deploying VMs with version omitted will not use this version.')
c.argument('version', help='image version')
c.argument('end_of_life_date', help="the end of life date, e.g. '2020-12-31'")
c.argument('storage_account_type', help="The default storage account type to be used per region. To set regional storage account types, use --target-regions",
arg_type=get_enum_type(["Standard_LRS", "Standard_ZRS", "Premium_LRS"]), min_api='2019-03-01')
c.argument('target_region_encryption', nargs='+',
help='Space-separated list of customer managed keys for encrypting the OS and data disks in the gallery artifact for each region. Format for each region: `<os_des>,<lun1>,<lun1_des>,<lun2>,<lun2_des>`. Use "null" as a placeholder.')
c.argument('os_vhd_uri', help='Source VHD URI of OS disk')
c.argument('os_vhd_storage_account', help='Name or ID of storage account of source VHD URI of OS disk')
c.argument('data_vhds_uris', nargs='+', help='Source VHD URIs (space-delimited) of data disks')
c.argument('data_vhds_luns', nargs='+', help='Logical unit numbers (space-delimited) of source VHD URIs of data disks')
c.argument('data_vhds_storage_accounts', options_list=['--data-vhds-storage-accounts', '--data-vhds-sa'], nargs='+', help='Names or IDs (space-delimited) of storage accounts of source VHD URIs of data disks')
c.argument('replication_mode', min_api='2021-07-01', arg_type=get_enum_type(ReplicationMode), help='Optional parameter which specifies the mode to be used for replication. This property is not updatable.')
with self.argument_context('sig image-version list-shared') as c:
c.argument('location', arg_type=get_location_type(self.cli_ctx), id_part='name')
c.argument('gallery_unique_name', type=str, help='The unique name of the Shared Gallery.',
id_part='child_name_1')
c.argument('gallery_image_name', options_list=['--gallery-image-definition', '-i'], type=str, help='The name '
'of the Shared Gallery Image Definition from which the Image Versions are to be listed.',
id_part='child_name_2')
c.argument('shared_to', shared_to_type)
with self.argument_context('sig image-version show') as c:
c.argument('expand', help="The expand expression to apply on the operation, e.g. 'ReplicationStatus'")
with self.argument_context('sig image-version show-shared') as c:
c.argument('location', arg_type=get_location_type(self.cli_ctx), id_part='name')
c.argument('gallery_unique_name', type=str, help='The unique name of the Shared Gallery.',
id_part='child_name_1')
c.argument('gallery_image_name', options_list=['--gallery-image-definition', '-i'], type=str, help='The name '
'of the Shared Gallery Image Definition from which the Image Versions are to be listed.',
id_part='child_name_2')
c.argument('gallery_image_version_name', options_list=['--gallery-image-version', '-e'], type=str, help='The '
'name of the gallery image version to be created. Needs to follow semantic version name pattern: '
'The allowed characters are digit and period. Digits must be within the range of a 32-bit integer. '
'Format: <MajorVersion>.<MinorVersion>.<Patch>', id_part='child_name_3')
for scope in ['sig image-version create', 'sig image-version update']:
with self.argument_context(scope) as c:
c.argument('target_regions', nargs='*', validator=process_gallery_image_version_namespace,
help='Space-separated list of regions and their replica counts. Use `<region>[=<replica count>][=<storage account type>]` to optionally set the replica count and/or storage account type for each region. '
'If a replica count is not specified, the default replica count will be used. If a storage account type is not specified, the default storage account type will be used')
c.argument('replica_count', help='The default number of replicas to be created per region. To set regional replication counts, use --target-regions', type=int)
# endregion
# region Proximity Placement Group
with self.argument_context('ppg', min_api='2018-04-01') as c:
c.argument('proximity_placement_group_name', arg_type=name_arg_type, help="The name of the proximity placement group.")
with self.argument_context('ppg create', min_api='2018-04-01') as c:
c.argument('ppg_type', options_list=['--type', '-t'], help="The type of the proximity placement group. Allowed values: Standard.")
c.argument('tags', tags_type)
with self.argument_context('ppg show', min_api='2019-07-01') as c:
c.argument('include_colocation_status', action='store_true', help='Enable fetching the colocation status of all the resources in the proximity placement group.')
for scope, item in [('vm create', 'VM'), ('vmss create', 'VMSS'),
('vm availability-set create', 'availability set'),
('vm update', 'VM'), ('vmss update', 'VMSS'),
('vm availability-set update', 'availability set')]:
with self.argument_context(scope, min_api='2018-04-01') as c:
c.argument('proximity_placement_group', options_list=['--ppg'], help="The name or ID of the proximity placement group the {} should be associated with.".format(item),
validator=_validate_proximity_placement_group) # only availability set does not have a command level validator, so this should be added.
# endregion
# region VM Monitor
with self.argument_context('vm monitor log show') as c:
c.argument('analytics_query', options_list=['--analytics-query', '-q'], help="Query to execute over Log Analytics data.")
c.argument('timespan', help="Timespan over which to query. Defaults to querying all available data.")
with self.argument_context('vm monitor metrics') as c:
c.argument('metricnamespace', options_list=['--namespace'],
help='Namespace to query metric definitions for.')
with self.argument_context('vm monitor metrics tail') as c:
from azure.mgmt.monitor.models import AggregationType
c.extra('resource_group_name', required=True)
c.argument('resource', arg_type=existing_vm_name, help='Name or ID of a virtual machine', validator=validate_vm_name_for_monitor_metrics, id_part=None)
c.argument('metadata', action='store_true')
c.argument('dimension', nargs='*', validator=validate_metric_dimension)
c.argument('aggregation', arg_type=get_enum_type(t for t in AggregationType if t.name != 'none'), nargs='*')
c.argument('metrics', nargs='*')
c.argument('orderby',
help='Aggregation to use for sorting results and the direction of the sort. Only one order can be specificed. Examples: sum asc')
c.argument('top', help='Max number of records to retrieve. Valid only if --filter used.')
c.argument('filters', options_list=['--filter'])
c.argument('metric_namespace', options_list=['--namespace'])
with self.argument_context('vm monitor metrics tail', arg_group='Time') as c:
c.argument('start_time', arg_type=get_datetime_type(help='Start time of the query.'))
c.argument('end_time', arg_type=get_datetime_type(help='End time of the query. Defaults to the current time.'))
c.argument('offset', type=get_period_type(as_timedelta=True))
c.argument('interval', arg_group='Time', type=get_period_type())
with self.argument_context('vm monitor metrics list-definitions') as c:
c.extra('resource_group_name', required=True)
c.argument('resource_uri', arg_type=existing_vm_name, help='Name or ID of a virtual machine', validator=validate_vm_name_for_monitor_metrics, id_part=None)
# endregion
# region disk encryption set
with self.argument_context('disk-encryption-set') as c:
c.argument('disk_encryption_set_name', disk_encryption_set_name)
c.argument('key_url', help='URL pointing to a key or secret in KeyVault.')
c.argument('source_vault', help='Name or ID of the KeyVault containing the key or secret.')
c.argument('encryption_type', arg_type=get_enum_type(['EncryptionAtRestWithPlatformKey', 'EncryptionAtRestWithCustomerKey', 'EncryptionAtRestWithPlatformAndCustomerKeys']),
help='The type of key used to encrypt the data of the disk. EncryptionAtRestWithPlatformKey: Disk is encrypted at rest with Platform managed key. It is the default encryption type. EncryptionAtRestWithCustomerKey: Disk is encrypted at rest with Customer managed key that can be changed and revoked by a customer. EncryptionAtRestWithPlatformAndCustomerKeys: Disk is encrypted at rest with 2 layers of encryption. One of the keys is Customer managed and the other key is Platform managed.')
c.argument('location', validator=get_default_location_from_resource_group)
c.argument('tags', tags_type)
c.argument('enable_auto_key_rotation', arg_type=get_three_state_flag(), min_api='2020-12-01',
options_list=['--enable-auto-key-rotation', '--auto-rotation'],
help='Enable automatic rotation of keys.')
# endregion
# region DiskAccess
with self.argument_context('disk-access', resource_type=ResourceType.MGMT_COMPUTE, operation_group='disk_accesses') as c:
c.argument('disk_access_name', arg_type=name_arg_type, help='Name of the disk access resource.', id_part='name')
c.argument('location', validator=get_default_location_from_resource_group)
c.argument('tags', tags_type)
# endRegion
with self.argument_context('capacity reservation group') as c:
c.argument('location', arg_type=get_location_type(self.cli_ctx), validator=get_default_location_from_resource_group)
c.argument('capacity_reservation_group_name', options_list=['--capacity-reservation-group', '-n'],
help='The name of the capacity reservation group.')
c.argument('tags', tags_type)
with self.argument_context('capacity reservation group create') as c:
c.argument('zones', zones_type, help='Availability Zones to use for this capacity reservation group. If not provided, the group supports only regional resources in the region. If provided, enforces each capacity reservation in the group to be in one of the zones.')
with self.argument_context('capacity reservation group show') as c:
c.argument('instance_view', action='store_true', options_list=['--instance-view', '-i'], help='Retrieve the list of instance views of the capacity reservations under the capacity reservation group which is a snapshot of the runtime properties of a capacity reservation that is managed by the platform and can change outside of control plane operations.')
with self.argument_context('capacity reservation group list') as c:
c.argument('vm_instance', action='store_true', help='Retrieve the Virtual Machine Instance which are associated to capacity reservation group in the response.')
c.argument('vmss_instance', action='store_true', help='Retrieve the ScaleSet VM Instance which are associated to capacity reservation group in the response.')
with self.argument_context('capacity reservation') as c:
c.argument('location', arg_type=get_location_type(self.cli_ctx), validator=get_default_location_from_resource_group)
c.argument('capacity_reservation_group_name', options_list=['--capacity-reservation-group', '-c'],
help='The name of the capacity reservation group.')
c.argument('capacity_reservation_name', options_list=['--capacity-reservation-name', '-n'],
help='The name of the capacity reservation.')
c.argument('capacity', type=int, help='Specify the number of virtual machines in the scale set.')
c.argument('tags', tags_type)
with self.argument_context('capacity reservation create') as c:
c.argument('zone', zone_type, help='Availability Zone to use for this capacity reservation. The zone has to be single value and also should be part for the list of zones specified during the capacity reservation group creation. If not provided, the reservation supports only non-zonal deployments. If provided, enforces VM/VMSS using this capacity reservation to be in same zone.')
c.argument('sku_name', options_list=['--sku', '-s'], required=True, help='The SKU of the resource for which capacity needs be reserved. Currently VM Skus with the capability called "CapacityReservationSupported" set to true are supported. Refer to List Microsoft.Compute SKUs in a region (https://docs.microsoft.com/rest/api/compute/resourceskus/list) for supported values.')
with self.argument_context('capacity reservation show') as c:
c.argument('instance_view', action='store_true', options_list=['--instance-view', '-i'], help='Retrieve a snapshot of the runtime properties of the capacity reservation that is managed by the platform and can change outside of control plane operations.')
|
57,551 |
def filtfilt(
waveform: Tensor,
a_coeffs: Tensor,
b_coeffs: Tensor,
clamp: bool = True,
) -> Tensor:
r"""Perform an IIR filter forward and backward to a waveform.
Inspired by https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.filtfilt.html
Args:
waveform (Tensor): audio waveform of dimension of ``(..., time)``. Must be normalized to -1 to 1.
a_coeffs (Tensor): denominator coefficients of difference equation of dimension of ``(n_order + 1)``.
Lower delays coefficients are first, e.g. ``[a0, a1, a2, ...]``.
Must be same size as b_coeffs (pad with 0's as necessary).
b_coeffs (Tensor): numerator coefficients of difference equation of dimension of ``(n_order + 1)``.
Lower delays coefficients are first, e.g. ``[b0, b1, b2, ...]``.
Must be same size as a_coeffs (pad with 0's as necessary).
clamp (bool, optional): If ``True``, clamp the output signal to be in the range [-1, 1] (Default: ``True``)
Returns:
Tensor: Waveform with dimension of ``(..., time)``.
"""
forward_filtered = lfilter(waveform, a_coeffs, b_coeffs, clamp)
backward_filtered = lfilter(forward_filtered.flip(-1), a_coeffs, b_coeffs, clamp).flip(-1)
return backward_filtered
|
def filtfilt(
waveform: Tensor,
a_coeffs: Tensor,
b_coeffs: Tensor,
clamp: bool = True,
) -> Tensor:
r"""Perform an IIR filter forward and backward to a waveform.
Inspired by https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.filtfilt.html
Args:
waveform (Tensor): audio waveform of dimension of ``(..., time)``. Must be normalized to -1 to 1.
a_coeffs (Tensor): denominator coefficients of difference equation of dimension of ``(n_order + 1)``.
Lower delays coefficients are first, e.g. ``[a0, a1, a2, ...]``.
Must be same size as b_coeffs (pad with 0's as necessary).
b_coeffs (Tensor): numerator coefficients of difference equation of dimension of ``(n_order + 1)``.
Lower delays coefficients are first, e.g. ``[b0, b1, b2, ...]``.
Must be same size as a_coeffs (pad with 0's as necessary).
clamp (bool, optional): If ``True``, clamp the output signal to be in the range [-1, 1] (Default: ``True``)
Returns:
Tensor: Waveform with dimension of ``(..., time)``.
"""
forward_filtered = lfilter(waveform, a_coeffs, b_coeffs, clamp).flip(-1)
backward_filtered = lfilter(forward_filtered, a_coeffs, b_coeffs, clamp).flip(-1)
return backward_filtered
|
14,811 |
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the Unifi LED platform."""
from unifiled import unifiled
# Assign configuration variables.
# The configuration check takes care they are present.
_ip = config[CONF_HOST]
_port = config[CONF_PORT]
_username = config[CONF_USERNAME]
_password = config.get(CONF_PASSWORD)
api = unifiled(_ip, _port, username=_username, password=_password)
# Verify that passed in configuration works
if not api.getloginstate():
_LOGGER.error("Could not connect to unifiled controller")
return
# Add devices
add_entities(
UnifiLedLight(light, _ip, _port, _username, _password)
for light in api.getlights()
)
|
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the Unifi LED platform."""
from unifiled import unifiled
# Assign configuration variables.
# The configuration check takes care they are present.
_ip = config[CONF_HOST]
_port = config[CONF_PORT]
_username = config[CONF_USERNAME]
_password = config[CONF_PASSWORD]
api = unifiled(_ip, _port, username=_username, password=_password)
# Verify that passed in configuration works
if not api.getloginstate():
_LOGGER.error("Could not connect to unifiled controller")
return
# Add devices
add_entities(
UnifiLedLight(light, _ip, _port, _username, _password)
for light in api.getlights()
)
|
47,624 |
def resize(
image,
size: Tuple[int, int],
resample=PIL.Image.Resampling.BILINEAR,
data_format: Optional[ChannelDimension] = None,
return_numpy: bool = True,
) -> np.ndarray:
"""
Resizes `image` to (h, w) specified by `size` using the PIL library.
Args:
image (`PIL.Image.Image` or `np.ndarray` or `torch.Tensor`):
The image to resize.
size (`Tuple[int, int]`):
The size to use for resizing the image.
resample (`int`, *optional*, defaults to `PIL.Image.BILINEAR`):
The filter to user for resampling.
data_format (`ChannelDimension`, *optional*, defaults to `None`):
The channel dimension format of the output image. If `None`, will use the inferred format from the input.
return_numpy (`bool`, *optional*, defaults to `True`):
Whether or not to return the resized image as a numpy array. If False a PIL.Image.Image object is returned.
Returns:
image: A resized np.ndarray.
"""
if not len(size) == 2:
raise ValueError("size must have 2 elements")
# For all transformations, we want to keep the same data format as the input image unless otherwise specified.
# The resized image from PIL will always have channels last, so find the input format first.
data_format = infer_channel_dimension_format(image) if data_format is None else data_format
# To maintain backwards compatibility with the resizing done in previous image feature extractors, we use
# the pillow library to resize the image and then convert back to numpy
if not isinstance(image, PIL.Image.Image):
# PIL expects image to have channels last
image = to_channel_dimension_format(image, ChannelDimension.LAST)
image = to_pil_image(image)
h, w = size
# PIL images are in the format (width, height)
resized_image = image.resize((w, h), resample=resample)
if return_numpy:
resized_image = np.array(resized_image)
resized_image = to_channel_dimension_format(resized_image, data_format)
return resized_image
|
def resize(
image,
size: Tuple[int, int],
resample=PIL.Image.Resampling.BILINEAR,
data_format: Optional[ChannelDimension] = None,
return_numpy: bool = True,
) -> np.ndarray:
"""
Resizes `image` to (h, w) specified by `size` using the PIL library.
Args:
image (`PIL.Image.Image` or `np.ndarray` or `torch.Tensor`):
The image to resize.
size (`Tuple[int, int]`):
The size to use for resizing the image.
resample (`int`, *optional*, defaults to `PIL.Image.BILINEAR`):
The filter to user for resampling.
data_format (`ChannelDimension`, *optional*, defaults to `None`):
The channel dimension format of the output image. If `None`, will use the inferred format from the input.
return_numpy (`bool`, *optional*, defaults to `True`):
Whether or not to return the resized image as a numpy array. If False a PIL.Image.Image object is returned.
Returns:
image: A resized np.ndarray.
"""
if not len(size) == 2:
raise ValueError("size must have 2 elements")
# For all transformations, we want to keep the same data format as the input image unless otherwise specified.
# The resized image from PIL will always have channels last, so find the input format first.
data_format = infer_channel_dimension_format(image) if data_format is None else data_format
# To maintain backwards compatibility with the resizing done in previous image feature extractors, we use
# the pillow library to resize the image and then convert back to numpy
if not isinstance(image, PIL.Image.Image):
# PIL expects image to have channels last
image = to_channel_dimension_format(image, ChannelDimension.LAST)
image = to_pil_image(image)
height, width = size
# PIL images are in the format (width, height)
resized_image = image.resize((w, h), resample=resample)
if return_numpy:
resized_image = np.array(resized_image)
resized_image = to_channel_dimension_format(resized_image, data_format)
return resized_image
|
9,049 |
def rate_server(
rate: int,
message: typing.Optional[str] = None,
) -> typing.Callable:
"""Decorate a function to be rate-limited for a channel.
:param rate: seconds between permitted calls of this function no matter who
triggered it or where
:param message: optional; message send as notice when a user hits the limit
This decorator can be used alone or with the :func:`rate` decorator, as it
will always take precedence::
@rate(10, 10, 10)
@rate_server(5, 'You hit the server rate limit for this function.')
# server limit will be set to 5, other to 10
# will send a NOTICE only when a user hits the servr limit
# as other rate limit don't have any message set
If you don't provide a message, the default message set (if any) by
:func:`rate` will be used instead.
.. versionadded:: 8.0
"""
def add_attribute(function):
function.global_rate = rate
function.global_rate_message = message
return function
return add_attribute
|
def rate_server(
rate: int,
message: typing.Optional[str] = None,
) -> typing.Callable:
"""Decorate a function to be rate-limited for a channel.
:param rate: seconds between permitted calls of this function no matter who
triggered it or where
:param message: optional; message send as notice when a user hits the limit
This decorator can be used alone or with the :func:`rate` decorator, as it
will always take precedence::
@rate(10, 10, 10)
@rate_server(5, 'You hit the server rate limit for this function.')
# server limit will be set to 5, other to 10
# will send a NOTICE only when a user hits the servr limit
# as other rate limits don't have any message set
If you don't provide a message, the default message set (if any) by
:func:`rate` will be used instead.
.. versionadded:: 8.0
"""
def add_attribute(function):
function.global_rate = rate
function.global_rate_message = message
return function
return add_attribute
|
22,640 |
def get_operation_and_args(commands):
operation_name = commands[0]
# Get the module & operation name
op_module, op_name = operation_name.split('.')
# Try to load the requested operation from the main operations package.
# If that fails, try to load from the user's operations package.
try:
op_module = import_module('pyinfra.operations.{0}'.format(op_module))
except ImportError:
try:
op_module = import_module('user_ops.{0}'.format(op_module))
except ImportError:
raise CliError('No such module: {0}'.format(op_module))
op = getattr(op_module, op_name, None)
if not op:
raise CliError('No such operation: {0}'.format(operation_name))
# Parse the arguments
operation_args = commands[1:]
if len(operation_args) == 1:
# Check if we're JSON (in which case we expect a list of two items:
# a list of args and a dict of kwargs).
try:
args, kwargs = json.loads(operation_args[0])
return op, (args, kwargs)
except ValueError:
pass
args = [
_parse_arg(arg)
for arg in operation_args if '=' not in arg
]
kwargs = {
key: _parse_arg(value)
for key, value in [
arg.split('=', 1)
for arg in operation_args if '=' in arg
]
}
return op, (args, kwargs)
|
def get_operation_and_args(commands):
operation_name = commands[0]
# Get the module & operation name
op_module, op_name = operation_name.split('.')
# Try to load the requested operation from the main operations package.
# If that fails, try to load from the user's operations package.
try:
op_module = import_module('pyinfra.operations.{0}'.format(op_module))
except ImportError:
try:
op_module = import_module(op_module)
except ImportError:
raise CliError('No such module: {0}'.format(op_module))
op = getattr(op_module, op_name, None)
if not op:
raise CliError('No such operation: {0}'.format(operation_name))
# Parse the arguments
operation_args = commands[1:]
if len(operation_args) == 1:
# Check if we're JSON (in which case we expect a list of two items:
# a list of args and a dict of kwargs).
try:
args, kwargs = json.loads(operation_args[0])
return op, (args, kwargs)
except ValueError:
pass
args = [
_parse_arg(arg)
for arg in operation_args if '=' not in arg
]
kwargs = {
key: _parse_arg(value)
for key, value in [
arg.split('=', 1)
for arg in operation_args if '=' in arg
]
}
return op, (args, kwargs)
|
21,614 |
def _parse_oidc_provider_configs(config: JsonDict) -> Iterable["OidcProviderConfig"]:
"""extract and parse the OIDC provider configs from the config dict
The configuration may contain either a single `oidc_config` object with an
`enabled: True` property, or a list of provider configurations under
`oidc_providers`, *or both*.
Returns a generator which yields the OidcProviderConfig objects
"""
validate_config(MAIN_CONFIG_SCHEMA, config, ())
for i, p in enumerate(config.get("oidc_providers")) or []:
yield _parse_oidc_config_dict(p, ("oidc_providers", "<item %i>" % (i,)))
# for backwards-compatibility, it is also possible to provide a single "oidc_config"
# object with an "enabled: True" property.
oidc_config = config.get("oidc_config")
if oidc_config and oidc_config.get("enabled", False):
# MAIN_CONFIG_SCHEMA checks that `oidc_config` is an object, but not that
# it matches OIDC_PROVIDER_CONFIG_SCHEMA (see the comments on OIDC_CONFIG_SCHEMA
# above), so now we need to validate it.
validate_config(OIDC_PROVIDER_CONFIG_SCHEMA, oidc_config, ("oidc_config",))
yield _parse_oidc_config_dict(oidc_config, ("oidc_config",))
|
def _parse_oidc_provider_configs(config: JsonDict) -> Iterable["OidcProviderConfig"]:
"""extract and parse the OIDC provider configs from the config dict
The configuration may contain either a single `oidc_config` object with an
`enabled: True` property, or a list of provider configurations under
`oidc_providers`, *or both*.
Returns a generator which yields the OidcProviderConfig objects
"""
validate_config(MAIN_CONFIG_SCHEMA, config, ())
for i, p in enumerate(config.get("oidc_providers") or []):
yield _parse_oidc_config_dict(p, ("oidc_providers", "<item %i>" % (i,)))
# for backwards-compatibility, it is also possible to provide a single "oidc_config"
# object with an "enabled: True" property.
oidc_config = config.get("oidc_config")
if oidc_config and oidc_config.get("enabled", False):
# MAIN_CONFIG_SCHEMA checks that `oidc_config` is an object, but not that
# it matches OIDC_PROVIDER_CONFIG_SCHEMA (see the comments on OIDC_CONFIG_SCHEMA
# above), so now we need to validate it.
validate_config(OIDC_PROVIDER_CONFIG_SCHEMA, oidc_config, ("oidc_config",))
yield _parse_oidc_config_dict(oidc_config, ("oidc_config",))
|
1,653 |
def test_not_fitted_tree():
# Testing if not fitted tree throws the correct error
clf = DecisionTreeRegressor()
out = StringIO()
with pytest.raises(NotFittedError):
plot_tree(clf, out)
clf = DecisionTreeClassifier()
out = StringIO()
with pytest.raises(NotFittedError):
plot_tree(clf, out)
|
def test_not_fitted_tree(pyplot):
# Testing if not fitted tree throws the correct error
clf = DecisionTreeRegressor()
out = StringIO()
with pytest.raises(NotFittedError):
plot_tree(clf, out)
clf = DecisionTreeClassifier()
out = StringIO()
with pytest.raises(NotFittedError):
plot_tree(clf, out)
|
57,314 |
def benchmark_disk(
sizes=["1 kiB", "100 kiB", "1 MiB", "10 MiB", "100 MiB"],
rootdir=None,
duration=1.0,
) -> dict:
duration = parse_timedelta(duration)
out = {}
for size_str in sizes:
with tmpdir(dir=rootdir) as dir:
dir = pathlib.Path(dir)
names = list(map(str, range(100)))
size = parse_bytes(size_str)
data = os.urandom(size)
start = time()
total = 0
while time() < start + duration:
with open(dir / random.choice(names), mode="ab") as f:
f.write(data)
os.fsync(f)
total += size
out[size_str] = total / (time() - start)
return out
|
def benchmark_disk(
sizes=["1 kiB", "100 kiB", "1 MiB", "10 MiB", "100 MiB"],
rootdir=None,
duration=1.0,
) -> dict:
duration = parse_timedelta(duration)
out = {}
for size_str in sizes:
with tmpdir(dir=rootdir) as dir:
dir = pathlib.Path(dir)
names = list(map(str, range(100)))
size = parse_bytes(size_str)
data = os.urandom(size)
start = time()
total = 0
while time() < start + duration:
with open(dir / random.choice(names), mode="ab") as f:
f.write(data)
f.flush()
os.fsync(f.fileno())
total += size
out[size_str] = total / (time() - start)
return out
|
54,068 |
def parse_arguments(args=None):
parser = argparse.ArgumentParser(
description='Plots genomic tracks on specified region(s). '
'Citation : Ramirez et al. High-resolution TADs reveal DNA '
'sequences underlying genome organization in flies. '
'Nature Communications (2018) doi:10.1038/s41467-017-02525-w',
usage="%(prog)s --tracks tracks.ini --region chr1:1000000-4000000 -o image.png")
parser.add_argument('--tracks',
help='File containing the instructions to plot the tracks. '
'The tracks.ini file can be genarated using the `make_tracks_file` program.',
type=argparse.FileType('r'),
required=True,
)
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument('--region',
help='Region to plot, the format is chr:start-end')
group.add_argument('--BED',
help='Instead of a region, a file containing the regions to plot, in BED format, '
'can be given. If this is the case, multiple files will be created using a prefix '
'the value of --outFileName',
type=argparse.FileType('r')
)
parser.add_argument('--width',
help='figure width in centimeters (default is {})'.format(DEFAULT_FIGURE_WIDTH),
type=float,
default=DEFAULT_FIGURE_WIDTH)
parser.add_argument('--height',
help='Figure height in centimeters. If not given, the figure height is computed '
'based on the heights of the tracks. If given, the track height are proportionally '
'scaled to match the desired figure height.',
type=float)
parser.add_argument('--title', '-t',
help='Plot title',
required=False)
parser.add_argument('--outFileName', '-out',
help='File name to save the image, file prefix in case multiple images '
'are stored',
required=True)
parser.add_argument('--fontSize',
help='Font size for the labels of the plot (default is 0.3 * figure width)',
type=float)
parser.add_argument('--dpi',
help='Resolution for the image in case the'
' ouput is a raster graphics image (e.g png, jpg) (default is 72)',
type=int,
default=72
)
parser.add_argument('--trackLabelFraction',
help='By default the space dedicated to the track labels is 0.05 of the'
' plot width. This fraction can be changed with this parameter if needed.',
default=0.05,
type=float)
parser.add_argument('--trackLabelHAlign',
help='By default, the horizontal alignment of the track '
'labels is left. This alignemnt can be changed to '
'right or center.',
default='left',
choices=['left', 'right', 'center'])
parser.add_argument('--decreasingXAxis',
help='By default, the x-axis is increasing but if you '
'want to see all the tracks with a decreasing X '
'axis. Put this option.',
action='store_true')
parser.add_argument('--version', action='version',
version='%(prog)s {}'.format(__version__))
return parser
|
def parse_arguments(args=None):
parser = argparse.ArgumentParser(
description='Plots genomic tracks on specified region(s). '
'Citation : Ramirez et al. High-resolution TADs reveal DNA '
'sequences underlying genome organization in flies. '
'Nature Communications (2018) doi:10.1038/s41467-017-02525-w',
usage="%(prog)s --tracks tracks.ini --region chr1:1000000-4000000 -o image.png")
parser.add_argument('--tracks',
help='File containing the instructions to plot the tracks. '
'The tracks.ini file can be genarated using the `make_tracks_file` program.',
type=argparse.FileType('r'),
required=True,
)
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument('--region',
help='Region to plot, the format is chr:start-end')
group.add_argument('--BED',
help='Instead of a region, a file containing the regions to plot, in BED format, '
'can be given. If this is the case, multiple files will be created using a prefix '
'the value of --outFileName',
type=argparse.FileType('r')
)
parser.add_argument('--width',
help='figure width in centimeters (default is {})'.format(DEFAULT_FIGURE_WIDTH),
type=float,
default=DEFAULT_FIGURE_WIDTH)
parser.add_argument('--height',
help='Figure height in centimeters. If not given, the figure height is computed '
'based on the heights of the tracks. If given, the track height are proportionally '
'scaled to match the desired figure height.',
type=float)
parser.add_argument('--title', '-t',
help='Plot title',
required=False)
parser.add_argument('--outFileName', '-out',
help='File name to save the image, file prefix in case multiple images '
'are stored',
required=True)
parser.add_argument('--fontSize',
help='Font size for the labels of the plot (default is 0.3 * figure width)',
type=float)
parser.add_argument('--dpi',
help='Resolution for the image in case the'
' ouput is a raster graphics image (e.g png, jpg) (default is 72)',
type=int,
default=72
)
parser.add_argument('--trackLabelFraction',
help='By default the space dedicated to the track labels is 0.05 of the'
' plot width. This fraction can be changed with this parameter if needed.',
default=0.05,
type=float)
parser.add_argument('--trackLabelHAlign',
help='By default, the horizontal alignment of the track '
'labels is left. This alignemnt can be changed to '
'right or center.',
default='left',
choices=['left', 'right', 'center'])
parser.add_argument('--decreasingXAxis',
help='By default, the x-axis is increasing.'
'Use this option if you want to see all tracks with a decreasing x-axis.',
action='store_true')
parser.add_argument('--version', action='version',
version='%(prog)s {}'.format(__version__))
return parser
|
5,708 |
def _ebrahimi_entropy(X, m):
"""Compute the Ebrahimi estimator as described in [6]"""
# No equation number, but referred to as HE_mn
n = X.shape[-1]
X = _pad_along_last_axis(X, m)
differences = X[..., 2 * m:] - X[..., : -2 * m:]
i = np.arange(1, n+1).astype(float)
ci = np.ones_like(i)*2
ci[i <= m] = 1 + (i[i <= m] - 1)/m
ci[i >= n - m + 1] = 1 + (n - i[i >= n-m+1])/m
logs = np.log(n * differences / (ci * m))
return np.mean(logs, axis=-1)
|
def _ebrahimi_entropy(X, m):
"""Compute the Ebrahimi estimator as described in [6]."""
# No equation number, but referred to as HE_mn
n = X.shape[-1]
X = _pad_along_last_axis(X, m)
differences = X[..., 2 * m:] - X[..., : -2 * m:]
i = np.arange(1, n+1).astype(float)
ci = np.ones_like(i)*2
ci[i <= m] = 1 + (i[i <= m] - 1)/m
ci[i >= n - m + 1] = 1 + (n - i[i >= n-m+1])/m
logs = np.log(n * differences / (ci * m))
return np.mean(logs, axis=-1)
|
13,264 |
def binary_operation(
func: Callable[[X, X], Y],
*,
associative: bool = True,
commutative: bool = True,
identity: Union[X, InferType, None] = infer,
distributes_over: Callable[[X, X], X] = None,
except_: Except = (),
style: str = "pytest",
) -> str:
"""Write property tests for the binary operation ``func``.
While :wikipedia:`binary operations <Binary_operation>` are not particularly
common, they have such nice properties to test that it seems a shame not to
demonstrate them with a ghostwriter. For an operator `f`, test that:
- if :wikipedia:`associative <Associative_property>`,
``f(a, f(b, c)) == f(f(a, b), c)``
- if :wikipedia:`commutative <Commutative_property>`, ``f(a, b) == f(b, a)``
- if :wikipedia:`identity <Identity_element>` is not None, ``f(a, identity) == a``
- if :wikipedia:`distributes_over <Distributive_property>` is ``+``,
``f(a, b) + f(b, c)) == f(a, b+c)``
For example:
.. code-block:: python
ghostwriter.binary_operation(
operator.mul,
identity=1,
inverse=operator.div,
distributes_over=operator.add,
style="unittest",
)
"""
if not callable(func):
raise InvalidArgument(f"Got non-callable func={func!r}")
except_ = _check_except(except_)
_check_style(style)
check_type(bool, associative, "associative")
check_type(bool, commutative, "commutative")
if distributes_over is not None and not callable(distributes_over):
raise InvalidArgument(
f"distributes_over={distributes_over!r} must be an operation which "
f"distributes over {func.__name__}"
)
if not any([associative, commutative, identity, distributes_over]):
raise InvalidArgument(
"You must select at least one property of the binary operation to test."
)
imports, body = _make_binop_body(
func,
associative=associative,
commutative=commutative,
identity=identity,
distributes_over=distributes_over,
except_=except_,
style=style,
)
return _make_test(imports, body)
|
def binary_operation(
func: Callable[[X, X], Y],
*,
associative: bool = True,
commutative: bool = True,
identity: Union[X, InferType, None] = infer,
distributes_over: Callable[[X, X], X] = None,
except_: Except = (),
style: str = "pytest",
) -> str:
"""Write property tests for the binary operation ``func``.
While :wikipedia:`binary operations <Binary_operation>` are not particularly
common, they have such nice properties to test that it seems a shame not to
demonstrate them with a ghostwriter. For an operator `f`, test that:
- if :wikipedia:`associative <Associative_property>`,
``f(a, f(b, c)) == f(f(a, b), c)``
- if :wikipedia:`commutative <Commutative_property>`, ``f(a, b) == f(b, a)``
- if :wikipedia:`identity <Identity_element>` is not None, ``f(a, identity) == a``
- if :wikipedia:`distributes_over <Distributive_property>` is ``+``,
``f(a, b) + f(a, c)) == f(a, b+c)``
For example:
.. code-block:: python
ghostwriter.binary_operation(
operator.mul,
identity=1,
inverse=operator.div,
distributes_over=operator.add,
style="unittest",
)
"""
if not callable(func):
raise InvalidArgument(f"Got non-callable func={func!r}")
except_ = _check_except(except_)
_check_style(style)
check_type(bool, associative, "associative")
check_type(bool, commutative, "commutative")
if distributes_over is not None and not callable(distributes_over):
raise InvalidArgument(
f"distributes_over={distributes_over!r} must be an operation which "
f"distributes over {func.__name__}"
)
if not any([associative, commutative, identity, distributes_over]):
raise InvalidArgument(
"You must select at least one property of the binary operation to test."
)
imports, body = _make_binop_body(
func,
associative=associative,
commutative=commutative,
identity=identity,
distributes_over=distributes_over,
except_=except_,
style=style,
)
return _make_test(imports, body)
|
35,275 |
def initialize_decomposition(tensor_slices, rank, init='random', svd='numpy_svd', random_state=None):
r"""Initiate a random PARAFAC2 decomposition given rank and tensor slices
Parameters
----------
tensor_slices : Iterable of ndarray
rank : int
init : {'random', 'svd', KruskalTensor, Parafac2Tensor}, optional
random_state : `np.random.RandomState`
Returns
-------
parafac2_tensor : Parafac2Tensor
List of initialized factors of the CP decomposition where element `i`
is of shape (tensor.shape[i], rank)
"""
shapes = [m.shape for m in tensor_slices]
if init == 'random':
return random_parafac2(shapes, rank, full=False, random_state=random_state)
elif init == 'svd':
try:
svd_fun = tl.SVD_FUNS[svd]
except KeyError:
message = 'Got svd={}. However, for the current backend ({}), the possible choices are {}'.format(
svd, tl.get_backend(), tl.SVD_FUNS)
raise ValueError(message)
padded_tensor = _pad_by_zeros(tensor_slices)
A = svd_fun(unfold(padded_tensor, 0), n_eigenvecs=rank)[0]
C = svd_fun(unfold(padded_tensor, 2), n_eigenvecs=rank)[0]
B = T.eye(rank)
projections = _compute_projections(tensor_slices, (A, B, C), svd_fun)
return Parafac2Tensor((None, (A, B, C), projections))
elif isinstance(init, (tuple, list, Parafac2Tensor, KruskalTensor)):
try:
decomposition = Parafac2Tensor.from_kruskaltensor(init, parafac2_tensor_ok=True)
except ValueError:
raise ValueError(
'If initialization method is a mapping, then it must '
'be possible to convert it to a Parafac2Tensor instance'
)
if decomposition.rank != rank:
raise ValueError('Cannot init with a decomposition of different rank')
return decomposition
raise ValueError('Initialization method "{}" not recognized'.format(init))
|
def initialize_decomposition(tensor_slices, rank, init='random', svd='numpy_svd', random_state=None):
r"""Initiate a random PARAFAC2 decomposition given rank and tensor slices
Parameters
----------
tensor_slices : Iterable of ndarray
rank : int
init : {'random', 'svd', KruskalTensor, Parafac2Tensor}, optional
random_state : `np.random.RandomState`
Returns
-------
parafac2_tensor : Parafac2Tensor
List of initialized factors of the CP decomposition where element `i`
is of shape (tensor.shape[i], rank)
"""
shapes = [m.shape for m in tensor_slices]
if init == 'random':
return random_parafac2(shapes, rank, full=False, random_state=random_state)
elif init == 'svd':
try:
svd_fun = tl.SVD_FUNS[svd]
except KeyError:
message = 'Got svd={}. However, for the current backend ({}), the possible choices are {}'.format(
svd, tl.get_backend(), tl.SVD_FUNS)
raise ValueError(message)
padded_tensor = _pad_by_zeros(tensor_slices)
A = svd_fun(unfold(padded_tensor, 0), n_eigenvecs=rank)[0]
C = svd_fun(unfold(padded_tensor, 2), n_eigenvecs=rank)[0]
B = T.eye(rank, **T.context(tensor_slices[0]))
projections = _compute_projections(tensor_slices, (A, B, C), svd_fun)
return Parafac2Tensor((None, (A, B, C), projections))
elif isinstance(init, (tuple, list, Parafac2Tensor, KruskalTensor)):
try:
decomposition = Parafac2Tensor.from_kruskaltensor(init, parafac2_tensor_ok=True)
except ValueError:
raise ValueError(
'If initialization method is a mapping, then it must '
'be possible to convert it to a Parafac2Tensor instance'
)
if decomposition.rank != rank:
raise ValueError('Cannot init with a decomposition of different rank')
return decomposition
raise ValueError('Initialization method "{}" not recognized'.format(init))
|
48,311 |
def add_initiator(module, array):
""""Add a host FC initiator."""
changed = False
wwn = validate_wwn(module, 'wwn is required for adding initiator.')
if module.check_mode:
module.exit_json(changed=changed)
try:
ini = array.add_initiator(
module.params['name'],
'Ansible FC initiator',
wwn)
if ini:
module.log(msg='Added initiator {0}'.format(ini['id']))
changed = True
else:
raise Exception
except Exception:
module.fail_json(msg='Initiator {0} add failed.'.format(wwn))
module.exit_json(changed=changed)
|
def add_initiator(module, array):
""""Add a host FC initiator."""
changed = False
wwn = validate_wwn(module, 'wwn is required for adding initiator.')
if module.check_mode:
module.exit_json(changed=changed)
try:
ini = array.add_initiator(
module.params['name'],
'Ansible FC initiator',
wwn)
if ini:
msg = 'Added initiator {0}'.format(ini['id'])
module.log(msg=msg)
changed = True
else:
raise Exception
except Exception:
module.fail_json(msg='Initiator {0} add failed.'.format(wwn))
module.exit_json(changed=changed)
|
7,083 |
def hashbang_and_plugin_templating_clash(
templating: str, flines: List[str]
) -> Optional[str]:
"""Return file's hashbang/shebang, but raise TemplateVarLanguageClash
if plugin-set template engine and hashbang do not match.
Args:
templating: plugin-set template engine.
flines: the lines of text from file.
Returns:
the hashbang, in lower case, to allow for users using any of
['empy', 'EmPy', 'EMPY'], or similar in other templating languages.
Examples:
- Hashbang and templating_detected match:
>>> thisfunc = hashbang_and_plugin_templating_clash
>>> thisfunc('jinja2', ['#!Jinja2', 'stuff'])
'jinja2'
- Function returns nothing:
>>> thisfunc('', [''])
- Function raises if templating engines clash:
>>> thisfunc('empy', ['#!jinja2'])
Traceback (most recent call last):
...
cylc.flow.parsec.exceptions.TemplateVarLanguageClash: ...
"""
hashbang: Optional[str] = None
# Get hashbang if possible:
if flines:
match = re.match(r'^#!(\S+)', flines[0])
if match:
hashbang = match[1].lower()
if (
hashbang and templating
and templating != 'template variables'
and hashbang != templating
):
raise TemplateVarLanguageClash(
f"Plugins set templating engine = {templating}"
f" which does not match {flines[0]} set in flow.cylc."
)
return hashbang
|
def hashbang_and_plugin_templating_clash(
templating: str, flines: List[str]
) -> Optional[str]:
"""Return file's hashbang/shebang, but raise TemplateVarLanguageClash
if plugin-set template engine and hashbang do not match.
Args:
templating: Template engine set by a plugin.
flines: the lines of text from file.
Returns:
the hashbang, in lower case, to allow for users using any of
['empy', 'EmPy', 'EMPY'], or similar in other templating languages.
Examples:
- Hashbang and templating_detected match:
>>> thisfunc = hashbang_and_plugin_templating_clash
>>> thisfunc('jinja2', ['#!Jinja2', 'stuff'])
'jinja2'
- Function returns nothing:
>>> thisfunc('', [''])
- Function raises if templating engines clash:
>>> thisfunc('empy', ['#!jinja2'])
Traceback (most recent call last):
...
cylc.flow.parsec.exceptions.TemplateVarLanguageClash: ...
"""
hashbang: Optional[str] = None
# Get hashbang if possible:
if flines:
match = re.match(r'^#!(\S+)', flines[0])
if match:
hashbang = match[1].lower()
if (
hashbang and templating
and templating != 'template variables'
and hashbang != templating
):
raise TemplateVarLanguageClash(
f"Plugins set templating engine = {templating}"
f" which does not match {flines[0]} set in flow.cylc."
)
return hashbang
|
5,756 |
def get_matfile_version(file_name, appendmat=True):
"""
Return major, minor tuple depending on apparent mat file type
Where:
#. 0,x -> version 4 format mat files
#. 1,x -> version 5 format mat files
#. 2,x -> version 7.3 format mat files (HDF format)
Parameters
----------
file_name : str
Name of the mat file (do not need .mat extension if
appendmat==True). Can also pass open file-like object.
appendmat : bool, optional
True to append the .mat extension to the end of the given
filename, if not already present.
Returns
-------
major_version : {0, 1, 2}
major MATLAB File format version
minor_version : int
minor MATLAB file format version
Raises
------
MatReadError
If the file is empty.
ValueError
The matfile version is unknown.
Notes
-----
Has the side effect of setting the file read pointer to 0
"""
from .mio import _open_file_context
with _open_file_context(file_name, appendmat=appendmat) as fileobj:
return _get_matfile_version(fileobj)
|
def get_matfile_version(file_name, appendmat=True):
"""
Return major, minor tuple depending on apparent mat file type
Where:
#. 0,x -> version 4 format mat files
#. 1,x -> version 5 format mat files
#. 2,x -> version 7.3 format mat files (HDF format)
Parameters
----------
file_name : str
Name of the mat file (do not need .mat extension if
appendmat==True). Can also pass open file-like object.
appendmat : bool, optional
True to append the .mat extension to the end of the given
filename, if not already present. Default is True.
Returns
-------
major_version : {0, 1, 2}
major MATLAB File format version
minor_version : int
minor MATLAB file format version
Raises
------
MatReadError
If the file is empty.
ValueError
The matfile version is unknown.
Notes
-----
Has the side effect of setting the file read pointer to 0
"""
from .mio import _open_file_context
with _open_file_context(file_name, appendmat=appendmat) as fileobj:
return _get_matfile_version(fileobj)
|
1,830 |
def _check_unknown(values, known_values, return_mask=False):
"""
Helper function to check for unknowns in values to be encoded.
Uses pure python method for object dtype, and numpy method for
all other dtypes.
Parameters
----------
values : array
Values to check for unknowns.
known_values : array
Known values. Must be unique.
return_mask : bool, default False
If True, return a mask of the same shape as `values` indicating
the valid values.
Returns
-------
diff : list
The unique values present in `values` and not in `know_values`.
valid_mask : boolean array
Additionally returned if ``return_mask=True``.
"""
valid_mask = None
if values.dtype.kind in 'UO':
uniques_set = set(known_values)
diff = set(values) - uniques_set
if return_mask:
if diff:
valid_mask = np.array([val in uniques_set for val in values])
else:
valid_mask = np.ones(len(values), dtype=bool)
none_in_diff = None in diff
nan_in_diff = np.nan in diff
if none_in_diff and nan_in_diff:
raise ValueError("Input wiith both types of missing, None and "
"np.nan, is not supported")
if none_in_diff:
diff.remove(None)
diff = list(diff)
diff.append(None)
elif nan_in_diff:
diff.remove(np.nan)
diff = list(diff)
diff.append(np.nan)
else:
diff = list(diff)
else:
unique_values = np.unique(values)
diff = np.setdiff1d(unique_values, known_values,
assume_unique=True)
if return_mask:
if diff.size:
valid_mask = np.in1d(values, known_values)
else:
valid_mask = np.ones(len(values), dtype=bool)
# check for nans in the known_values
if np.isnan(known_values).any():
diff_is_nan = np.isnan(diff)
if diff_is_nan.any():
# removes nan from valid_mask
if diff.size and return_mask:
is_nan = np.isnan(values)
valid_mask[is_nan] = 1
# remove nan from diff
diff = diff[~diff_is_nan]
diff = list(diff)
if return_mask:
return diff, valid_mask
# valid_mask is
return diff
|
def _check_unknown(values, known_values, return_mask=False):
"""
Helper function to check for unknowns in values to be encoded.
Uses pure python method for object dtype, and numpy method for
all other dtypes.
Parameters
----------
values : array
Values to check for unknowns.
known_values : array
Known values. Must be unique.
return_mask : bool, default False
If True, return a mask of the same shape as `values` indicating
the valid values.
Returns
-------
diff : list
The unique values present in `values` and not in `know_values`.
valid_mask : boolean array
Additionally returned if ``return_mask=True``.
"""
valid_mask = None
if values.dtype.kind in 'UO':
uniques_set = set(known_values)
diff = set(values) - uniques_set
if return_mask:
if diff:
valid_mask = np.array([val in uniques_set for val in values])
else:
valid_mask = np.ones(len(values), dtype=bool)
none_in_diff = None in diff
nan_in_diff = np.nan in diff
if none_in_diff and nan_in_diff:
raise ValueError("Input with both types of missing, None and "
"np.nan, is not supported")
if none_in_diff:
diff.remove(None)
diff = list(diff)
diff.append(None)
elif nan_in_diff:
diff.remove(np.nan)
diff = list(diff)
diff.append(np.nan)
else:
diff = list(diff)
else:
unique_values = np.unique(values)
diff = np.setdiff1d(unique_values, known_values,
assume_unique=True)
if return_mask:
if diff.size:
valid_mask = np.in1d(values, known_values)
else:
valid_mask = np.ones(len(values), dtype=bool)
# check for nans in the known_values
if np.isnan(known_values).any():
diff_is_nan = np.isnan(diff)
if diff_is_nan.any():
# removes nan from valid_mask
if diff.size and return_mask:
is_nan = np.isnan(values)
valid_mask[is_nan] = 1
# remove nan from diff
diff = diff[~diff_is_nan]
diff = list(diff)
if return_mask:
return diff, valid_mask
# valid_mask is
return diff
|
46,760 |
def test_rule_objects(schema_obj):
"""Ensure that all objects referenced in the schema rules are defined in
its object portion.
This test currently fails because rules files reference object keys for some object types,
including entities, columns, and metadata fields,
but reference "name" or "value" elements of the object definitions for other object types,
including suffixes and extensions.
In the case of datatypes, the key and "value" field are always the same.
Some other object types, such as associated_data, common_principles, formats, modalities,
and top_level_files, are not checked in the rules at all.
Additionally, this test only checks rules that fit the keys.
"""
OBJECT_TYPE_MAPPER = {
"metadata": "fields", # metadata in objects is referred to as fields in rules
}
not_found = [] # A list of undefined, but referenced, objects
object_types = list(schema_obj["objects"].keys())
for object_type in object_types:
# Find all uses of a given object type in the schema rules
type_instances_in_rules = _dict_key_lookup(
schema_obj["rules"],
OBJECT_TYPE_MAPPER.get(object_type, object_type),
)
if not type_instances_in_rules:
continue
for type_instance in type_instances_in_rules:
path, instance = type_instance
is_list = True
if isinstance(instance, dict):
instance = list(instance.keys())
is_list = False
for i_use, use in enumerate(instance):
if use == "derivatives":
# Skip derivatives folders, because the folder is treated as a "use" instead.
continue
elif "[]" in use:
# Rules may reference metadata fields with lists.
# This test can't handle this yet, so skip.
continue
elif "{}" in use:
# Rules may reference sub-dictionaries in metadata fields.
# This test can't handle this yet, so skip.
continue
if object_type in ["extensions", "suffixes"]:
# Some object types are referenced via their "value" fields in the rules
object_values = [
schema_obj["objects"][object_type][k]["value"]
for k in schema_obj["objects"][object_type].keys()
]
else:
# But other object types are referenced via their keys
object_values = list(schema_obj["objects"][object_type].keys())
# Build a list of items mentioned in rules, but not found in objects.
if use not in object_values:
temp_path = path[:]
if is_list:
temp_path[-1] += f"[{i_use}]"
temp_path.append(use)
not_found.append(temp_path)
if not_found:
not_found_string = "\n".join(
[".".join(sublist[:-1]) + " == " + sublist[-1] for sublist in not_found]
)
raise Exception(not_found_string)
|
def test_rule_objects(schema_obj):
"""Ensure that all objects referenced in the schema rules are defined in
its object portion.
This test currently fails because rules files reference object keys for some object types,
including entities, columns, and metadata fields,
but reference "name" or "value" elements of the object definitions for other object types,
including suffixes and extensions.
In the case of datatypes, the key and "value" field are always the same.
Some other object types, such as associated_data, common_principles, formats, modalities,
and top_level_files, are not checked in the rules at all.
Additionally, this test only checks rules that fit the keys.
"""
OBJECT_TYPE_MAPPER = {
"metadata": "fields", # metadata in objects is referred to as fields in rules
}
not_found = [] # A list of undefined, but referenced, objects
object_types = list(schema_obj["objects"].keys())
for object_type in object_types:
# Find all uses of a given object type in the schema rules
type_instances_in_rules = _dict_key_lookup(
schema_obj["rules"],
OBJECT_TYPE_MAPPER.get(object_type, object_type),
)
if not type_instances_in_rules:
continue
for type_instance in type_instances_in_rules:
path, instance = type_instance
is_list = True
if isinstance(instance, dict):
instance = list(instance.keys())
is_list = False
for i_use, use in enumerate(instance):
if use == "derivatives":
# Skip derivatives folders, because the folder is treated as a "use" instead.
continue
elif "[]" in use:
# Rules may reference metadata fields with lists.
# This test can't handle this yet, so skip.
continue
elif "{}" in use:
# Rules may reference sub-dictionaries in metadata fields.
# This test can't handle this yet, so skip.
continue
if object_type in ["extensions", "suffixes"]:
# Some object types are referenced via their "value" fields in the rules
object_values = [
schema_obj["objects"][object_type][k]["value"]
for k in schema_obj["objects"][object_type].keys()
]
else:
# But other object types are referenced via their keys
object_values = list(schema_obj["objects"][object_type].keys())
# Build a list of items mentioned in rules, but not found in objects.
if use not in object_values:
temp_path = path[:]
if is_list:
temp_path[-1] += f"[{i_use}]"
temp_path.append(use)
not_found.append(temp_path)
if not_found:
not_found_string = "\n".join(
[f"{'.'.join(path)} == {val}" for path, val in not_found]
)
raise Exception(not_found_string)
|
5,457 |
def pytest_addoption(parser):
"""
register argparse-style options and ini-style config values.
"""
test_selection_group = parser.getgroup("Tests Selection")
test_selection_group.addoption(
"--from-filenames",
default=None,
help=(
"Pass a comma-separated list of file paths, and any test module which"
" corresponds to the specified file(s) will run. For example, if 'setup.py'"
" was passed, then the corresponding test files defined in"
" 'tests/filename_map.yml' would run. Absolute paths are assumed to be"
" files containing relative paths, one per line. Providing the paths in a"
" file can help get around shell character limits when the list of files is"
" long."
),
)
# Add deprecated CLI flag until we completely switch to PyTest
test_selection_group.addoption(
"--names-file", default=None, help="Deprecated option"
)
test_selection_group.addoption(
"--transport",
default="zeromq",
choices=("zeromq", "tcp"),
help=(
"Select which transport to run the integration tests with, zeromq or tcp."
" Default: %(default)s"
),
)
test_selection_group.addoption(
"--system-install",
action="store_true",
default=False,
help=("Use system installed salt."),
)
test_selection_group.addoption(
"--scripts-path",
help=(
"Run the tests using the scripts found in this location. This"
"option is used to test salt artifacts."
),
)
test_selection_group.addoption(
"--ssh",
"--ssh-tests",
dest="ssh",
action="store_true",
default=False,
help=(
"Run salt-ssh tests. These tests will spin up a temporary "
"SSH server on your machine. In certain environments, this "
"may be insecure! Default: False"
),
)
test_selection_group.addoption(
"--proxy",
"--proxy-tests",
dest="proxy",
action="store_true",
default=False,
help="Run proxy tests (DEPRECATED)",
)
test_selection_group.addoption(
"--run-slow",
action="store_true",
default=False,
help="Run slow tests.",
)
output_options_group = parser.getgroup("Output Options")
output_options_group.addoption(
"--output-columns",
default=80,
type=int,
help="Number of maximum columns to use on the output",
)
output_options_group.addoption(
"--no-colors",
"--no-colours",
default=False,
action="store_true",
help="Disable colour printing.",
)
# ----- Test Groups --------------------------------------------------------------------------------------------->
# This will allow running the tests in chunks
test_selection_group.addoption(
"--test-group-count",
dest="test-group-count",
type=int,
help="The number of groups to split the tests into",
)
test_selection_group.addoption(
"--test-group",
dest="test-group",
type=int,
help="The group of tests that should be executed",
)
|
def pytest_addoption(parser):
"""
register argparse-style options and ini-style config values.
"""
test_selection_group = parser.getgroup("Tests Selection")
test_selection_group.addoption(
"--from-filenames",
default=None,
help=(
"Pass a comma-separated list of file paths, and any test module which"
" corresponds to the specified file(s) will run. For example, if 'setup.py'"
" was passed, then the corresponding test files defined in"
" 'tests/filename_map.yml' would run. Absolute paths are assumed to be"
" files containing relative paths, one per line. Providing the paths in a"
" file can help get around shell character limits when the list of files is"
" long."
),
)
# Add deprecated CLI flag until we completely switch to PyTest
test_selection_group.addoption(
"--names-file", default=None, help="Deprecated option"
)
test_selection_group.addoption(
"--transport",
default="zeromq",
choices=("zeromq", "tcp"),
help=(
"Select which transport to run the integration tests with, zeromq or tcp."
" Default: %(default)s"
),
)
test_selection_group.addoption(
"--system-install",
action="store_true",
default=False,
help="Use system installed salt.",
)
test_selection_group.addoption(
"--scripts-path",
help=(
"Run the tests using the scripts found in this location. This"
"option is used to test salt artifacts."
),
)
test_selection_group.addoption(
"--ssh",
"--ssh-tests",
dest="ssh",
action="store_true",
default=False,
help=(
"Run salt-ssh tests. These tests will spin up a temporary "
"SSH server on your machine. In certain environments, this "
"may be insecure! Default: False"
),
)
test_selection_group.addoption(
"--proxy",
"--proxy-tests",
dest="proxy",
action="store_true",
default=False,
help="Run proxy tests (DEPRECATED)",
)
test_selection_group.addoption(
"--run-slow",
action="store_true",
default=False,
help="Run slow tests.",
)
output_options_group = parser.getgroup("Output Options")
output_options_group.addoption(
"--output-columns",
default=80,
type=int,
help="Number of maximum columns to use on the output",
)
output_options_group.addoption(
"--no-colors",
"--no-colours",
default=False,
action="store_true",
help="Disable colour printing.",
)
# ----- Test Groups --------------------------------------------------------------------------------------------->
# This will allow running the tests in chunks
test_selection_group.addoption(
"--test-group-count",
dest="test-group-count",
type=int,
help="The number of groups to split the tests into",
)
test_selection_group.addoption(
"--test-group",
dest="test-group",
type=int,
help="The group of tests that should be executed",
)
|
34,846 |
def migrate_domain_format(
domain_path: Union[Text, Path], out_path: Optional[Union[Text, Path]],
) -> None:
"""Converts 2.0 domain to 3.0 format."""
domain_path = Path(domain_path)
out_path = Path(out_path) if out_path else None
domain_parent_dir = domain_path.parent
migrate_file_only = domain_path.is_file()
# Ensure the backup location does not exist yet
# Note: We demand that file as well as folder with this name gets deleted before
# the command is run to avoid confusion afterwards.
suffix = "original_domain"
suffix = f"{suffix}.yml" if migrate_file_only else suffix
backup_location = domain_parent_dir / suffix
if backup_location.exists():
backup_location_str = "directory" if backup_location.isdir() else "file"
raise RasaException(
f"The domain from '{domain_path}' could not be migrated since the "
f"a {backup_location_str} {backup_location} already exists."
f"Please remove that there is no file or folder at {backup_location}."
)
# Choose a default output location if nothing was specified
if out_path is None:
suffix = DEFAULT_DOMAIN_PATH if migrate_file_only else "new_domain"
out_path = domain_parent_dir / suffix
# Ensure the output location is not already in-use
if not migrate_file_only:
if out_path.is_dir() and any(out_path.iterdir()):
raise RasaException(
f"The domain from '{domain_path}' could not be migrated to "
f"{out_path} because that folder is not empty."
"Please remove the folder and try again."
)
else:
if out_path.is_file():
raise RasaException(
f"The domain from '{domain_path}' could not be migrated to "
f"{out_path} because a file already exists."
"Please remove the file and try again."
)
# Sanity Check: Assert the files to be migrated aren't in 3.0 format already
# Note: we do not enforce that the version tag is 2.0 everywhere + validate that
# migrate-able domain files are among these files later
original_files = (
[file for file in domain_path.iterdir() if Domain.is_domain_file(file)]
if domain_path.is_dir()
else [domain_path]
)
migrated_files = [
file
for file in original_files
if rasa.shared.utils.io.read_yaml_file(file).get("version") == "3.0"
]
if migrated_files:
raise RasaException(
f"Some of the given files ({[file for file in migrated_files]}) "
f"have already been migrated to Rasa 3.0 format. Please remove these "
f"migrated files (or replace them with files in 2.0 format) and try again."
)
# Validate given domain file(s) and migrate them
try:
created_out_dir = False
if not migrate_file_only:
if not out_path.is_dir():
rasa.shared.utils.io.raise_warning(
f"The out path provided did not exist yet. Created directory "
f"{out_path}."
)
out_path.mkdir(parents=True)
created_out_dir = True
backup_location.mkdir()
original_domain = _migrate_domain_files(
domain_path, backup_location, out_path
)
else:
if not Domain.is_domain_file(domain_path):
raise RasaException(
f"The file '{domain_path}' could not be validated as a "
f"domain file. Only domain yaml files can be migrated. "
)
original_domain = _create_back_up(domain_path, backup_location)
new_forms, updated_slots = _migrate_form_slots(original_domain)
new_slots = _migrate_auto_fill_and_custom_slots(original_domain, updated_slots)
_write_final_domain(domain_path, new_forms, new_slots, out_path)
rasa.shared.utils.cli.print_success(
f"Your domain file '{str(domain_path)}' was successfully migrated! "
f"The migrated version is now '{str(out_path)}'. "
f"The original domain file is backed-up at '{str(backup_location)}'."
)
except Exception as e:
# Remove the backups if migration couldn't be completed
if backup_location.is_dir():
shutil.rmtree(backup_location)
if out_path.is_dir():
if created_out_dir:
shutil.rmtree(out_path)
else: # just remove contained files so we do not mess with access rights
for f in out_path.glob("*"):
f.unlink()
if backup_location.is_file():
backup_location.unlink()
raise e
|
def migrate_domain_format(
domain_path: Union[Text, Path], out_path: Optional[Union[Text, Path]],
) -> None:
"""Converts 2.0 domain to 3.0 format."""
domain_path = Path(domain_path)
out_path = Path(out_path) if out_path else None
domain_parent_dir = domain_path.parent
migrate_file_only = domain_path.is_file()
# Ensure the backup location does not exist yet
# Note: We demand that file as well as folder with this name gets deleted before
# the command is run to avoid confusion afterwards.
suffix = "original_domain"
suffix = f"{suffix}.yml" if migrate_file_only else suffix
backup_location = domain_parent_dir / suffix
if backup_location.exists():
backup_location_str = "directory" if backup_location.isdir() else "file"
raise RasaException(
f"The domain from '{domain_path}' could not be migrated since the "
f"a {backup_location_str} {backup_location} already exists."
f"Please remove that there is no file or folder at {backup_location}."
)
# Choose a default output location if nothing was specified
if out_path is None:
suffix = DEFAULT_DOMAIN_PATH if migrate_file_only else "new_domain"
out_path = domain_parent_dir / suffix
# Ensure the output location is not already in-use
if not migrate_file_only:
if out_path.is_dir() and any(out_path.iterdir()):
raise RasaException(
f"The domain from '{domain_path}' could not be migrated to "
f"{out_path} because that folder is not empty."
"Please remove the contents of the folder and try again."
)
else:
if out_path.is_file():
raise RasaException(
f"The domain from '{domain_path}' could not be migrated to "
f"{out_path} because a file already exists."
"Please remove the file and try again."
)
# Sanity Check: Assert the files to be migrated aren't in 3.0 format already
# Note: we do not enforce that the version tag is 2.0 everywhere + validate that
# migrate-able domain files are among these files later
original_files = (
[file for file in domain_path.iterdir() if Domain.is_domain_file(file)]
if domain_path.is_dir()
else [domain_path]
)
migrated_files = [
file
for file in original_files
if rasa.shared.utils.io.read_yaml_file(file).get("version") == "3.0"
]
if migrated_files:
raise RasaException(
f"Some of the given files ({[file for file in migrated_files]}) "
f"have already been migrated to Rasa 3.0 format. Please remove these "
f"migrated files (or replace them with files in 2.0 format) and try again."
)
# Validate given domain file(s) and migrate them
try:
created_out_dir = False
if not migrate_file_only:
if not out_path.is_dir():
rasa.shared.utils.io.raise_warning(
f"The out path provided did not exist yet. Created directory "
f"{out_path}."
)
out_path.mkdir(parents=True)
created_out_dir = True
backup_location.mkdir()
original_domain = _migrate_domain_files(
domain_path, backup_location, out_path
)
else:
if not Domain.is_domain_file(domain_path):
raise RasaException(
f"The file '{domain_path}' could not be validated as a "
f"domain file. Only domain yaml files can be migrated. "
)
original_domain = _create_back_up(domain_path, backup_location)
new_forms, updated_slots = _migrate_form_slots(original_domain)
new_slots = _migrate_auto_fill_and_custom_slots(original_domain, updated_slots)
_write_final_domain(domain_path, new_forms, new_slots, out_path)
rasa.shared.utils.cli.print_success(
f"Your domain file '{str(domain_path)}' was successfully migrated! "
f"The migrated version is now '{str(out_path)}'. "
f"The original domain file is backed-up at '{str(backup_location)}'."
)
except Exception as e:
# Remove the backups if migration couldn't be completed
if backup_location.is_dir():
shutil.rmtree(backup_location)
if out_path.is_dir():
if created_out_dir:
shutil.rmtree(out_path)
else: # just remove contained files so we do not mess with access rights
for f in out_path.glob("*"):
f.unlink()
if backup_location.is_file():
backup_location.unlink()
raise e
|
787 |
def compute_atlas_bundle(in_dir, subjects=None, group=None, mid_path='',
bundle_names=None, model_bundle_dir=None,
out_dir=None, merge_out=False, save_temp=False,
n_stream_min=10, n_stream_max=5000, n_point=20,
distance='mdf', comb_method='rlap',
skip_pairs=False):
"""Compute a population specific bundle atlas.
Given several segmented bundles as input, compute the atlas by combining
the bundles pairwise.
Parameters
----------
in_dir : str
Input folder.
subjects : str, optional
Path to a BIDS-like participants.tsv file with the IDs of the subjects
to be processed. If None, all folders in ``in_dir`` are considered as
subjects.
group : str, optional
Label to select a subject group when the tsv file defining subjects
has a ``group`` column. If None, all subjects are processed.
mid_path : str, optional
Intermediate path between ``in_dir`` and bundle files. Default is ''.
bundle_names : str, optional
Path to a tsv file with the names of the bundles to be processed. If
None, all trk files of the first subject will be considered as
bundle_names.
model_bundle_dir : str, optional
Directory with model bundles to be used as a reference to move all
bundles to a common space. If None, bundles are assumed to be in the
same space and no registration is performed.
out_dir : str, optional
Output directory. If None, the current working directory is used.
merge_out : boolean, optional
If True the resulting atlases of all bundles are combined into a single
file. Default is False.
save_temp : boolean, optional
If True the intermediate results of each tree level are saved in a temp
folder in trk and png formats. Default is False.
n_stream_min : int, optional
Bundles with less than ``n_stream_min`` streamlines wont be processed.
Default is 10.
n_stream_max : int, optional
Bundles with more than ``n_stream_max`` streamlines are cropped to have
that number and speed up the computation. Default is 5000.
n_point : int, optional
All streamlines are set to have ``n_point`` points. Default is 20.
distance : str, optional
Distance metric to be used to combine bundles. Default is 'mdf'. The
'mdf_se' distance uses only start/end points of streamlines.
comb_method : str, optional
Method used to combine each bundle pair. Default is 'rlap'.
skip_pairs : boolean, optional
If true bundle combination steps are randomly skipped. This helps to
obtain a sharper result. Default is False.
Returns
-------
atlas : list of Streamlines
A list with the computed atlas bundles.
atlas_merged : Streamlines
A single bundle containing all the computed atlas bundles together.
"""
if type(in_dir) != str:
raise TypeError("in_dir must be a string")
if type(mid_path) != str:
raise TypeError("mid_path must be a string")
if type(n_point) != int:
raise TypeError("n_point must be an int")
if isdir(in_dir) is False:
raise ValueError("Input directory does not exist")
if out_dir is None:
out_dir = getcwd()
if isdir(out_dir) is False:
raise ValueError("Output directory does not exist")
if n_stream_min < 1:
raise ValueError("n_stream_min must be >= 1")
if n_stream_max < 1:
raise ValueError("n_stream_max must be >= 1")
if n_point < 2:
raise ValueError("n_point must be >= 2")
print('Input directory:' + in_dir)
print('Output directory:' + out_dir)
# Create temporary folder
temp_dir = join(out_dir, 'temp')
if isdir(temp_dir):
print("There is already a /temp folder in out_dir. Deleting.")
rmtree(temp_dir)
mkdir(temp_dir)
# Get subjects (from in_dir or a BIDS-like parciticipants.tsv file)
if subjects is None:
files = listdir(in_dir)
subjects = [file for file in files if isdir(join(in_dir, file))]
else:
df = pd.read_csv(subjects, delimiter='\t', dtype='object')
if group is None:
subjects = list(df.participant)
else:
subjects = list(df.loc[df.group == group].participant)
subjects.sort() # necessary?
if len(set(subjects)) < len(subjects):
raise ValueError("Subjects cannot be duplicated")
print(str(len(subjects)) + " subjects to be processed:")
print(subjects)
# Get bundle names (from first subject folder or from tsv file)
if bundle_names is None:
bundle_dir = join(in_dir, subjects[0], mid_path)
print("Retrieving bundle names from " + bundle_dir)
if isdir(bundle_dir) is False:
raise ValueError("Path to subject bundles is incorrect")
files = listdir(bundle_dir)
trk_files = [file for file in files if file.endswith('.trk')]
bundle_names = [splitext(file)[0] for file in trk_files]
else:
df = pd.read_csv(bundle_names, delimiter='\t', dtype='object')
bundle_names = list(df.iloc[:, 0])
bundle_names.sort() # necessary?
if len(set(bundle_names)) < len(bundle_names):
raise ValueError("Bundle names cannot be duplicated")
print(str(len(bundle_names)) + " bundles to be processed:")
print(bundle_names)
# Create a dictionary with all bundle files
bundle_files = {}
for sub in subjects:
for bundle in bundle_names:
file = join(in_dir, sub, mid_path, bundle + '.trk')
bundle_files[(sub, bundle)] = file
# Get model bundle list
if model_bundle_dir is None:
model_bundles = None
else:
files = listdir(model_bundle_dir)
trk_files = [file for file in files if file.endswith('.trk')]
model_bundle_list = [splitext(file)[0] for file in trk_files]
if not all(x in model_bundle_list for x in bundle_names):
raise ValueError("Not all the specified bundles have a model")
model_bundles = {}
for bundle in bundle_names:
model_bundles[bundle] = join(model_bundle_dir, bundle + '.trk')
# Atlas building starts
atlas = []
for bundle in bundle_names:
print("Processing bundle: " + bundle)
step_dir = join(temp_dir, bundle, 'step_0')
makedirs(step_dir)
# Load model bundle if required
if model_bundles is not None:
file = model_bundles[bundle]
bundle_obj = load_tractogram(file, reference='same',
bbox_valid_check=False)
model_bundle = bundle_obj.streamlines
header_model \
= create_tractogram_header(file, *bundle_obj.space_attributes)
# Preprocess all bundles and save them as trk
file_list = []
for i, sub in enumerate(subjects):
file = bundle_files[(sub, bundle)]
bundle_obj = load_tractogram(file, reference='same',
bbox_valid_check=False)
streamlines = bundle_obj.streamlines
header = create_tractogram_header(file,
*bundle_obj.space_attributes)
n_stream = len(streamlines)
if n_stream < n_stream_min:
print(f"{file} has {n_stream} streamlines. Discarded.")
continue
elif n_stream > n_stream_max:
streamlines = select_random_set_of_streamlines(streamlines,
n_stream_max)
streamlines = set_number_of_points(streamlines, n_point)
if model_bundles is not None:
streamlines, _, _, _ = slr_with_qbx(static=model_bundle,
moving=streamlines,
x0='affine',
rm_small_clusters=1,
qbx_thr=[5])
header = header_model
file = f'{step_dir}/bundle_{i}_prev_{sub}'
file_list.append(file)
new_tractogram = StatefulTractogram(streamlines, reference=header,
space=Space.RASMM)
save_trk(new_tractogram, f'{file}.trk', bbox_valid_check=False)
if save_temp and has_fury:
show_bundles([streamlines], f'{file}.png')
print("Bundle preprocessing: ok.")
# Compute pairwise registration tree-structure
tree, alone, n_reg = get_pairwise_tree(n_item=len(file_list))
# Go through all tree steps
for i_step, pairs in enumerate(tree):
new_file_list = list()
# Create step folder
step_dir = join(temp_dir, bundle, 'step_' + str(i_step+1))
mkdir(step_dir)
# A lonely bundle goes to the next level
has_lonely = 0
if alone[i_step] is not None:
has_lonely = 1
file_prev = file_list[alone[i_step]]
file_new = f'{step_dir}/bundle_0_prev_{alone[i_step]}'
new_file_list.append(file_new)
copyfile(f'{file_prev}.trk', f'{file_new}.trk')
if save_temp and has_fury:
copyfile(f'{file_prev}.png', f'{file_new}.png')
# Register and combine each pair of bundles
for index, pair in enumerate(pairs):
i = pair[0]
j = pair[1]
print(f"step:{i_step+1}/{len(tree)}" +
f" pair:{index+1}/{n_reg[i_step]}")
file = file_list[i] + '.trk'
bundle_obj = load_tractogram(file, reference='same',
bbox_valid_check=False)
static = bundle_obj.streamlines
header = create_tractogram_header(file,
*bundle_obj.space_attributes)
file = file_list[j] + '.trk'
moving = load_tractogram(file, reference='same',
bbox_valid_check=False).streamlines
aligned, _, _, _ = slr_with_qbx(static, moving, x0='affine',
rm_small_clusters=1,
qbx_thr=[5])
points, offsets = unlist_streamlines(static)
static = relist_streamlines(points, offsets)
points, offsets = unlist_streamlines(aligned)
aligned = relist_streamlines(points, offsets)
points, offsets = unlist_streamlines(moving)
moving = relist_streamlines(points, offsets)
# Randomly skip steps if speciffied to get a sharper results
if skip_pairs and np.random.choice([True, False], 1)[0]:
combined = combine_bundles(static, aligned, 'random_pick',
distance)
else:
combined = combine_bundles(static, aligned, comb_method,
distance)
file = f'{step_dir}/bundle_{index+has_lonely}_prev_{i}_{j}'
new_file_list.append(file)
new_tractogram = StatefulTractogram(Streamlines(combined),
reference=header,
space=Space.RASMM)
save_trk(new_tractogram, file + ".trk", bbox_valid_check=False)
if save_temp and has_fury:
show_bundles([static, moving, aligned], f'{file}_reg.png',
colors=[(0, 0, 1), (1, 0, 0), (0, 1, 0)])
show_bundles([Streamlines(combined)], f'{file}.png')
file_list = new_file_list
save_trk(new_tractogram, f'{out_dir}/{bundle}.trk',
bbox_valid_check=False)
atlas.append(Streamlines(combined))
if not save_temp:
rmtree(temp_dir)
if merge_out:
atlas_merged = np.concatenate(atlas)
file = f'{out_dir}/whole_brain.trk'
new_tractogram = StatefulTractogram(atlas_merged, reference=header,
space=Space.RASMM)
save_trk(new_tractogram, file, bbox_valid_check=False)
return atlas, atlas_merged
return atlas
|
def compute_atlas_bundle(in_dir, subjects=None, group=None, mid_path='',
bundle_names=None, model_bundle_dir=None,
out_dir=None, merge_out=False, save_temp=False,
n_stream_min=10, n_stream_max=5000, n_point=20,
distance='mdf', comb_method='rlap',
skip_pairs=False):
"""Compute a population specific bundle atlas.
Given several segmented bundles as input, compute the atlas by combining
the bundles pairwise.
Parameters
----------
in_dir : str
Input folder.
subjects : str, optional
Path to a BIDS-like participants.tsv file with the IDs of the subjects
to be processed. If None, all folders in ``in_dir`` are considered as
subjects.
group : str, optional
Label to select a subject group when the tsv file defining subjects
has a ``group`` column. If None, all subjects are processed.
mid_path : str, optional
Intermediate path between ``in_dir`` and bundle files. Default is ''.
bundle_names : str, optional
Path to a tsv file with the names of the bundles to be processed. If
None, all trk files of the first subject will be considered as
bundle_names.
model_bundle_dir : str, optional
Directory with model bundles to be used as a reference to move all
bundles to a common space. If None, bundles are assumed to be in the
same space and no registration is performed.
out_dir : str, optional
Output directory. If None, the current working directory is used.
merge_out : boolean, optional
If True the resulting atlases of all bundles are combined into a single
file. Default is False.
save_temp : boolean, optional
If True the intermediate results of each tree level are saved in a temp
folder in trk and png formats. Default is False.
n_stream_min : int, optional
Bundles with less than ``n_stream_min`` streamlines wont be processed.
Default is 10.
n_stream_max : int, optional
Bundles with more than ``n_stream_max`` streamlines are cropped to have
that number and speed up the computation. Default is 5000.
n_point : int, optional
All streamlines are set to have ``n_point`` points. Default is 20.
distance : str, optional
Distance metric to be used to combine bundles. Default is 'mdf'. The
'mdf_se' distance uses only start/end points of streamlines.
comb_method : str, optional
Method used to combine each bundle pair. Default is 'rlap'.
skip_pairs : boolean, optional
If true bundle combination steps are randomly skipped. This helps to
obtain a sharper result. Default is False.
Returns
-------
atlas : list of Streamlines
A list with the computed atlas bundles.
atlas_merged : Streamlines
A single bundle containing all the computed atlas bundles together.
"""
if type(in_dir) != str:
raise TypeError("in_dir must be a string")
if type(mid_path) != str:
raise TypeError("mid_path must be a string")
if type(n_point) != int:
raise TypeError("n_point must be an int")
if isdir(in_dir) is False:
raise ValueError("Input directory does not exist")
if out_dir is None:
out_dir = getcwd()
if isdir(out_dir) is False:
raise ValueError("Output directory does not exist")
if n_stream_min < 1:
raise ValueError("n_stream_min must be >= 1")
if n_stream_max < 1:
raise ValueError("n_stream_max must be >= 1")
if n_point < 2:
raise ValueError("n_point must be >= 2")
print('Input directory:' + in_dir)
print('Output directory:' + out_dir)
# Create temporary folder
temp_dir = join(out_dir, 'temp')
if isdir(temp_dir):
print("There is already a /temp folder in out_dir. Deleting.")
rmtree(temp_dir)
mkdir(temp_dir)
# Get subjects (from in_dir or a BIDS-like participants.tsv file)
if subjects is None:
files = listdir(in_dir)
subjects = [file for file in files if isdir(join(in_dir, file))]
else:
df = pd.read_csv(subjects, delimiter='\t', dtype='object')
if group is None:
subjects = list(df.participant)
else:
subjects = list(df.loc[df.group == group].participant)
subjects.sort() # necessary?
if len(set(subjects)) < len(subjects):
raise ValueError("Subjects cannot be duplicated")
print(str(len(subjects)) + " subjects to be processed:")
print(subjects)
# Get bundle names (from first subject folder or from tsv file)
if bundle_names is None:
bundle_dir = join(in_dir, subjects[0], mid_path)
print("Retrieving bundle names from " + bundle_dir)
if isdir(bundle_dir) is False:
raise ValueError("Path to subject bundles is incorrect")
files = listdir(bundle_dir)
trk_files = [file for file in files if file.endswith('.trk')]
bundle_names = [splitext(file)[0] for file in trk_files]
else:
df = pd.read_csv(bundle_names, delimiter='\t', dtype='object')
bundle_names = list(df.iloc[:, 0])
bundle_names.sort() # necessary?
if len(set(bundle_names)) < len(bundle_names):
raise ValueError("Bundle names cannot be duplicated")
print(str(len(bundle_names)) + " bundles to be processed:")
print(bundle_names)
# Create a dictionary with all bundle files
bundle_files = {}
for sub in subjects:
for bundle in bundle_names:
file = join(in_dir, sub, mid_path, bundle + '.trk')
bundle_files[(sub, bundle)] = file
# Get model bundle list
if model_bundle_dir is None:
model_bundles = None
else:
files = listdir(model_bundle_dir)
trk_files = [file for file in files if file.endswith('.trk')]
model_bundle_list = [splitext(file)[0] for file in trk_files]
if not all(x in model_bundle_list for x in bundle_names):
raise ValueError("Not all the specified bundles have a model")
model_bundles = {}
for bundle in bundle_names:
model_bundles[bundle] = join(model_bundle_dir, bundle + '.trk')
# Atlas building starts
atlas = []
for bundle in bundle_names:
print("Processing bundle: " + bundle)
step_dir = join(temp_dir, bundle, 'step_0')
makedirs(step_dir)
# Load model bundle if required
if model_bundles is not None:
file = model_bundles[bundle]
bundle_obj = load_tractogram(file, reference='same',
bbox_valid_check=False)
model_bundle = bundle_obj.streamlines
header_model \
= create_tractogram_header(file, *bundle_obj.space_attributes)
# Preprocess all bundles and save them as trk
file_list = []
for i, sub in enumerate(subjects):
file = bundle_files[(sub, bundle)]
bundle_obj = load_tractogram(file, reference='same',
bbox_valid_check=False)
streamlines = bundle_obj.streamlines
header = create_tractogram_header(file,
*bundle_obj.space_attributes)
n_stream = len(streamlines)
if n_stream < n_stream_min:
print(f"{file} has {n_stream} streamlines. Discarded.")
continue
elif n_stream > n_stream_max:
streamlines = select_random_set_of_streamlines(streamlines,
n_stream_max)
streamlines = set_number_of_points(streamlines, n_point)
if model_bundles is not None:
streamlines, _, _, _ = slr_with_qbx(static=model_bundle,
moving=streamlines,
x0='affine',
rm_small_clusters=1,
qbx_thr=[5])
header = header_model
file = f'{step_dir}/bundle_{i}_prev_{sub}'
file_list.append(file)
new_tractogram = StatefulTractogram(streamlines, reference=header,
space=Space.RASMM)
save_trk(new_tractogram, f'{file}.trk', bbox_valid_check=False)
if save_temp and has_fury:
show_bundles([streamlines], f'{file}.png')
print("Bundle preprocessing: ok.")
# Compute pairwise registration tree-structure
tree, alone, n_reg = get_pairwise_tree(n_item=len(file_list))
# Go through all tree steps
for i_step, pairs in enumerate(tree):
new_file_list = list()
# Create step folder
step_dir = join(temp_dir, bundle, 'step_' + str(i_step+1))
mkdir(step_dir)
# A lonely bundle goes to the next level
has_lonely = 0
if alone[i_step] is not None:
has_lonely = 1
file_prev = file_list[alone[i_step]]
file_new = f'{step_dir}/bundle_0_prev_{alone[i_step]}'
new_file_list.append(file_new)
copyfile(f'{file_prev}.trk', f'{file_new}.trk')
if save_temp and has_fury:
copyfile(f'{file_prev}.png', f'{file_new}.png')
# Register and combine each pair of bundles
for index, pair in enumerate(pairs):
i = pair[0]
j = pair[1]
print(f"step:{i_step+1}/{len(tree)}" +
f" pair:{index+1}/{n_reg[i_step]}")
file = file_list[i] + '.trk'
bundle_obj = load_tractogram(file, reference='same',
bbox_valid_check=False)
static = bundle_obj.streamlines
header = create_tractogram_header(file,
*bundle_obj.space_attributes)
file = file_list[j] + '.trk'
moving = load_tractogram(file, reference='same',
bbox_valid_check=False).streamlines
aligned, _, _, _ = slr_with_qbx(static, moving, x0='affine',
rm_small_clusters=1,
qbx_thr=[5])
points, offsets = unlist_streamlines(static)
static = relist_streamlines(points, offsets)
points, offsets = unlist_streamlines(aligned)
aligned = relist_streamlines(points, offsets)
points, offsets = unlist_streamlines(moving)
moving = relist_streamlines(points, offsets)
# Randomly skip steps if speciffied to get a sharper results
if skip_pairs and np.random.choice([True, False], 1)[0]:
combined = combine_bundles(static, aligned, 'random_pick',
distance)
else:
combined = combine_bundles(static, aligned, comb_method,
distance)
file = f'{step_dir}/bundle_{index+has_lonely}_prev_{i}_{j}'
new_file_list.append(file)
new_tractogram = StatefulTractogram(Streamlines(combined),
reference=header,
space=Space.RASMM)
save_trk(new_tractogram, file + ".trk", bbox_valid_check=False)
if save_temp and has_fury:
show_bundles([static, moving, aligned], f'{file}_reg.png',
colors=[(0, 0, 1), (1, 0, 0), (0, 1, 0)])
show_bundles([Streamlines(combined)], f'{file}.png')
file_list = new_file_list
save_trk(new_tractogram, f'{out_dir}/{bundle}.trk',
bbox_valid_check=False)
atlas.append(Streamlines(combined))
if not save_temp:
rmtree(temp_dir)
if merge_out:
atlas_merged = np.concatenate(atlas)
file = f'{out_dir}/whole_brain.trk'
new_tractogram = StatefulTractogram(atlas_merged, reference=header,
space=Space.RASMM)
save_trk(new_tractogram, file, bbox_valid_check=False)
return atlas, atlas_merged
return atlas
|
43,789 |
def pauli_group(n_qubits, wire_map=None):
"""Generate the :math:`n`-qubit Pauli group.
This function enables the construction of the :math:`n`-qubit Pauli group with no
storage involved. The :math:`n`-qubit Pauli group has size :math:`4^n`,
thus it may not be desirable to construct it in full and store.
The order of iteration is based on the binary symplectic representation of
the Pauli group as :math:`2n`-bit strings. Ordering is done by converting
the integers :math:`0` to :math:`2^{2n}` to binary strings, and converting those
strings to Pauli operators using the ``binary_to_pauli`` method.
Args:
n_qubits (int): The number of qubits for which to create the group.
wire_map (dict[Union[str, int], int]): dictionary containing all wire labels
used in the Pauli word as keys, and unique integer labels as their values.
If no wire map is provided, wires will be labeled by integers between 0 and ``n_qubits``.
Returns:
.Operation: The next Pauli word in the group.
**Example**
The ``pauli_group`` generator can be used to loop over the Pauli group as follows:
.. code-block:: python
from pennylane.pauli import pauli_group
n_qubits = 3
for p in pauli_group(n_qubits):
print(p)
The Pauli group in full can be obtained in full like so:
.. code-block:: python
full_pg = list(pauli_group(n_qubits))
The group can also be created using a custom wire map (if no map is
specified, a default map of label :math:`i` to wire ``i`` will be created).
.. code-block:: python
n_qubits = 3
wire_map = {'a' : 0, 'b' : 1, 'c' : 2}
for p in pauli_group(n_qubits, wire_map=wire_map):
print(p)
"""
if not isinstance(n_qubits, int):
raise TypeError("Must specify an integer number of qubits construct the Pauli group.")
if n_qubits <= 0:
raise ValueError("Number of qubits must be at least 1 to construct Pauli group.")
return _pauli_group_generator(n_qubits, wire_map=wire_map)
|
def pauli_group(n_qubits, wire_map=None):
"""Generate the :math:`n`-qubit Pauli group.
This function enables the construction of the :math:`n`-qubit Pauli group with no
storage involved. The :math:`n`-qubit Pauli group has size :math:`4^n`,
thus it may not be desirable to construct it in full and store.
The order of iteration is based on the binary symplectic representation of
the Pauli group as :math:`2n`-bit strings. Ordering is done by converting
the integers :math:`0` to :math:`2^{2n}` to binary strings, and converting those
strings to Pauli operators using the ``binary_to_pauli`` method.
Args:
n_qubits (int): The number of qubits for which to create the group.
wire_map (dict[Union[str, int], int]): dictionary containing all wire labels
used in the Pauli word as keys, and unique integer labels as their values.
If no wire map is provided, wires will be labeled by integers between 0 and ``n_qubits``.
Returns:
.Operation: The next Pauli word in the group.
**Example**
The ``pauli_group`` generator can be used to loop over the Pauli group as follows:
.. code-block:: python
from pennylane.grouping import pauli_group
n_qubits = 3
for p in pauli_group(n_qubits):
print(p)
The Pauli group in full can be obtained in full like so:
.. code-block:: python
full_pg = list(pauli_group(n_qubits))
The group can also be created using a custom wire map (if no map is
specified, a default map of label :math:`i` to wire ``i`` will be created).
.. code-block:: python
n_qubits = 3
wire_map = {'a' : 0, 'b' : 1, 'c' : 2}
for p in pauli_group(n_qubits, wire_map=wire_map):
print(p)
"""
if not isinstance(n_qubits, int):
raise TypeError("Must specify an integer number of qubits construct the Pauli group.")
if n_qubits <= 0:
raise ValueError("Number of qubits must be at least 1 to construct Pauli group.")
return _pauli_group_generator(n_qubits, wire_map=wire_map)
|
58,794 |
def test_incompatible_weight_data_type():
ifm = relay.var("ifm", shape=(1, 8, 8, 3), dtype="int8")
depthwise = make_ethosu_depthwise_conv2d(
ifm=ifm,
channels=3,
kernel_shape=(3, 2),
padding=(0, 0),
strides=(1, 1),
dilation=(1, 1),
activation="NONE",
ifm_layout="NHWC",
ofm_layout="NHWC",
weight_dtype="int16",
)
func = relay.Function(relay.analysis.free_vars(depthwise), depthwise)
mod = tvm.IRModule.from_expr(func)
with pytest.raises(TVMError) as err:
mod = relay.transform.InferType()(mod)
message = "Expected ethosu_depthwise_conv2d type(uint8) or type(int8) for weight but was int16"
assert message in str(err.value)
|
def test_incompatible_weight_data_type():
ifm = relay.var("ifm", shape=(1, 8, 8, 3), dtype="int8")
depthwise = make_ethosu_depthwise_conv2d(
ifm=ifm,
channels=3,
kernel_shape=(3, 2),
padding=(0, 0),
strides=(1, 1),
dilation=(1, 1),
activation="NONE",
ifm_layout="NHWC",
ofm_layout="NHWC",
weight_dtype="int16",
)
func = relay.Function(relay.analysis.free_vars(depthwise), depthwise)
mod = tvm.IRModule.from_expr(func)
message = "Expected ethosu_depthwise_conv2d type(uint8) or type(int8) for weight but was int16"
with pytest.raises(TVMError, match=message):
mod = relay.transform.InferType()(mod)
|
50,486 |
def main(args=None):
parser = create_argument_parser()
cli_options = parser.parse_args(args=args)
# load the config
cfg_name = find_config_name(cli_options)
cfg_options = {}
if cfg_name is not None:
with io.open(cfg_name, encoding='UTF-8') as cfg_file:
cfg_options = parse_config_into_dict(
parse_config_file(cfg_file, filename=cfg_name))
options_dict = merge_options_and_set_defaults(
[cfg_options, cli_options.__dict__])
options = Options(**options_dict)
logger = Logger(options.verbose)
if sys.version_info < (3, 8) and options.use_canonical_paths:
logger.warn("--use_canonical_paths will be ignored due to incompatible Python version.")
options.use_canonical_paths = None
if cli_options.version:
logger.msg(
"gcovr {version}\n"
"\n"
"{copyright}",
version=__version__, copyright=COPYRIGHT)
sys.exit(0)
if options.html_title == '':
logger.error(
"an empty --html_title= is not allowed.")
sys.exit(1)
if options.html_medium_threshold == 0:
logger.error(
"value of --html-medium-threshold= should not be zero.")
sys.exit(1)
if options.html_medium_threshold > options.html_high_threshold:
logger.error(
"value of --html-medium-threshold={} should be\n"
"lower than or equal to the value of --html-high-threshold={}.",
options.html_medium_threshold, options.html_high_threshold)
sys.exit(1)
if options.html_tab_size < 1:
logger.error(
"value of --html-tab-size= should be greater 0.")
sys.exit(1)
potential_html_output = (
(options.html and options.html.value)
or (options.html_details and options.html_details.value)
or (options.output and options.output.value))
if options.html_details and not potential_html_output:
logger.error(
"a named output must be given, if the option --html-details\n"
"is used.")
sys.exit(1)
if options.html_self_contained is False and not potential_html_output:
logger.error(
"can only disable --html-self-contained when a named output is given.")
sys.exit(1)
if options.objdir is not None:
if not options.objdir:
logger.error(
"empty --object-directory option.\n"
"\tThis option specifies the path to the object file "
"directory of your project.\n"
"\tThis option cannot be an empty string.")
sys.exit(1)
tmp = options.objdir.replace('/', os.sep).replace('\\', os.sep)
while os.sep + os.sep in tmp:
tmp = tmp.replace(os.sep + os.sep, os.sep)
if normpath(options.objdir) != tmp:
logger.warn(
"relative referencing in --object-directory.\n"
"\tthis could cause strange errors when gcovr attempts to\n"
"\tidentify the original gcc working directory.")
if not os.path.exists(normpath(options.objdir)):
logger.error(
"Bad --object-directory option.\n"
"\tThe specified directory does not exist.")
sys.exit(1)
if options.use_canonical_paths:
canonical_objdir = os.path.realpath(options.objdir)
if canonical_objdir != options.objdir:
options.objdir = canonical_objdir
logger.msg(f"--object-directory has been normalized to {options.objdir}.")
options.starting_dir = os.path.abspath(os.getcwd())
if options.use_canonical_paths:
canonical_starting_dir = os.path.realpath(options.starting_dir)
if canonical_starting_dir != options.starting_dir:
options.starting_dir = canonical_starting_dir
logger.msg(f"starting_dir has been normalized to {options.starting_dir}.")
if not options.root:
logger.error(
"empty --root option.\n"
"\tRoot specifies the path to the root "
"directory of your project.\n"
"\tThis option cannot be an empty string.")
sys.exit(1)
options.root_dir = os.path.abspath(options.root)
if options.use_canonical_paths:
canonical_root = os.path.realpath(options.root)
if canonical_root != options.root:
options.root = canonical_root
logger.msg(f"--root has been normalized to {options.root}.")
canonical_rootdir = os.path.realpath(options.root_dir)
if canonical_rootdir != options.root_dir:
options.root_dir = canonical_rootdir
logger.msg(f"root_dir has been normalized to {options.root_dir}.")
#
# Setup filters
#
# The root filter isn't technically a filter,
# but is used to turn absolute paths into relative paths
options.root_filter = re.compile('^' + re.escape(options.root_dir + os.sep))
if options.exclude_dirs is not None:
options.exclude_dirs = [
f.build_filter(logger, options.use_canonical_paths) for f in options.exclude_dirs]
options.exclude = [f.build_filter(logger, options.use_canonical_paths) for f in options.exclude]
options.filter = [f.build_filter(logger, options.use_canonical_paths) for f in options.filter]
if not options.filter:
options.filter = [DirectoryPrefixFilter(options.root_dir)]
options.gcov_exclude = [
f.build_filter(logger, options.use_canonical_paths) for f in options.gcov_exclude]
options.gcov_filter = [f.build_filter(logger, options.use_canonical_paths) for f in options.gcov_filter]
if not options.gcov_filter:
options.gcov_filter = [AlwaysMatchFilter()]
# Output the filters for debugging
for name, filters in [
('--root', [options.root_filter]),
('--filter', options.filter),
('--exclude', options.exclude),
('--gcov-filter', options.gcov_filter),
('--gcov-exclude', options.gcov_exclude),
('--exclude-directories', options.exclude_dirs),
]:
logger.verbose_msg('Filters for {}: ({})', name, len(filters))
for f in filters:
logger.verbose_msg('- {}', f)
if options.exclude_lines_by_pattern:
try:
re.compile(options.exclude_lines_by_pattern)
except re.error as e:
logger.error(
"--exclude-lines-by-pattern: "
"Invalid regular expression: {}, error: {}",
repr(options.exclude_lines_by_pattern), e)
sys.exit(1)
covdata = dict()
if options.add_tracefile:
collect_coverage_from_tracefiles(covdata, options, logger)
else:
collect_coverage_from_gcov(covdata, options, logger)
logger.verbose_msg("Gathered coveraged data for {} files", len(covdata))
# Print reports
error_occurred = print_reports(covdata, options, logger)
if error_occurred:
logger.error(
"Error occurred while printing reports"
)
sys.exit(7)
if options.fail_under_line > 0.0 or options.fail_under_branch > 0.0:
fail_under(covdata, options.fail_under_line, options.fail_under_branch, logger)
|
def main(args=None):
parser = create_argument_parser()
cli_options = parser.parse_args(args=args)
# load the config
cfg_name = find_config_name(cli_options)
cfg_options = {}
if cfg_name is not None:
with io.open(cfg_name, encoding='UTF-8') as cfg_file:
cfg_options = parse_config_into_dict(
parse_config_file(cfg_file, filename=cfg_name))
options_dict = merge_options_and_set_defaults(
[cfg_options, cli_options.__dict__])
options = Options(**options_dict)
logger = Logger(options.verbose)
if sys.version_info < (3, 8) and options.use_canonical_paths:
logger.warn("--use_canonical_paths will be ignored due to incompatible Python version.")
options.use_canonical_paths = None
if cli_options.version:
logger.msg(
"gcovr {version}\n"
"\n"
"{copyright}",
version=__version__, copyright=COPYRIGHT)
sys.exit(0)
if options.html_title == '':
logger.error(
"an empty --html_title= is not allowed.")
sys.exit(1)
if options.html_medium_threshold == 0:
logger.error(
"value of --html-medium-threshold= should not be zero.")
sys.exit(1)
if options.html_medium_threshold > options.html_high_threshold:
logger.error(
"value of --html-medium-threshold={} should be\n"
"lower than or equal to the value of --html-high-threshold={}.",
options.html_medium_threshold, options.html_high_threshold)
sys.exit(1)
if options.html_tab_size < 1:
logger.error(
"value of --html-tab-size= should be greater 0.")
sys.exit(1)
potential_html_output = (
(options.html and options.html.value)
or (options.html_details and options.html_details.value)
or (options.output and options.output.value))
if options.html_details and not potential_html_output:
logger.error(
"a named output must be given, if the option --html-details\n"
"is used.")
sys.exit(1)
if options.html_self_contained is False and not potential_html_output:
logger.error(
"can only disable --html-self-contained when a named output is given.")
sys.exit(1)
if options.objdir is not None:
if not options.objdir:
logger.error(
"empty --object-directory option.\n"
"\tThis option specifies the path to the object file "
"directory of your project.\n"
"\tThis option cannot be an empty string.")
sys.exit(1)
tmp = options.objdir.replace('/', os.sep).replace('\\', os.sep)
while os.sep + os.sep in tmp:
tmp = tmp.replace(os.sep + os.sep, os.sep)
if normpath(options.objdir) != tmp:
logger.warn(
"relative referencing in --object-directory.\n"
"\tthis could cause strange errors when gcovr attempts to\n"
"\tidentify the original gcc working directory.")
if not os.path.exists(normpath(options.objdir)):
logger.error(
"Bad --object-directory option.\n"
"\tThe specified directory does not exist.")
sys.exit(1)
if options.use_canonical_paths:
canonical_objdir = os.path.realpath(options.objdir)
if canonical_objdir != options.objdir:
options.objdir = canonical_objdir
logger.msg(f"--object-directory has been normalized to {options.objdir}.")
options.starting_dir = os.path.abspath(os.getcwd())
if options.use_canonical_paths:
canonical_starting_dir = os.path.realpath(options.starting_dir)
if canonical_starting_dir != options.starting_dir:
options.starting_dir = canonical_starting_dir
logger.msg(f"starting_dir has been normalized to {options.starting_dir}.")
if not options.root:
logger.error(
"empty --root option.\n"
"\tRoot specifies the path to the root "
"directory of your project.\n"
"\tThis option cannot be an empty string.")
sys.exit(1)
options.root_dir = os.path.abspath(options.root)
if canonical_path:
options.root = canonical_path(options.root, "--root")
if canonical_path:
options.root_dir = canonical_path(options.root, "root dir")
#
# Setup filters
#
# The root filter isn't technically a filter,
# but is used to turn absolute paths into relative paths
options.root_filter = re.compile('^' + re.escape(options.root_dir + os.sep))
if options.exclude_dirs is not None:
options.exclude_dirs = [
f.build_filter(logger, options.use_canonical_paths) for f in options.exclude_dirs]
options.exclude = [f.build_filter(logger, options.use_canonical_paths) for f in options.exclude]
options.filter = [f.build_filter(logger, options.use_canonical_paths) for f in options.filter]
if not options.filter:
options.filter = [DirectoryPrefixFilter(options.root_dir)]
options.gcov_exclude = [
f.build_filter(logger, options.use_canonical_paths) for f in options.gcov_exclude]
options.gcov_filter = [f.build_filter(logger, options.use_canonical_paths) for f in options.gcov_filter]
if not options.gcov_filter:
options.gcov_filter = [AlwaysMatchFilter()]
# Output the filters for debugging
for name, filters in [
('--root', [options.root_filter]),
('--filter', options.filter),
('--exclude', options.exclude),
('--gcov-filter', options.gcov_filter),
('--gcov-exclude', options.gcov_exclude),
('--exclude-directories', options.exclude_dirs),
]:
logger.verbose_msg('Filters for {}: ({})', name, len(filters))
for f in filters:
logger.verbose_msg('- {}', f)
if options.exclude_lines_by_pattern:
try:
re.compile(options.exclude_lines_by_pattern)
except re.error as e:
logger.error(
"--exclude-lines-by-pattern: "
"Invalid regular expression: {}, error: {}",
repr(options.exclude_lines_by_pattern), e)
sys.exit(1)
covdata = dict()
if options.add_tracefile:
collect_coverage_from_tracefiles(covdata, options, logger)
else:
collect_coverage_from_gcov(covdata, options, logger)
logger.verbose_msg("Gathered coveraged data for {} files", len(covdata))
# Print reports
error_occurred = print_reports(covdata, options, logger)
if error_occurred:
logger.error(
"Error occurred while printing reports"
)
sys.exit(7)
if options.fail_under_line > 0.0 or options.fail_under_branch > 0.0:
fail_under(covdata, options.fail_under_line, options.fail_under_branch, logger)
|
27,964 |
def main(args):
"""
Perform analysis on the given logfiles and store the results in a machine-
readable format.
"""
logger.setup_logger(args.verbose if 'verbose' in args else None)
if len(args.logfile) != 1:
LOG.warning("Only one log file can be processed right now!")
sys.exit(1)
args.output_path = os.path.abspath(args.output_path)
if os.path.exists(args.output_path) and \
not os.path.isdir(args.output_path):
LOG.error("The given output path is not a directory: " +
args.output_path)
sys.exit(1)
if 'enable_all' in args:
LOG.info("'--enable-all' was supplied for this analysis.")
# We clear the output directory in the following cases.
ctu_dir = os.path.join(args.output_path, 'ctu-dir')
if 'ctu_phases' in args and args.ctu_phases[0] and \
os.path.isdir(ctu_dir):
# Clear the CTU-dir if the user turned on the collection phase.
LOG.debug("Previous CTU contents have been deleted.")
shutil.rmtree(ctu_dir)
if 'clean' in args and os.path.isdir(args.output_path):
LOG.info("Previous analysis results in '%s' have been removed, "
"overwriting with current result", args.output_path)
shutil.rmtree(args.output_path)
if not os.path.exists(args.output_path):
os.makedirs(args.output_path)
LOG.debug("args: " + str(args))
LOG.debug("Output will be stored to: '" + args.output_path + "'")
# Process the skip list if present.
skip_handler = __get_skip_handler(args)
# Enable alpha uniqueing by default if ctu analysis is used.
if 'none' in args.compile_uniqueing and 'ctu_phases' in args:
args.compile_uniqueing = "alpha"
compiler_info_file = None
if 'compiler_info_file' in args:
LOG.debug("Compiler info is read from: %s", args.compiler_info_file)
if not os.path.exists(args.compiler_info_file):
LOG.error("Compiler info file %s does not exist",
args.compiler_info_file)
sys.exit(1)
compiler_info_file = args.compiler_info_file
report_dir = args.output_path
# Skip list is applied only in pre-analysis
# if --ctu-collect or --stats-collect was called explicitly.
pre_analysis_skip_handler = None
if 'ctu_ phases' in args:
ctu_collect = args.ctu_phases[0]
ctu_analyze = args.ctu_phases[1]
if ((ctu_collect and not ctu_analyze)
or ("stats_output" in args and args.stats_output)):
pre_analysis_skip_handler = skip_handler
# Parse the JSON CCDBs and retrieve the compile commands.
actions = []
for log_file in args.logfile:
if not os.path.exists(log_file):
LOG.error("The specified logfile '%s' does not exist!",
log_file)
continue
actions += log_parser.parse_unique_log(
load_json_or_empty(log_file),
report_dir,
args.compile_uniqueing,
compiler_info_file,
args.keep_gcc_include_fixed,
skip_handler,
pre_analysis_skip_handler)
if not actions:
LOG.info("No analysis is required.\nThere were no compilation "
"commands in the provided compilation database or "
"all of them were skipped.")
sys.exit(0)
uniqued_compilation_db_file = os.path.join(
args.output_path, "unique_compile_commands.json")
with open(uniqued_compilation_db_file, 'w') as f:
json.dump(actions, f,
cls=log_parser.CompileCommandEncoder)
context = analyzer_context.get_context()
metadata = {'action_num': len(actions),
'command': sys.argv,
'versions': {
'codechecker': "{0} ({1})".format(
context.package_git_tag,
context.package_git_hash)},
'working_directory': os.getcwd(),
'output_path': args.output_path,
'result_source_files': {}}
if 'name' in args:
metadata['name'] = args.name
# Update metadata dictionary with old values.
metadata_file = os.path.join(args.output_path, 'metadata.json')
if os.path.exists(metadata_file):
metadata_prev = load_json_or_empty(metadata_file)
metadata['result_source_files'] = \
metadata_prev['result_source_files']
analyzer.perform_analysis(args, skip_handler, context, actions, metadata)
__update_skip_file(args)
LOG.debug("Analysis metadata write to '%s'", metadata_file)
with open(metadata_file, 'w') as metafile:
json.dump(metadata, metafile)
# WARN: store command will search for this file!!!!
compile_cmd_json = os.path.join(args.output_path, 'compile_cmd.json')
try:
source = os.path.abspath(args.logfile[0])
target = os.path.abspath(compile_cmd_json)
if source != target:
shutil.copyfile(source, target)
except shutil.Error:
LOG.debug("Compilation database JSON file is the same.")
except Exception:
LOG.debug("Copying compilation database JSON file failed.")
try:
from codechecker_analyzer import analyzer_statistics
analyzer_statistics.collect(metadata, "analyze")
except Exception:
pass
|
def main(args):
"""
Perform analysis on the given logfiles and store the results in a machine-
readable format.
"""
logger.setup_logger(args.verbose if 'verbose' in args else None)
if len(args.logfile) != 1:
LOG.warning("Only one log file can be processed right now!")
sys.exit(1)
args.output_path = os.path.abspath(args.output_path)
if os.path.exists(args.output_path) and \
not os.path.isdir(args.output_path):
LOG.error("The given output path is not a directory: " +
args.output_path)
sys.exit(1)
if 'enable_all' in args:
LOG.info("'--enable-all' was supplied for this analysis.")
# We clear the output directory in the following cases.
ctu_dir = os.path.join(args.output_path, 'ctu-dir')
if 'ctu_phases' in args and args.ctu_phases[0] and \
os.path.isdir(ctu_dir):
# Clear the CTU-dir if the user turned on the collection phase.
LOG.debug("Previous CTU contents have been deleted.")
shutil.rmtree(ctu_dir)
if 'clean' in args and os.path.isdir(args.output_path):
LOG.info("Previous analysis results in '%s' have been removed, "
"overwriting with current result", args.output_path)
shutil.rmtree(args.output_path)
if not os.path.exists(args.output_path):
os.makedirs(args.output_path)
LOG.debug("args: " + str(args))
LOG.debug("Output will be stored to: '" + args.output_path + "'")
# Process the skip list if present.
skip_handler = __get_skip_handler(args)
# Enable alpha uniqueing by default if ctu analysis is used.
if 'none' in args.compile_uniqueing and 'ctu_phases' in args:
args.compile_uniqueing = "alpha"
compiler_info_file = None
if 'compiler_info_file' in args:
LOG.debug("Compiler info is read from: %s", args.compiler_info_file)
if not os.path.exists(args.compiler_info_file):
LOG.error("Compiler info file %s does not exist",
args.compiler_info_file)
sys.exit(1)
compiler_info_file = args.compiler_info_file
report_dir = args.output_path
# Skip list is applied only in pre-analysis
# if --ctu-collect or --stats-collect was called explicitly.
pre_analysis_skip_handler = None
if 'ctu_phases' in args:
ctu_collect = args.ctu_phases[0]
ctu_analyze = args.ctu_phases[1]
if ((ctu_collect and not ctu_analyze)
or ("stats_output" in args and args.stats_output)):
pre_analysis_skip_handler = skip_handler
# Parse the JSON CCDBs and retrieve the compile commands.
actions = []
for log_file in args.logfile:
if not os.path.exists(log_file):
LOG.error("The specified logfile '%s' does not exist!",
log_file)
continue
actions += log_parser.parse_unique_log(
load_json_or_empty(log_file),
report_dir,
args.compile_uniqueing,
compiler_info_file,
args.keep_gcc_include_fixed,
skip_handler,
pre_analysis_skip_handler)
if not actions:
LOG.info("No analysis is required.\nThere were no compilation "
"commands in the provided compilation database or "
"all of them were skipped.")
sys.exit(0)
uniqued_compilation_db_file = os.path.join(
args.output_path, "unique_compile_commands.json")
with open(uniqued_compilation_db_file, 'w') as f:
json.dump(actions, f,
cls=log_parser.CompileCommandEncoder)
context = analyzer_context.get_context()
metadata = {'action_num': len(actions),
'command': sys.argv,
'versions': {
'codechecker': "{0} ({1})".format(
context.package_git_tag,
context.package_git_hash)},
'working_directory': os.getcwd(),
'output_path': args.output_path,
'result_source_files': {}}
if 'name' in args:
metadata['name'] = args.name
# Update metadata dictionary with old values.
metadata_file = os.path.join(args.output_path, 'metadata.json')
if os.path.exists(metadata_file):
metadata_prev = load_json_or_empty(metadata_file)
metadata['result_source_files'] = \
metadata_prev['result_source_files']
analyzer.perform_analysis(args, skip_handler, context, actions, metadata)
__update_skip_file(args)
LOG.debug("Analysis metadata write to '%s'", metadata_file)
with open(metadata_file, 'w') as metafile:
json.dump(metadata, metafile)
# WARN: store command will search for this file!!!!
compile_cmd_json = os.path.join(args.output_path, 'compile_cmd.json')
try:
source = os.path.abspath(args.logfile[0])
target = os.path.abspath(compile_cmd_json)
if source != target:
shutil.copyfile(source, target)
except shutil.Error:
LOG.debug("Compilation database JSON file is the same.")
except Exception:
LOG.debug("Copying compilation database JSON file failed.")
try:
from codechecker_analyzer import analyzer_statistics
analyzer_statistics.collect(metadata, "analyze")
except Exception:
pass
|
1,817 |
def _encode(values, *, uniques, check_unknown=True):
"""Helper function encode values.
Uses pure python method for object dtype, and numpy method for
all other dtypes.
The numpy method has the limitation that the `uniques` need to
be sorted. Importantly, this is not checked but assumed to already be
the case. The calling method needs to ensure this for all non-object
values.
Parameters
----------
values : array
Values to factorize or encode.
uniques : array
Uniques are not determined from passed values (this
can be because the user specified categories, or because they
already have been determined in fit).
check_unknown : bool, default True
If True, check for values in ``values`` that are not in ``unique``
and raise an error. This is ignored for object dtype, and treated as
True in this case. This parameter is useful for
_BaseEncoder._transform() to avoid calling _encode_check_unknown()
twice.
Returns
-------
encoded : ndarray
Encoded values
"""
if values.dtype == object:
table = {val: i for i, val in enumerate(uniques)}
try:
return np.array([table[v] for v in values])
except KeyError as e:
raise ValueError(f"y contains previously unseen labels: {str(e)}")
else:
if check_unknown:
diff = _encode_check_unknown(values, uniques)
if diff:
raise ValueError(f"y contains previously unseen labels: "
f"{str(diff)}")
return np.searchsorted(uniques, values)
|
def _encode(values, *, uniques, check_unknown=True):
"""Helper function to encode values into [0, n_uniques - 1]
Uses pure python method for object dtype, and numpy method for
all other dtypes.
The numpy method has the limitation that the `uniques` need to
be sorted. Importantly, this is not checked but assumed to already be
the case. The calling method needs to ensure this for all non-object
values.
Parameters
----------
values : array
Values to factorize or encode.
uniques : array
Uniques are not determined from passed values (this
can be because the user specified categories, or because they
already have been determined in fit).
check_unknown : bool, default True
If True, check for values in ``values`` that are not in ``unique``
and raise an error. This is ignored for object dtype, and treated as
True in this case. This parameter is useful for
_BaseEncoder._transform() to avoid calling _encode_check_unknown()
twice.
Returns
-------
encoded : ndarray
Encoded values
"""
if values.dtype == object:
table = {val: i for i, val in enumerate(uniques)}
try:
return np.array([table[v] for v in values])
except KeyError as e:
raise ValueError(f"y contains previously unseen labels: {str(e)}")
else:
if check_unknown:
diff = _encode_check_unknown(values, uniques)
if diff:
raise ValueError(f"y contains previously unseen labels: "
f"{str(diff)}")
return np.searchsorted(uniques, values)
|
45,982 |
def lovasz_hinge_loss(input: torch.Tensor, target: torch.Tensor) -> torch.Tensor:
r"""Criterion that computes a surrogate binary intersection-over-union (IoU) loss.
According to [2], we compute the IoU as follows:
.. math::
\text{IoU}(x, class) = \frac{|X \cap Y|}{|X \cup Y|}
[1] approximates this fomular with a surrogate, which is fully differentable.
Where:
- :math:`X` expects to be the scores of each class.
- :math:`Y` expects to be the binary tensor with the class labels.
the loss, is finally computed as:
.. math::
\text{loss}(x, class) = 1 - \text{IoU}(x, class)
Reference:
[1] http://proceedings.mlr.press/v37/yub15.pdf
[2] https://arxiv.org/pdf/1705.08790.pdf
. note::
This loss function only supports binary labels. For multi-class labels please
use the Lovasz-Softmax loss.
Args:
input: logits tensor with shape :math:`(N, 1, H, W)`.
labels: labels tensor with shape :math:`(N, H, W)` with binary values.
Return:
a scalar with the computed loss.
Example:
>>> N = 1 # num_classes
>>> input = torch.randn(1, N, 3, 5, requires_grad=True)
>>> target = torch.empty(1, 3, 5, dtype=torch.long).random_(N)
>>> output = lovasz_hinge_loss(input, target)
>>> output.backward()
"""
if not isinstance(input, torch.Tensor):
raise TypeError(f"Input type is not a torch.Tensor. Got {type(input)}")
if not isinstance(target, torch.Tensor):
raise TypeError(f"Target type is not a torch.Tensor. Got {type(target)}")
if not len(input.shape) == 4:
raise ValueError(f"Invalid input shape, we expect Bx1xHxW. Got: {input.shape}")
if not len(target.shape) == 3:
raise ValueError(f"Invalid target shape, we expect BxHxW. Got: {target.shape}")
if not input.shape[1] == 1:
raise ValueError(f"Invalid input shape, we expect Bx1xHxW. Got: {input.shape}")
if not input.shape[-2:] == target.shape[-2:]:
raise ValueError(f"input and target shapes must be the same. Got: {input.shape} and {target.shape}")
if not input.device == target.device:
raise ValueError(f"input and target must be in the same device. Got: {input.device} and {target.device}")
# flatten input and target [B, -1] and to float
input_flatten: torch.Tensor = input.flatten(start_dim=1)
target_flatten: torch.Tensor = target.flatten(start_dim=1).float()
# get shapes
B, N = input_flatten.shape
# compute probabilities
input_prob: torch.Tensor = torch.sigmoid(input_flatten)
# compute actual loss
signs = 2. * target_flatten - 1.
errors = 1. - input_prob * signs
errors_sorted, permutation = torch.sort(errors, dim=1, descending=True)
batch_index: torch.Tensor = torch.arange(B, device=input.device).repeat_interleave(N, dim=0)
target_sorted: torch.Tensor = target_flatten[batch_index, permutation.view(-1)]
target_sorted: torch.Tensor = target_sorted.view(B, N)
target_sorted_sum: torch.Tensor = target_sorted.sum(dim=1, keepdim=True)
intersection: torch.Tensor = target_sorted_sum - target_sorted.cumsum(dim=1)
union: torch.Tensor = target_sorted_sum + (1. - target_sorted).cumsum(dim=1)
gradient: torch.Tensor = 1. - intersection / union
if N > 1:
gradient[..., 1:] = gradient[..., 1:] - gradient[..., :-1]
loss: torch.Tensor = (F.relu(errors_sorted) * gradient).sum(dim=1).mean()
return loss
|
def lovasz_hinge_loss(input: torch.Tensor, target: torch.Tensor) -> torch.Tensor:
r"""Criterion that computes a surrogate binary intersection-over-union (IoU) loss.
According to [2], we compute the IoU as follows:
.. math::
\text{IoU}(x, class) = \frac{|X \cap Y|}{|X \cup Y|}
[1] approximates this fomular with a surrogate, which is fully differentable.
Where:
- :math:`X` expects to be the scores of each class.
- :math:`Y` expects to be the binary tensor with the class labels.
the loss, is finally computed as:
.. math::
\text{loss}(x, class) = 1 - \text{IoU}(x, class)
Reference:
[1] http://proceedings.mlr.press/v37/yub15.pdf
[2] https://arxiv.org/pdf/1705.08790.pdf
. note::
This loss function only supports binary labels. For multi-class labels please
use the Lovasz-Softmax loss.
Args:
input: logits tensor with shape :math:`(N, 1, H, W)`.
labels: labels tensor with shape :math:`(N, H, W)` with binary values.
Return:
a scalar with the computed loss.
Example:
>>> N = 1 # num_classes
>>> input = torch.randn(1, N, 3, 5, requires_grad=True)
>>> target = torch.empty(1, 3, 5, dtype=torch.long).random_(N)
>>> output = lovasz_hinge_loss(input, target)
>>> output.backward()
"""
if not isinstance(input, torch.Tensor):
raise TypeError(f"Input type is not a torch.Tensor. Got {type(input)}")
if not isinstance(target, torch.Tensor):
raise TypeError(f"Target type is not a torch.Tensor. Got {type(target)}")
if not len(input.shape) == 4:
raise ValueError(f"Invalid input shape, we expect Bx1xHxW. Got: {input.shape}")
if not len(target.shape) == 3:
raise ValueError(f"Invalid target shape, we expect BxHxW. Got: {target.shape}")
if not input.shape[1] == 1:
raise ValueError(f"Invalid input shape, we expect Bx1xHxW. Got: {input.shape}")
if not input.shape[-2:] == target.shape[-2:]:
raise ValueError(f"input and target shapes must be the same. Got: {input.shape} and {target.shape}")
if not input.device == target.device:
raise ValueError(f"input and target must be in the same device. Got: {input.device} and {target.device}")
# flatten input and target [B, -1] and to float
input_flatten: torch.Tensor = input.flatten(start_dim=1)
target_flatten: torch.Tensor = target.flatten(start_dim=1)
# get shapes
B, N = input_flatten.shape
# compute probabilities
input_prob: torch.Tensor = torch.sigmoid(input_flatten)
# compute actual loss
signs = 2. * target_flatten - 1.
errors = 1. - input_prob * signs
errors_sorted, permutation = torch.sort(errors, dim=1, descending=True)
batch_index: torch.Tensor = torch.arange(B, device=input.device).repeat_interleave(N, dim=0)
target_sorted: torch.Tensor = target_flatten[batch_index, permutation.view(-1)]
target_sorted: torch.Tensor = target_sorted.view(B, N)
target_sorted_sum: torch.Tensor = target_sorted.sum(dim=1, keepdim=True)
intersection: torch.Tensor = target_sorted_sum - target_sorted.cumsum(dim=1)
union: torch.Tensor = target_sorted_sum + (1. - target_sorted).cumsum(dim=1)
gradient: torch.Tensor = 1. - intersection / union
if N > 1:
gradient[..., 1:] = gradient[..., 1:] - gradient[..., :-1]
loss: torch.Tensor = (F.relu(errors_sorted) * gradient).sum(dim=1).mean()
return loss
|
34,229 |
def get_component_class(component_name: Text) -> Type["Component"]:
"""Resolve component name to a registered components class."""
if component_name not in registered_components:
if component_name not in old_style_names:
try:
return class_from_module_path(component_name)
except ModuleNotFoundError as e:
# when component_name is a path to a class but that path is invalid
raise Exception(
"Failed to find component class for '{}'.Unknown component name.\n{}".format(
component_name, e.msg
)
)
except AttributeError:
# when component_name is a path to a class but the path does not contain that class
module_name, _, class_name = component_name.rpartition(".")
raise Exception(
"Failed to find component class for '{}'.Unknown component name.\n"
"Cannot find class '{}' in module {}.".format(
component_name, class_name, module_name
)
)
except ImportError:
# when component_name is a class name and not part of old_style_names
raise Exception(
"Failed to find component class for '{0}'.Unknown component name.\n"
"Cannot import class '{0}' from global namespace.".format(
component_name
)
)
else:
# DEPRECATED ensures compatibility, remove in future versions
logger.warning(
"DEPRECATION warning: your nlu config file "
"contains old style component name `{}`, "
"you should change it to its class name: `{}`."
"".format(component_name, old_style_names[component_name])
)
component_name = old_style_names[component_name]
return registered_components[component_name]
|
def get_component_class(component_name: Text) -> Type["Component"]:
"""Resolve component name to a registered components class."""
if component_name not in registered_components:
if component_name not in old_style_names:
try:
return class_from_module_path(component_name)
except ModuleNotFoundError as e:
# when component_name is a path to a class but that path is invalid
raise Exception(
"Failed to find component class for '{}'.Unknown component name.\n{}".format(
component_name, e.msg
)
)
except AttributeError:
# when component_name is a path to a class but the path does not contain that class
module_name, _, class_name = component_name.rpartition(".")
raise Exception(
"Failed to find class '{}' in module '{}'.\n"
"Cannot find class '{}' in module {}.".format(
component_name, class_name, module_name
)
)
except ImportError:
# when component_name is a class name and not part of old_style_names
raise Exception(
"Failed to find component class for '{0}'.Unknown component name.\n"
"Cannot import class '{0}' from global namespace.".format(
component_name
)
)
else:
# DEPRECATED ensures compatibility, remove in future versions
logger.warning(
"DEPRECATION warning: your nlu config file "
"contains old style component name `{}`, "
"you should change it to its class name: `{}`."
"".format(component_name, old_style_names[component_name])
)
component_name = old_style_names[component_name]
return registered_components[component_name]
|
57,795 |
def is_there_private_packs_to_upload(public_index_json, private_index_path):
""" Checks if there are private packs that should be uploaded.
The check compares the private index with the public one to verify if Content commit hash of each private pack in
those files (private and public index files) are equal. If there is one private pack that has a different
content commit hash, it tells us that this pack was updated and should be uploaded. So, an upload flow should NOT
be skipped.
Args:
public_index_json (dict) : The public index file.
private_index_path : Path to where the private index is located.
Returns:
(bool) True is there is at least one private pack that should be upload.
False otherwise (i.e there are no private packs that should upload)
"""
logging.debug("Checking if there are private packs to upload")
with open(os.path.join(private_index_path, f"{GCPConfig.INDEX_NAME}.json")) as private_index_file:
private_index_json = json.load(private_index_file)
if was_private_pack_updated(private_index_json, public_index_json):
logging.debug(f"There is at least one private pack that was updated, upload should not be skipped")
return True
return False
|
def is_there_private_packs_to_upload(public_index_json, private_index_path):
""" Checks if there are private packs that should be uploaded.
The check compares the private index with the public one to verify if Content commit hash of each private pack in
those files (private and public index files) are equal. If there is one private pack that has a different
content commit hash, it tells us that this pack was updated and should be uploaded. So, an upload flow should NOT
be skipped.
Args:
public_index_json (dict) : The public index file.
private_index_path : Path to where the private index is located.
Returns:
(bool) True is there is at least one private pack that should be upload.
False otherwise (i.e there are no private packs that should upload)
"""
logging.debug("Checking if there are private packs to upload")
with open(os.path.join(private_index_path, f"{GCPConfig.INDEX_NAME}.json")) as file:
private_index_json = json.load(file)
if was_private_pack_updated(private_index_json, public_index_json):
logging.debug(f"There is at least one private pack that was updated, upload should not be skipped")
return True
return False
|
59,522 |
def data_to_binary(obj, serializer, **kwargs):
"""Convert object into binary data with specified serializer.
Args:
obj (any): Object to serialize.
serializer (Callable): Serializer callback that can handle input object type.
kwargs: Options set to the serializer.
Returns:
bytes: Binary data.
"""
with io.BytesIO() as container:
serializer(container, obj, **kwargs)
container.seek(0)
binary_data = container.read()
return binary_data
|
def data_to_binary(obj, serializer, **kwargs):
"""Convert object into binary data with specified serializer.
Args:
obj (any): Object to serialize.
serializer (Callable): Serializer callback that can handle input object type.
kwargs: Options set to the serializer.
Returns:
bytes: Binary data.
"""
with io.BytesIO() as container:
serializer(container, obj, **kwargs)
binary_data = container.getvalue()
return binary_data
|
48,133 |
def test_help_stdouputs_of_tools():
with open("QUICK_START_GUIDE.md", encoding="UTF-8") as read_file:
commands = []
full_text = ''
for line in read_file:
full_text += line
if "ote" in line and "--help" in line:
commands.append(line.strip().split(' '))
for command in commands:
output = run(command, capture_output=True)
help_message = output.stdout.decode()
found = True
if help_message not in full_text:
found = False
for _ in range(10):
help_message = "\n".join([" " + line for line in help_message.split("\n")])
if help_message in full_text:
found = True
break
assert found, f"\n{output.stdout.decode()}"
|
def test_help_stdouputs_of_tools():
with open("QUICK_START_GUIDE.md", encoding="UTF-8") as read_file:
commands = []
full_text = ''
for line in read_file:
full_text += line
if "ote" in line and "--help" in line:
commands.append(line.strip().split(' '))
for command in commands:
output = run(command, capture_output=True)
help_message = output.stdout.decode()
found = True
if help_message not in full_text:
found = False
for _ in range(10):
help_message = "\n".join([" " + line for line in help_message.split("\n")])
if help_message in full_text:
found = True
break
assert found, f"\nHelp message:\n{output.stdout.decode()}\n was not found in \n{full_text}"
|
54,395 |
def mark_package_needs_build(
pkg_map: Dict[str, BasePackage], pkg: BasePackage, needs_build: Set[str]
):
needs_build.add(pkg.name)
for dep in pkg.dependents:
mark_package_needs_build(pkg_map, pkg_map[dep], needs_build)
|
def mark_package_needs_build(
pkg_map: Dict[str, BasePackage], pkg: BasePackage
) -> Set[str]:
needs_build = set(pkg.name)
for dep in pkg.dependents:
needs_build.update(mark_package_needs_build(pkg_map, pkg_map[dep]))
return needs_build
|
30,836 |
def fetch_incidents():
"""
Fetches incident using the detections API
:return: Fetched detections in incident format
"""
incidents = [] # type:List
incidents_or_detections = demisto.params().get('fetch_incidents_or_detections')
if 'detections' in incidents_or_detections or not incidents_or_detections:
last_run = demisto.getLastRun()
# Get the last fetch time, if exists
last_fetch = last_run.get('first_behavior_time')
# Handle first time fetch, fetch incidents retroactively
if last_fetch is None:
last_fetch, _ = parse_date_range(FETCH_TIME, date_format='%Y-%m-%dT%H:%M:%SZ')
last_fetch_timestamp = int(parse(last_fetch).timestamp() * 1000)
last_detection_id = str(last_run.get('last_detection_id'))
fetch_query = demisto.params().get('fetch_query')
if fetch_query:
fetch_query = "created_timestamp:>'{time}'+{query}".format(time=last_fetch, query=fetch_query)
detections_ids = demisto.get(get_fetch_detections(filter_arg=fetch_query), 'resources')
else:
detections_ids = demisto.get(get_fetch_detections(last_created_timestamp=last_fetch), 'resources')
if detections_ids:
# make sure we do not fetch the same detection again.
if last_detection_id == detections_ids[0]:
first_index_to_fetch = 1
# if this is the only detection - dont fetch.
if len(detections_ids) == 1:
return incidents
# if the first detection in this pull is different than the last detection fetched we bring it as well
else:
first_index_to_fetch = 0
# Limit the results to INCIDENTS_PER_FETCH`z
last_index_to_fetch = INCIDENTS_PER_FETCH + first_index_to_fetch
detections_ids = detections_ids[first_index_to_fetch:last_index_to_fetch]
raw_res = get_detections_entities(detections_ids)
if "resources" in raw_res:
raw_res['type'] = "detections"
for detection in raw_res.get("resources"):
incident = detection_to_incident(detection)
incident_date = incident['occurred']
incident_date_timestamp = int(parse(incident_date).timestamp() * 1000)
# make sure that the two timestamps are in the same length
if len(str(incident_date_timestamp)) != len(str(last_fetch_timestamp)):
incident_date_timestamp, last_fetch_timestamp = timestamp_length_equalization(
incident_date_timestamp, last_fetch_timestamp)
# Update last run and add incident if the incident is newer than last fetch
if incident_date_timestamp > last_fetch_timestamp:
last_fetch = incident_date
last_fetch_timestamp = incident_date_timestamp
last_detection_id = json.loads(incident['rawJSON']).get('detection_id')
incidents.append(incident)
demisto.setLastRun({'first_behavior_time': last_fetch, 'last_detection_id': last_detection_id})
if 'incidents' in incidents_or_detections:
last_run = demisto.getLastRun()
last_fetch = last_run.get('first_behavior_incident_time')
if last_fetch is None:
last_fetch, _ = parse_date_range(FETCH_TIME, date_format='%Y-%m-%dT%H:%M:%SZ')
last_fetch_timestamp = int(parse(last_fetch).timestamp() * 1000)
last_incident_id = str(last_run.get('last_incident_id'))
fetch_query = demisto.params().get('fetch_query')
if fetch_query:
fetch_query = "modified_timestamp:>'{time}'+{query}".format(time=last_fetch, query=fetch_query)
incidents_ids = demisto.get(get_fetch_incidents(filter_arg=fetch_query), 'resources')
else:
incidents_ids = demisto.get(get_fetch_incidents(last_created_timestamp=last_fetch), 'resources')
if incidents_ids:
# make sure we do not fetch the same detection again.
if last_incident_id == incidents_ids[0]:
first_index_to_fetch = 1
# if this is the only detection - dont fetch.
if len(incidents_ids) == 1:
return incidents
# if the first detection in this pull is different than the last detection fetched we bring it as well
else:
first_index_to_fetch = 0
# Limit the results to INCIDENTS_PER_FETCH`z
last_index_to_fetch = INCIDENTS_PER_FETCH + first_index_to_fetch
incidents_ids = incidents_ids[first_index_to_fetch:last_index_to_fetch]
raw_res = get_incidents_entities(incidents_ids)
if "resources" in raw_res:
raw_res['type'] = "incidents"
for incident in raw_res.get("resources"):
incident_to_context = incident_to_incident_context(incident)
incident_date = incident_to_context['occurred']
incident_date_timestamp = int(parse(incident_date).timestamp() * 1000)
# make sure that the two timestamps are in the same length
if len(str(incident_date_timestamp)) != len(str(last_fetch_timestamp)):
incident_date_timestamp, last_fetch_timestamp = timestamp_length_equalization(
incident_date_timestamp, last_fetch_timestamp)
# Update last run and add incident if the incident is newer than last fetch
if incident_date_timestamp > last_fetch_timestamp:
last_fetch = incident_date
last_fetch_timestamp = incident_date_timestamp
last_incident_id = json.loads(incident_to_context['rawJSON']).get('incident_id')
incidents.append(incident_to_context)
demisto.setLastRun({'first_behavior_incident_time': last_fetch, 'last_incident_id': last_incident_id})
return incidents
|
def fetch_incidents():
"""
Fetches incident using the detections API
:return: Fetched detections in incident format
"""
incidents = [] # type:List
incidents_or_detections = demisto.params().get('fetch_incidents_or_detections')
if 'detections' in incidents_or_detections or not incidents_or_detections:
last_run = demisto.getLastRun()
# Get the last fetch time, if exists
last_fetch = last_run.get('first_behavior_time')
# Handle first time fetch, fetch incidents retroactively
if last_fetch is None:
last_fetch, _ = parse_date_range(FETCH_TIME, date_format='%Y-%m-%dT%H:%M:%SZ')
last_fetch_timestamp = int(parse(last_fetch).timestamp() * 1000)
last_detection_id = str(last_run.get('last_detection_id'))
fetch_query = demisto.params().get('fetch_query')
if fetch_query:
fetch_query = "created_timestamp:>'{time}'+{query}".format(time=last_fetch, query=fetch_query)
detections_ids = demisto.get(get_fetch_detections(filter_arg=fetch_query), 'resources')
else:
detections_ids = demisto.get(get_fetch_detections(last_created_timestamp=last_fetch), 'resources')
if detections_ids:
# make sure we do not fetch the same detection again.
if last_detection_id == detections_ids[0]:
first_index_to_fetch = 1
# if this is the only detection - dont fetch.
if len(detections_ids) == 1:
return incidents
# if the first detection in this pull is different than the last detection fetched we bring it as well
else:
first_index_to_fetch = 0
# Limit the results to INCIDENTS_PER_FETCH`z
last_index_to_fetch = INCIDENTS_PER_FETCH + first_index_to_fetch
detections_ids = detections_ids[first_index_to_fetch:last_index_to_fetch]
raw_res = get_detections_entities(detections_ids)
if "resources" in raw_res:
raw_res['type'] = "detections"
for detection in raw_res.get("resources"):
incident = detection_to_incident(detection)
incident_date = incident['occurred']
incident_date_timestamp = int(parse(incident_date).timestamp() * 1000)
# make sure that the two timestamps are in the same length
if len(str(incident_date_timestamp)) != len(str(last_fetch_timestamp)):
incident_date_timestamp, last_fetch_timestamp = timestamp_length_equalization(
incident_date_timestamp, last_fetch_timestamp)
# Update last run and add incident if the incident is newer than last fetch
if incident_date_timestamp > last_fetch_timestamp:
last_fetch = incident_date
last_fetch_timestamp = incident_date_timestamp
last_detection_id = json.loads(incident['rawJSON']).get('detection_id')
incidents.append(incident)
demisto.setLastRun({'first_behavior_time': last_fetch, 'last_detection_id': last_detection_id})
if 'incidents' in incidents_or_detections:
last_run = demisto.getLastRun()
last_fetch = last_run.get('first_behavior_incident_time')
if not last_fetch:
last_fetch, _ = parse_date_range(FETCH_TIME, date_format='%Y-%m-%dT%H:%M:%SZ')
last_fetch_timestamp = int(parse(last_fetch).timestamp() * 1000)
last_incident_id = str(last_run.get('last_incident_id'))
fetch_query = demisto.params().get('fetch_query')
if fetch_query:
fetch_query = "modified_timestamp:>'{time}'+{query}".format(time=last_fetch, query=fetch_query)
incidents_ids = demisto.get(get_fetch_incidents(filter_arg=fetch_query), 'resources')
else:
incidents_ids = demisto.get(get_fetch_incidents(last_created_timestamp=last_fetch), 'resources')
if incidents_ids:
# make sure we do not fetch the same detection again.
if last_incident_id == incidents_ids[0]:
first_index_to_fetch = 1
# if this is the only detection - dont fetch.
if len(incidents_ids) == 1:
return incidents
# if the first detection in this pull is different than the last detection fetched we bring it as well
else:
first_index_to_fetch = 0
# Limit the results to INCIDENTS_PER_FETCH`z
last_index_to_fetch = INCIDENTS_PER_FETCH + first_index_to_fetch
incidents_ids = incidents_ids[first_index_to_fetch:last_index_to_fetch]
raw_res = get_incidents_entities(incidents_ids)
if "resources" in raw_res:
raw_res['type'] = "incidents"
for incident in raw_res.get("resources"):
incident_to_context = incident_to_incident_context(incident)
incident_date = incident_to_context['occurred']
incident_date_timestamp = int(parse(incident_date).timestamp() * 1000)
# make sure that the two timestamps are in the same length
if len(str(incident_date_timestamp)) != len(str(last_fetch_timestamp)):
incident_date_timestamp, last_fetch_timestamp = timestamp_length_equalization(
incident_date_timestamp, last_fetch_timestamp)
# Update last run and add incident if the incident is newer than last fetch
if incident_date_timestamp > last_fetch_timestamp:
last_fetch = incident_date
last_fetch_timestamp = incident_date_timestamp
last_incident_id = json.loads(incident_to_context['rawJSON']).get('incident_id')
incidents.append(incident_to_context)
demisto.setLastRun({'first_behavior_incident_time': last_fetch, 'last_incident_id': last_incident_id})
return incidents
|
27,370 |
def angle_between_base_planes(universe, b1, b2, seg1="SYSTEM", seg2="SYSTEM"):
"""The angle between the planes for given two bases is computed.
.. Note:: This angle calculation will only work if using atom names as
documented by charmm force field parameters.
Parameters
----------
universe : Universe
:class:`~MDAnalysis.core.universe.Universe` containing the
trajectory
b1 : int
resid of the first base
b2 : int
resid of the second base
segid1 : str (optional)
segid of b1
segid2 : str (optional)
segid of b2
Returns
-------
float
angle in degrees
.. versionadded:: 1.0.1
"""
baseatoms = { 'CYT': ['N1', 'N3', 'C5'],
'THY': ['N1', 'N3', 'C5'],
'URA': ['N1', 'N3', 'C5'],
'ADE': ['N9', 'N1', 'N7'],
'GUA': ['N9', 'N1', 'N7'] }
# select residues
bf1 = universe.select_atoms("(segid {0!s} and resid {1!s})".format( seg1, b1))
bf2 = universe.select_atoms("(segid {0!s} and resid {1!s})".format( seg2, b2))
# extract positions for specific atoms of each base
c11 = bf1.select_atoms("name {0!s}".format(baseatoms[bf1.atoms.resnames[0]][0])).positions[0]
c12 = bf1.select_atoms("name {0!s}".format(baseatoms[bf1.atoms.resnames[0]][1])).positions[0]
c13 = bf1.select_atoms("name {0!s}".format(baseatoms[bf1.atoms.resnames[0]][2])).positions[0]
c21 = bf2.select_atoms("name {0!s}".format(baseatoms[bf2.atoms.resnames[0]][0])).positions[0]
c22 = bf2.select_atoms("name {0!s}".format(baseatoms[bf2.atoms.resnames[0]][1])).positions[0]
c23 = bf2.select_atoms("name {0!s}".format(baseatoms[bf2.atoms.resnames[0]][2])).positions[0]
# get normals to the planes of the bases
n1 = mdamath.normal(c12-c11,c13-c11)
n2 = mdamath.normal(c22-c21,c23-c21)
# calculate angle between the normal vectors
angl = mdamath.angle(n1,n2)
angl = np.rad2deg(angl) % 360
return angl
|
def angle_between_base_planes(universe, b1, b2, seg1="SYSTEM", seg2="SYSTEM"):
"""The angle between the planes for given two bases is computed.
.. Note:: This angle calculation will only work if using atom names as
documented by charmm force field parameters.
Parameters
----------
universe : Universe
:class:`~MDAnalysis.core.universe.Universe` containing the
trajectory
b1 : int
resid of the first base
b2 : int
resid of the second base
segid1 : str (optional)
segid of b1
segid2 : str (optional)
segid of b2
Returns
-------
float
angle in degrees
.. versionadded:: 2.1.0
"""
baseatoms = { 'CYT': ['N1', 'N3', 'C5'],
'THY': ['N1', 'N3', 'C5'],
'URA': ['N1', 'N3', 'C5'],
'ADE': ['N9', 'N1', 'N7'],
'GUA': ['N9', 'N1', 'N7'] }
# select residues
bf1 = universe.select_atoms("(segid {0!s} and resid {1!s})".format( seg1, b1))
bf2 = universe.select_atoms("(segid {0!s} and resid {1!s})".format( seg2, b2))
# extract positions for specific atoms of each base
c11 = bf1.select_atoms("name {0!s}".format(baseatoms[bf1.atoms.resnames[0]][0])).positions[0]
c12 = bf1.select_atoms("name {0!s}".format(baseatoms[bf1.atoms.resnames[0]][1])).positions[0]
c13 = bf1.select_atoms("name {0!s}".format(baseatoms[bf1.atoms.resnames[0]][2])).positions[0]
c21 = bf2.select_atoms("name {0!s}".format(baseatoms[bf2.atoms.resnames[0]][0])).positions[0]
c22 = bf2.select_atoms("name {0!s}".format(baseatoms[bf2.atoms.resnames[0]][1])).positions[0]
c23 = bf2.select_atoms("name {0!s}".format(baseatoms[bf2.atoms.resnames[0]][2])).positions[0]
# get normals to the planes of the bases
n1 = mdamath.normal(c12-c11,c13-c11)
n2 = mdamath.normal(c22-c21,c23-c21)
# calculate angle between the normal vectors
angl = mdamath.angle(n1,n2)
angl = np.rad2deg(angl) % 360
return angl
|
59,119 |
def test_load_backend_if_not_loaded_load_once(manager, monkeypatch):
"""Test the :meth:`aiida.cmdline.utils.decorators.load_backend_if_not_loaded` calls load profile only once."""
mocked = mock.Mock()
with monkeypatch.context() as context:
context.setattr(manager.__class__, 'get_profile_storage', mocked)
load_backend_if_not_loaded()
assert mocked.call_count == 1
assert not manager.profile_storage_loaded
# This is necessary, despute the previous change being in a context. Without it, subsequent tests that require
# the profile storage will fail.
monkeypatch.undo()
# Now actually call ``get_profile_storage`` because since it was mocked in the previous call, it won't actually
# have been called and the implemenation of ``load_backend_if_not_loaded`` working correctly depends on the
# profile storage actually having been initialized.
manager.get_profile_storage()
assert manager.profile_storage_loaded
with monkeypatch.context() as context:
context.setattr(manager.__class__, 'get_profile_storage', mocked)
load_backend_if_not_loaded()
assert mocked.call_count == 1, 'Apparently `Manager.get_profile_storage` got called again, which is a bug'
|
def test_load_backend_if_not_loaded_load_once(manager, monkeypatch):
"""Test the :meth:`aiida.cmdline.utils.decorators.load_backend_if_not_loaded` calls load profile only once."""
mocked = mock.Mock()
with monkeypatch.context() as context:
context.setattr(manager.__class__, 'get_profile_storage', mocked)
load_backend_if_not_loaded()
assert mocked.call_count == 1
assert not manager.profile_storage_loaded
# This is necessary, despite the previous change being in a context. Without it, subsequent tests that require
# the profile storage will fail.
monkeypatch.undo()
# Now actually call ``get_profile_storage`` because since it was mocked in the previous call, it won't actually
# have been called and the implemenation of ``load_backend_if_not_loaded`` working correctly depends on the
# profile storage actually having been initialized.
manager.get_profile_storage()
assert manager.profile_storage_loaded
with monkeypatch.context() as context:
context.setattr(manager.__class__, 'get_profile_storage', mocked)
load_backend_if_not_loaded()
assert mocked.call_count == 1, 'Apparently `Manager.get_profile_storage` got called again, which is a bug'
|
21,263 |
def get_filename_from_headers(headers):
"""
Get the filename of the downloaded file by inspecting the
Content-Disposition HTTP header.
Args:
headers (twisted.web.http_headers.Headers): The HTTP
request headers.
Returns:
A Unicode string of the filename, or None.
"""
content_disposition = headers.get(b"Content-Disposition", [b''])
# Decode the Content-Disposition header (cgi.parse_header requires
# unicode on Python 3, not bytes) the best we can.
try:
content_disposition = content_disposition[0].decode('utf8')
except UnicodeDecodeError:
# Wasn't valid UTF-8, therefore not valid ASCII. Give up on figuring
# out what the mess they've sent is.
content_disposition = None
if not content_disposition:
return None
_, params = cgi.parse_header(content_disposition)
upload_name = None
# First check if there is a valid UTF-8 filename
upload_name_utf8 = params.get("filename*", None)
if upload_name_utf8:
if upload_name_utf8.lower().startswith("utf-8''"):
upload_name = upload_name_utf8[7:]
# If there isn't check for an ascii name.
if not upload_name:
upload_name_ascii = params.get("filename", None)
if upload_name_ascii and is_ascii(upload_name_ascii):
upload_name = upload_name_ascii
if not upload_name:
# We couldn't find a valid filename in the headers.
return None
# Unquote the string
if PY3:
upload_name = urllib.parse.unquote(upload_name)
else:
# Needs to be bytes on Python 2
upload_name = urllib.parse.unquote(upload_name.encode('utf8'))
try:
if isinstance(upload_name, bytes):
upload_name = upload_name.decode("utf-8")
except UnicodeDecodeError:
upload_name = None
return upload_name
|
def get_filename_from_headers(headers):
"""
Get the filename of the downloaded file by inspecting the
Content-Disposition HTTP header.
Args:
headers (twisted.web.http_headers.Headers): The HTTP
request headers.
Returns:
A Unicode string of the filename, or None.
"""
content_disposition = headers.get(b"Content-Disposition", [b''])
# Decode the Content-Disposition header (cgi.parse_header requires
# unicode on Python 3, not bytes) the best we can.
try:
content_disposition = content_disposition[0].decode('utf8')
except UnicodeDecodeError:
# Wasn't valid UTF-8, therefore not valid ASCII. Give up on figuring
# out what the mess they've sent is.
content_disposition = None
if not content_disposition:
return None
_, params = cgi.parse_header(content_disposition)
upload_name = None
# First check if there is a valid UTF-8 filename
upload_name_utf8 = params.get("filename*", None)
if upload_name_utf8:
if upload_name_utf8.lower().startswith("utf-8''"):
upload_name = upload_name_utf8[7:]
# If there isn't check for an ascii name.
if not upload_name:
upload_name_ascii = params.get("filename", None)
if upload_name_ascii and is_ascii(upload_name_ascii):
upload_name = upload_name_ascii
if not upload_name:
# We couldn't find a valid filename in the headers.
return None
# Unquote the string
if PY3:
upload_name = urllib.parse.unquote(upload_name)
else:
# Needs to be bytes on Python 2
upload_name = urllib.parse.unquote(upload_name.encode('utf8'))
try:
if isinstance(upload_name, bytes):
upload_name = upload_name.decode("utf-8")
except UnicodeDecodeError:
upload_name = None
return upload_name
|
33,302 |
def date_list_to_queryset(date_list, table):
or_queryset = Q()
for v in date_list:
# Modified May 2018 so that there will always be a start and end value from combine_date_range_queryset()
date_type_dict = v.get("date_type_dict", {"gte": "action_date", "lte": "action_date"})
for date_type in date_type_dict.values():
if date_type not in ["action_date", "last_modified_date", "date_signed"]:
raise InvalidParameterException("Invalid date_type: {}".format(date_type))
# (StartA <= EndB) and (EndA >= StartB)
# where "A" is an Award and "B" is the date range being searched
kwargs = {
"{}__gte".format(date_type_dict["gte"]): v["start_date"],
"{}__lte".format(date_type_dict["lte"]): v["end_date"],
}
or_queryset |= Q(**kwargs)
return table.objects.filter(or_queryset)
|
def date_list_to_queryset(date_list, table):
or_queryset = Q()
for v in date_list:
date_type_dict = v.get("date_type_dict", {"gte": "action_date", "lte": "action_date"})
for date_type in date_type_dict.values():
if date_type not in ["action_date", "last_modified_date", "date_signed"]:
raise InvalidParameterException("Invalid date_type: {}".format(date_type))
# (StartA <= EndB) and (EndA >= StartB)
# where "A" is an Award and "B" is the date range being searched
kwargs = {
"{}__gte".format(date_type_dict["gte"]): v["start_date"],
"{}__lte".format(date_type_dict["lte"]): v["end_date"],
}
or_queryset |= Q(**kwargs)
return table.objects.filter(or_queryset)
|
41,756 |
def create_model(vocab, trial: optuna.Trial):
embedding = allennlp.modules.Embedding(
embedding_dim=50,
trainable=True,
pretrained_file=GLOBE_FILE_PATH,
num_embeddings=vocab.get_vocab_size('tokens'),
)
embedder = allennlp.modules.text_field_embedders.BasicTextFieldEmbedder(
{'tokens': embedding}
)
output_dim = trial.suggest_int('output_dim', 10, 100)
max_filter_size = trial.suggest_int('max_filter_size', 3, 6)
num_filters = trial.suggest_int('num_filters', 64, 512)
encoder = allennlp.modules.seq2vec_encoders.CnnEncoder(
ngram_filter_sizes=range(1, max_filter_size),
num_filters=num_filters,
embedding_dim=50,
output_dim=output_dim,
)
dropout = trial.suggest_uniform('dropout', 0, 0.5)
model = allennlp.models.BasicClassifier(
text_field_embedder=embedder,
seq2vec_encoder=encoder,
dropout=dropout,
vocab=vocab,
)
return model
|
def create_model(vocab, trial: optuna.Trial):
embedding = allennlp.modules.Embedding(
embedding_dim=50,
trainable=True,
pretrained_file=GLOVE_FILE_PATH,
num_embeddings=vocab.get_vocab_size('tokens'),
)
embedder = allennlp.modules.text_field_embedders.BasicTextFieldEmbedder(
{'tokens': embedding}
)
output_dim = trial.suggest_int('output_dim', 10, 100)
max_filter_size = trial.suggest_int('max_filter_size', 3, 6)
num_filters = trial.suggest_int('num_filters', 64, 512)
encoder = allennlp.modules.seq2vec_encoders.CnnEncoder(
ngram_filter_sizes=range(1, max_filter_size),
num_filters=num_filters,
embedding_dim=50,
output_dim=output_dim,
)
dropout = trial.suggest_uniform('dropout', 0, 0.5)
model = allennlp.models.BasicClassifier(
text_field_embedder=embedder,
seq2vec_encoder=encoder,
dropout=dropout,
vocab=vocab,
)
return model
|
27,558 |
def send_exception(stacktrace, source):
"""Send exception stacktrace over HTTP for tracking"""
# Check if the user wants to send metrics and errors
if s.get("send_metrics"):
data = urllib.parse.urlencode({"stacktrace": stacktrace,
"platform": platform.system(),
"version": info.VERSION,
"source": source,
"unique_install_id": s.get("unique_install_id")})
url = "http://www.openshot.org/exception/json/"
# Send exception HTTP data
try:
r = requests.post(url, data=data, headers={"user-agent": user_agent, "content-type": "application/x-www-form-urlencoded"}, verify=False)
log.info("Track exception: [%s] %s | %s", r.status_code, r.url, r.text)
except Exception as ex:
log.warning("Failed to Track exception: %s", ex)
|
def send_exception(stacktrace, source):
"""Send exception stacktrace over HTTP for tracking"""
# Check if the user wants to send metrics and errors
if s.get("send_metrics"):
data = urllib.parse.urlencode({"stacktrace": stacktrace,
"platform": platform.system(),
"version": info.VERSION,
"source": source,
"unique_install_id": s.get("unique_install_id")})
url = "http://www.openshot.org/exception/json/"
# Send exception HTTP data
try:
r = requests.post(url, data=data, headers={"user-agent": user_agent, "content-type": "application/x-www-form-urlencoded"}, verify=False)
log.info("Track exception: [%s] %s | %s", r.status_code, r.url, r.text)
except Exception as ex:
log.warning("Failed to track exception", exc_info=1)
|
33,084 |
def test_matcher_called():
"""Test that if the decorated skill is called, the skill function gets called"""
skill = _TestSkill(None, None)
message = Mock()
skill.hello_skill(message)
assert message.respond.called_once
|
def test_matcher_called():
"""Test that if the decorated skill is called, the skill function gets called."""
skill = _TestSkill(None, None)
message = Mock()
skill.hello_skill(message)
assert message.respond.called_once
|
32,555 |
def securerank_to_dbotscore(sr):
# converts cisco umbrella score to dbotscore
DBotScore = 0
if sr is not None:
if SUSPICOUS_THRESHOLD < sr <= 100:
DBotScore = 1
elif MALICIOUS_THRESHOLD < sr <= SUSPICOUS_THRESHOLD:
DBotScore = 2
elif sr <= MALICIOUS_THRESHOLD:
DBotScore = 3
return DBotScore
|
def securerank_to_dbotscore(sr):
# converts cisco umbrella score to dbotscore
DBotScore = 0
if sr is not None:
if SUSPICIOUS_THRESHOLD < sr <= 100:
DBotScore = 1
elif MALICIOUS_THRESHOLD < sr <= SUSPICOUS_THRESHOLD:
DBotScore = 2
elif sr <= MALICIOUS_THRESHOLD:
DBotScore = 3
return DBotScore
|
32,405 |
def fetch_incidents(client: Client):
user_key = 'me'
query = '' if params['query'] is None else params['query']
last_run = demisto.getLastRun()
demisto.debug(f'last run: {last_run}')
last_fetch = last_run.get('gmt_time')
next_last_fetch = last_run.get('next_gmt_time')
page_token = last_run.get('page_token') or None
ignore_ids: List[str] = last_run.get('ignore_ids') or []
ignore_list_used = last_run.get('ignore_list_used') or False # can we reset the ignore list if we haven't used it
# handle first time fetch - gets current GMT time -1 day
if not last_fetch:
last_fetch, _ = parse_date_range(date_range=FETCH_TIME, utc=True, to_timestamp=False)
last_fetch = str(last_fetch.isoformat(timespec='seconds')) + 'Z'
# use replace(tzinfo) to make the datetime aware of the timezone as all other dates we use are aware
last_fetch = client.parse_date_isoformat_server(last_fetch)
if next_last_fetch:
next_last_fetch = client.parse_date_isoformat_server(next_last_fetch)
else:
next_last_fetch = last_fetch + timedelta(seconds=1)
service = client.get_service('gmail', 'v1')
# use seconds for the filter (note that it is inclusive)
# see: https://developers.google.com/gmail/api/guides/filtering
query += f' after:{int(last_fetch.timestamp())}'
max_results = MAX_FETCH
if MAX_FETCH > 200:
max_results = 200
LOG(f'GMAIL: fetch parameters: user: {user_key} query={query}'
f' fetch time: {last_fetch} page_token: {page_token} max results: {max_results}')
result = service.users().messages().list(
userId=user_key, maxResults=max_results, pageToken=page_token, q=query).execute()
incidents = []
# so far, so good
LOG('GMAIL: possible new incidents are %s' % (result,))
for msg in result.get('messages', []):
msg_id = msg['id']
if msg_id in ignore_ids:
demisto.info(f'Ignoring msg id: {msg_id} as it is in the ignore list')
ignore_list_used = True
continue
msg_result = service.users().messages().get(
id=msg_id, userId=user_key).execute()
incident, occurred, is_valid_date = client.mail_to_incident(msg_result, service, user_key)
if not is_valid_date: # if we can't trust the date store the msg id in the ignore list
demisto.info(f'appending to ignore list msg id: {msg_id}. name: {incident.get("name")}')
ignore_list_used = True
ignore_ids.append(msg_id)
# update last run only if we trust the occurred timestamp
if is_valid_date and occurred > next_last_fetch:
next_last_fetch = occurred + timedelta(seconds=1)
# avoid duplication due to weak time query
if (not is_valid_date) or (occurred >= last_fetch):
incidents.append(incident)
else:
demisto.info(
f'skipped incident with lower date: {occurred} than fetch: {last_fetch} name: {incident.get("name")}')
demisto.info('extract {} incidents'.format(len(incidents)))
next_page_token = result.get('nextPageToken', '')
if next_page_token:
# we still have more results
demisto.info(f'keeping current last fetch: {last_fetch} as result has additional pages to fetch.'
f' token: {next_page_token}. Ignoring incremented last_fatch: {next_last_fetch}')
else:
demisto.debug(f'will use new last fetch date (no next page token): {next_last_fetch}')
# if we are not in a tokenized search and we didn't use the ignore ids we can reset it
if (not page_token) and (not ignore_list_used) and (len(ignore_ids) > 0):
demisto.info(f'reseting igonre list of len: {len(ignore_ids)}')
ignore_ids = []
last_fetch = next_last_fetch
demisto.setLastRun({
'gmt_time': client.get_date_isoformat_server(last_fetch),
'next_gmt_time': client.get_date_isoformat_server(next_last_fetch),
'page_token': next_page_token,
'ignore_ids': ignore_ids,
'ignore_list_used': ignore_list_used,
})
return incidents
|
def fetch_incidents(client: Client):
user_key = 'me'
query = '' if params['query'] is None else params['query']
last_run = demisto.getLastRun()
demisto.debug(f'last run: {last_run}')
last_fetch = last_run.get('gmt_time')
next_last_fetch = last_run.get('next_gmt_time')
page_token = last_run.get('page_token') or None
ignore_ids: List[str] = last_run.get('ignore_ids') or []
ignore_list_used = last_run.get('ignore_list_used') or False # can we reset the ignore list if we haven't used it
# handle first time fetch - gets current GMT time -1 day
if not last_fetch:
last_fetch, _ = parse_date_range(date_range=FETCH_TIME, utc=True, to_timestamp=False)
last_fetch = str(last_fetch.isoformat(timespec='seconds')) + 'Z'
# use replace(tzinfo) to make the datetime aware of the timezone as all other dates we use are aware
last_fetch = client.parse_date_isoformat_server(last_fetch)
if next_last_fetch:
next_last_fetch = client.parse_date_isoformat_server(next_last_fetch)
else:
next_last_fetch = last_fetch + timedelta(seconds=1)
service = client.get_service('gmail', 'v1')
# use seconds for the filter (note that it is inclusive)
# see: https://developers.google.com/gmail/api/guides/filtering
query += f' after:{int(last_fetch.timestamp())}'
max_results = MAX_FETCH
if MAX_FETCH > 200:
max_results = 200
LOG(f'GMAIL: fetch parameters: user: {user_key} query={query}'
f' fetch time: {last_fetch} page_token: {page_token} max results: {max_results}')
result = service.users().messages().list(
userId=user_key, maxResults=max_results, pageToken=page_token, q=query).execute()
incidents = []
# so far, so good
LOG(f'GMAIL: possible new incidents are {result}')
for msg in result.get('messages', []):
msg_id = msg['id']
if msg_id in ignore_ids:
demisto.info(f'Ignoring msg id: {msg_id} as it is in the ignore list')
ignore_list_used = True
continue
msg_result = service.users().messages().get(
id=msg_id, userId=user_key).execute()
incident, occurred, is_valid_date = client.mail_to_incident(msg_result, service, user_key)
if not is_valid_date: # if we can't trust the date store the msg id in the ignore list
demisto.info(f'appending to ignore list msg id: {msg_id}. name: {incident.get("name")}')
ignore_list_used = True
ignore_ids.append(msg_id)
# update last run only if we trust the occurred timestamp
if is_valid_date and occurred > next_last_fetch:
next_last_fetch = occurred + timedelta(seconds=1)
# avoid duplication due to weak time query
if (not is_valid_date) or (occurred >= last_fetch):
incidents.append(incident)
else:
demisto.info(
f'skipped incident with lower date: {occurred} than fetch: {last_fetch} name: {incident.get("name")}')
demisto.info('extract {} incidents'.format(len(incidents)))
next_page_token = result.get('nextPageToken', '')
if next_page_token:
# we still have more results
demisto.info(f'keeping current last fetch: {last_fetch} as result has additional pages to fetch.'
f' token: {next_page_token}. Ignoring incremented last_fatch: {next_last_fetch}')
else:
demisto.debug(f'will use new last fetch date (no next page token): {next_last_fetch}')
# if we are not in a tokenized search and we didn't use the ignore ids we can reset it
if (not page_token) and (not ignore_list_used) and (len(ignore_ids) > 0):
demisto.info(f'reseting igonre list of len: {len(ignore_ids)}')
ignore_ids = []
last_fetch = next_last_fetch
demisto.setLastRun({
'gmt_time': client.get_date_isoformat_server(last_fetch),
'next_gmt_time': client.get_date_isoformat_server(next_last_fetch),
'page_token': next_page_token,
'ignore_ids': ignore_ids,
'ignore_list_used': ignore_list_used,
})
return incidents
|
55,510 |
def _read(**kwargs) -> DataFrame:
"""
General documentation in `modin.pandas.read_csv`.
Experimental feature is simultaneous reading from multiple csv files which are
defined using glob pattern. Works only for local files.
Parameters
----------
**kwargs : dict
Keyword arguments in `modin.pandas.read_csv`.
Returns
-------
Modin DataFrame.
"""
from modin.data_management.factories.dispatcher import EngineDispatcher
Engine.subscribe(_update_engine)
try:
pd_obj = EngineDispatcher.read_csv_glob(**kwargs)
except AttributeError:
raise AttributeError("read_csv_glob() is only implemented for pandas on Ray.")
# This happens when `read_csv` returns a TextFileReader object for iterating through
if isinstance(pd_obj, pandas.io.parsers.TextFileReader):
reader = pd_obj.read
pd_obj.read = lambda *args, **kwargs: DataFrame(
query_compiler=reader(*args, **kwargs)
)
return pd_obj
return DataFrame(query_compiler=pd_obj)
|
def _read(**kwargs) -> DataFrame:
"""
General documentation in `modin.pandas.read_csv`.
This experimental feature provides parallel reading from multiple csv files which are
defined by glob pattern. Works for local files only!
Parameters
----------
**kwargs : dict
Keyword arguments in `modin.pandas.read_csv`.
Returns
-------
Modin DataFrame.
"""
from modin.data_management.factories.dispatcher import EngineDispatcher
Engine.subscribe(_update_engine)
try:
pd_obj = EngineDispatcher.read_csv_glob(**kwargs)
except AttributeError:
raise AttributeError("read_csv_glob() is only implemented for pandas on Ray.")
# This happens when `read_csv` returns a TextFileReader object for iterating through
if isinstance(pd_obj, pandas.io.parsers.TextFileReader):
reader = pd_obj.read
pd_obj.read = lambda *args, **kwargs: DataFrame(
query_compiler=reader(*args, **kwargs)
)
return pd_obj
return DataFrame(query_compiler=pd_obj)
|
8,285 |
def _readcube(rawdata, frame_start_index, frame_list,
width, height, channel_number,
width_norm, height_norm, rebin_energy,
SI_dtype, sweep, frame_shifts,
sum_frames, read_em_image, only_valid_data, lazy): # pragma: no cover
"""
Read spectrum image (and SEM/STEM image) from pts file
Parameters
----------
rawdata : numpy.ndarray
Spectrum image part of pts file.
frame_start_index : np.ndarray of shape (sweep+1, ) or (0, )
The indices of each frame start. If length is zero, the indices will be
determined from rawdata.
frame_list : list
List of frames to be read.
width, height : int
The navigation dimension.
channel_number : int
The number of channels.
width_norm, height_norm : int
Rebin factor of the navigation dimension
rebin_energy : int
Rebin factor of the energy dimension
sweep : int
Number of sweep
frame_shifts : list
The list of image positions [[x0,y0,z0], ...]. The x, y, z values can
be negative. The data points outside data cube are ignored.
Returns
-------
data : numpy.ndarray or dask.array
The spectrum image with shape (frame, x, y, energy) if sum_frames is
False, otherwise (x, y, energy).
If lazy is True, the dask array is a COO sparse array
em_data : numpy.ndarray or dask.array
The SEM/STEM image with shape (frame, x, y) if sum_frames is False,
otherwise (x, y).
has_em_image : bool
True if the stream contains SEM/STEM images.
sweep : int
The number of loaded frames.
frame_start_index : list
The indices of each frame start.
max_shift : numpy.ndarray
The maximum shifts of the origin in the navigation dimension
frame_shifts : numpy.ndarray
The shifts of the origin in the navigation dimension for each frame.
"""
import dask.array as da
# In case of sum_frames, spectrum image and SEM/STEM image are summing up to the same frame number.
# To avoid overflow on integration of SEM/STEM image, data type of np.uint32 is selected
# for 16 frames and over. (range of image intensity in each frame is 0-4095 (0-0xfff))
EM_dtype = np.uint16
frame_step = 1
if sum_frames:
frame_step = 0
if sweep >= 16:
EM_dtype = np.uint32
n_frames = 1
else:
n_frames = sweep + 1
if lazy:
hypermap = np.zeros((n_frames), dtype=EM_dtype) # dummy variable, not used
data_list = []
else:
hypermap = np.zeros((n_frames, height, width, channel_number),
dtype=SI_dtype)
em_image = np.zeros((n_frames, width, height), dtype=EM_dtype)
max_value = np.iinfo(SI_dtype).max
frame_shifts = np.asarray(frame_shifts)
frame_list = np.asarray(frame_list)
max_shift = frame_shifts[frame_list].max(axis=0)
min_shift = frame_shifts[frame_list].min(axis=0)
# sxyz = np.array([min_shift[0]-max_shift[0], min_shift[1]-max_shift[1],0])
min_shift[2]=0
max_shift[2]=0
sxyz = min_shift-max_shift
frame_shifts -= max_shift
width += sxyz[1]
height += sxyz[0]
if lazy:
readframe = _readframe_lazy
else:
readframe = _readframe_dense
frame_num = 0
p_start = 0
target_frame_num = 0
eof = rawdata.size
countup = 1
has_em_image = False
for frame_idx in frame_list:
if frame_idx < 0:
continue
elif frame_start_index[frame_idx] >= 0:
# if frame_idx is already indexed
p_start = frame_start_index[frame_idx]
elif frame_num < frame_idx and frame_start_index[frame_num] < 0:
# record start point of frame and skip frame
frame_start_index[frame_num] = p_start
p_start += _readframe_dummy(rawdata[p_start:])
frame_num += 1
continue
else:
frame_start_index[frame_idx] = p_start # = end of last frame
if frame_idx < frame_shifts.size:
fs = frame_shifts[frame_idx]
else:
fs = np.zeros(3, np.uint16)
_logger.info(f"Size of frame_shift array is too small. The frame {frame_idx} is not moved.")
length, frame_data, has_em, valid, max_valid = readframe(
rawdata[p_start:], 1,
hypermap[target_frame_num], em_image[target_frame_num],
width, height, channel_number,
width_norm, height_norm, rebin_energy,
fs[1], fs[0], fs[2], max_value)
has_em_image = has_em_image or has_em
if length == 0: # no data
break
if valid or not only_valid_data:
# accept last frame
if lazy:
data_list.append(frame_data)
frame_num += 1
target_frame_num += frame_step
else:
# incomplete data, not accepted
if sum_frames:
# subtract signal counts of last frame
_ = readframe(rawdata[p_start:], -1,
hypermap[target_frame_num], em_image[target_frame_num],
width, height, channel_number,
width_norm, height_norm, rebin_energy,
fs[1], fs[0],fs[2], max_value)
_logger.info("The last frame (sweep) is incomplete because the acquisition stopped during this frame. The partially acquired frame is ignored. Use 'sum_frames=False, only_valid_data=False' to read all frames individually, including the last partially completed frame.")
break
# else:
# pass
p_start += length
if not lazy:
if sum_frames:
# the first frame has integrated intensity
return hypermap[0,:height,:width], em_image[0,:height,:width], has_em_image, frame_num, frame_start_index, valid, max_shift, frame_shifts
else:
return hypermap[:target_frame_num,:height,:width], em_image[:target_frame_num,:height,:width], has_em_image, frame_num, frame_start_index, valid, max_shift, frame_shifts
# for lazy loading
from hyperspy.misc.io.fei_stream_readers import DenseSliceCOO
length = np.sum([len(d) for d in data_list])
# length = number of data points
# v : [[frame_no, y, x, energy_channel, 1], ....]
v = np.zeros(shape=(5, length), dtype=np.uint16)
ptr = 0
frame_count = 0
for d in data_list:
# d : data points in one frame
d = np.asarray(d)
# check if the pixels are in the valid data cube
# (frame_shifts make partially integrated area at the rim)
valid_cube = np.where((0<=d[:,0]) & (d[:,0]<height) & (0<=d[:,1]) & (d[:,1]<width) & (0<=d[:,2]) & (d[:,2]<channel_number))
d = d[valid_cube]
flen = len(d)
pv = v[:,ptr:ptr+flen]
pv[1:4, :] = np.array(d).transpose()
pv[0,:] = frame_count
pv[4,:] = 1
ptr += flen
frame_count += 1
if sum_frames:
data_shape = [height, width, channel_number]
ar_s = DenseSliceCOO(v[1:4], v[4], shape=data_shape)
else:
data_shape = [frame_count, height, width, channel_number]
ar_s = DenseSliceCOO(v[0:4], v[4], shape=data_shape)
if sum_frames:
em_image = em_image[0]
return da.from_array(ar_s, asarray=False), em_image, has_em_image, sweep, frame_start_index, valid, max_shift, frame_shifts
|
def _readcube(rawdata, frame_start_index, frame_list,
width, height, channel_number,
width_norm, height_norm, rebin_energy,
SI_dtype, sweep, frame_shifts,
sum_frames, read_em_image, only_valid_data, lazy): # pragma: no cover
"""
Read spectrum image (and SEM/STEM image) from pts file
Parameters
----------
rawdata : numpy.ndarray
Spectrum image part of pts file.
frame_start_index : np.ndarray of shape (sweep+1, ) or (0, )
The indices of each frame start. If length is zero, the indices will be
determined from rawdata.
frame_list : list
List of frames to be read.
width, height : int
The navigation dimension.
channel_number : int
The number of channels.
width_norm, height_norm : int
Rebin factor of the navigation dimension.
rebin_energy : int
Rebin factor of the energy dimension
sweep : int
Number of sweep
frame_shifts : list
The list of image positions [[x0,y0,z0], ...]. The x, y, z values can
be negative. The data points outside data cube are ignored.
Returns
-------
data : numpy.ndarray or dask.array
The spectrum image with shape (frame, x, y, energy) if sum_frames is
False, otherwise (x, y, energy).
If lazy is True, the dask array is a COO sparse array
em_data : numpy.ndarray or dask.array
The SEM/STEM image with shape (frame, x, y) if sum_frames is False,
otherwise (x, y).
has_em_image : bool
True if the stream contains SEM/STEM images.
sweep : int
The number of loaded frames.
frame_start_index : list
The indices of each frame start.
max_shift : numpy.ndarray
The maximum shifts of the origin in the navigation dimension
frame_shifts : numpy.ndarray
The shifts of the origin in the navigation dimension for each frame.
"""
import dask.array as da
# In case of sum_frames, spectrum image and SEM/STEM image are summing up to the same frame number.
# To avoid overflow on integration of SEM/STEM image, data type of np.uint32 is selected
# for 16 frames and over. (range of image intensity in each frame is 0-4095 (0-0xfff))
EM_dtype = np.uint16
frame_step = 1
if sum_frames:
frame_step = 0
if sweep >= 16:
EM_dtype = np.uint32
n_frames = 1
else:
n_frames = sweep + 1
if lazy:
hypermap = np.zeros((n_frames), dtype=EM_dtype) # dummy variable, not used
data_list = []
else:
hypermap = np.zeros((n_frames, height, width, channel_number),
dtype=SI_dtype)
em_image = np.zeros((n_frames, width, height), dtype=EM_dtype)
max_value = np.iinfo(SI_dtype).max
frame_shifts = np.asarray(frame_shifts)
frame_list = np.asarray(frame_list)
max_shift = frame_shifts[frame_list].max(axis=0)
min_shift = frame_shifts[frame_list].min(axis=0)
# sxyz = np.array([min_shift[0]-max_shift[0], min_shift[1]-max_shift[1],0])
min_shift[2]=0
max_shift[2]=0
sxyz = min_shift-max_shift
frame_shifts -= max_shift
width += sxyz[1]
height += sxyz[0]
if lazy:
readframe = _readframe_lazy
else:
readframe = _readframe_dense
frame_num = 0
p_start = 0
target_frame_num = 0
eof = rawdata.size
countup = 1
has_em_image = False
for frame_idx in frame_list:
if frame_idx < 0:
continue
elif frame_start_index[frame_idx] >= 0:
# if frame_idx is already indexed
p_start = frame_start_index[frame_idx]
elif frame_num < frame_idx and frame_start_index[frame_num] < 0:
# record start point of frame and skip frame
frame_start_index[frame_num] = p_start
p_start += _readframe_dummy(rawdata[p_start:])
frame_num += 1
continue
else:
frame_start_index[frame_idx] = p_start # = end of last frame
if frame_idx < frame_shifts.size:
fs = frame_shifts[frame_idx]
else:
fs = np.zeros(3, np.uint16)
_logger.info(f"Size of frame_shift array is too small. The frame {frame_idx} is not moved.")
length, frame_data, has_em, valid, max_valid = readframe(
rawdata[p_start:], 1,
hypermap[target_frame_num], em_image[target_frame_num],
width, height, channel_number,
width_norm, height_norm, rebin_energy,
fs[1], fs[0], fs[2], max_value)
has_em_image = has_em_image or has_em
if length == 0: # no data
break
if valid or not only_valid_data:
# accept last frame
if lazy:
data_list.append(frame_data)
frame_num += 1
target_frame_num += frame_step
else:
# incomplete data, not accepted
if sum_frames:
# subtract signal counts of last frame
_ = readframe(rawdata[p_start:], -1,
hypermap[target_frame_num], em_image[target_frame_num],
width, height, channel_number,
width_norm, height_norm, rebin_energy,
fs[1], fs[0],fs[2], max_value)
_logger.info("The last frame (sweep) is incomplete because the acquisition stopped during this frame. The partially acquired frame is ignored. Use 'sum_frames=False, only_valid_data=False' to read all frames individually, including the last partially completed frame.")
break
# else:
# pass
p_start += length
if not lazy:
if sum_frames:
# the first frame has integrated intensity
return hypermap[0,:height,:width], em_image[0,:height,:width], has_em_image, frame_num, frame_start_index, valid, max_shift, frame_shifts
else:
return hypermap[:target_frame_num,:height,:width], em_image[:target_frame_num,:height,:width], has_em_image, frame_num, frame_start_index, valid, max_shift, frame_shifts
# for lazy loading
from hyperspy.misc.io.fei_stream_readers import DenseSliceCOO
length = np.sum([len(d) for d in data_list])
# length = number of data points
# v : [[frame_no, y, x, energy_channel, 1], ....]
v = np.zeros(shape=(5, length), dtype=np.uint16)
ptr = 0
frame_count = 0
for d in data_list:
# d : data points in one frame
d = np.asarray(d)
# check if the pixels are in the valid data cube
# (frame_shifts make partially integrated area at the rim)
valid_cube = np.where((0<=d[:,0]) & (d[:,0]<height) & (0<=d[:,1]) & (d[:,1]<width) & (0<=d[:,2]) & (d[:,2]<channel_number))
d = d[valid_cube]
flen = len(d)
pv = v[:,ptr:ptr+flen]
pv[1:4, :] = np.array(d).transpose()
pv[0,:] = frame_count
pv[4,:] = 1
ptr += flen
frame_count += 1
if sum_frames:
data_shape = [height, width, channel_number]
ar_s = DenseSliceCOO(v[1:4], v[4], shape=data_shape)
else:
data_shape = [frame_count, height, width, channel_number]
ar_s = DenseSliceCOO(v[0:4], v[4], shape=data_shape)
if sum_frames:
em_image = em_image[0]
return da.from_array(ar_s, asarray=False), em_image, has_em_image, sweep, frame_start_index, valid, max_shift, frame_shifts
|
6,484 |
def execute(filters=None):
period_list = get_period_list(filters.from_fiscal_year, filters.to_fiscal_year,
filters.period_start_date, filters.period_end_date, filters.filter_based_on, filters.periodicity,
company=filters.company)
income = get_data(filters.company, "Income", "Credit", period_list, filters = filters,
accumulated_values=filters.accumulated_values,
ignore_closing_entries=True, ignore_accumulated_values_for_fy= True)
expense = get_data(filters.company, "Expense", "Debit", period_list, filters=filters,
accumulated_values=filters.accumulated_values,
ignore_closing_entries=True, ignore_accumulated_values_for_fy= True)
net_profit_loss = get_net_profit_loss(income, expense, period_list, filters.company, filters.presentation_currency)
data = []
data.extend(income or [])
data.extend(expense or [])
if net_profit_loss:
data.append(net_profit_loss)
columns = get_columns(filters.periodicity, period_list, filters.accumulated_values, filters.company)
chart = get_chart_data(filters, columns, income, expense, net_profit_loss)
default_currency = filters.presentation_currency or frappe.get_cached_value('Company', filters.company, "default_currency")
report_summary = get_report_summary(period_list, filters.periodicity, income, expense, net_profit_loss, default_currency)
return columns, data, None, chart, report_summary
|
def execute(filters=None):
period_list = get_period_list(filters.from_fiscal_year, filters.to_fiscal_year,
filters.period_start_date, filters.period_end_date, filters.filter_based_on, filters.periodicity,
company=filters.company)
income = get_data(filters.company, "Income", "Credit", period_list, filters = filters,
accumulated_values=filters.accumulated_values,
ignore_closing_entries=True, ignore_accumulated_values_for_fy= True)
expense = get_data(filters.company, "Expense", "Debit", period_list, filters=filters,
accumulated_values=filters.accumulated_values,
ignore_closing_entries=True, ignore_accumulated_values_for_fy= True)
net_profit_loss = get_net_profit_loss(income, expense, period_list, filters.company, filters.presentation_currency)
data = []
data.extend(income or [])
data.extend(expense or [])
if net_profit_loss:
data.append(net_profit_loss)
columns = get_columns(filters.periodicity, period_list, filters.accumulated_values, filters.company)
chart = get_chart_data(filters, columns, income, expense, net_profit_loss)
currency = filters.presentation_currency or frappe.get_cached_value('Company', filters.company, "default_currency")
report_summary = get_report_summary(period_list, filters.periodicity, income, expense, net_profit_loss, default_currency)
return columns, data, None, chart, report_summary
|
58,819 |
def eigsh(a, k=6, which='LM', ncv=None, maxiter=None, tol=0,
return_eigenvectors=True):
"""Finds ``k`` eigenvalues and eigenvectors of the real symmetric matrix.
Solves ``Ax = wx``, the standard eigenvalue problem for ``w`` eigenvalues
with corresponding eigenvectors ``x``.
Args:
a (cupy.ndarray or cupyx.scipy.sparse.csr_matrix): A symmetric square
matrix with dimension ``(n, n)``.
k (int): The number of eigenvalues and eigenvectors to compute. Must be
``1 <= k < n``.
which (str): 'LM' or 'LA'. 'LM': finds ``k`` largest (in magnitude)
eigenvalues. 'LA': finds ``k`` largest (algebraic) eigenvalues.
ncv (int): The number of Lanczos vectors generated. Must be
``k + 1 < ncv < n``. If ``None``, default value is used.
maxiter (int): Maximum number of Lanczos update iterations.
If ``None``, default value is used.
tol (float): Tolerance for residuals ``||Ax - wx||``. If ``0``, machine
precision is used.
return_eigenvectors (bool): If ``True``, returns eigenvectors in
addition to eigenvalues.
Returns:
tuple:
If ``return_eigenvectors is True``, it returns ``w`` and ``x``
where ``w`` is eigenvalues and ``x`` is eigenvectors. Otherwise,
it returns only ``w``.
.. seealso:: :func:`scipy.sparse.linalg.eigsh`
.. note::
This function uses the thick-restart Lanczos methos
(https://sdm.lbl.gov/~kewu/ps/trlan.html).
"""
n = a.shape[0]
if a.ndim != 2 or a.shape[0] != a.shape[1]:
raise ValueError('expected square matrix (shape: {})'.format(a.shape))
if a.dtype.char not in 'fdFD':
raise TypeError('unsupprted dtype (actual: {})'.format(a.dtype))
if k <= 0:
raise ValueError('k must be greater than 0 (actual: {})'.format(k))
if k >= n:
raise ValueError('k must be smaller than n (actual: {})'.format(k))
if which not in ('LM', 'LA'):
raise ValueError('which must be \'LM\' or \'LA\' (actual: {})'
''.format(which))
if ncv is None:
ncv = min(max(8 * k, 16), n - 1)
else:
ncv = min(max(ncv, k + 2), n - 1)
if maxiter is None:
maxiter = 10 * n
if tol == 0:
tol = numpy.finfo(a.dtype).eps
alpha = cupy.zeros((ncv, ), dtype=a.dtype)
beta = cupy.zeros((ncv, ), dtype=a.dtype)
V = cupy.empty((ncv, n), dtype=a.dtype)
# Set initial vector
u = cupy.random.random((n, )).astype(a.dtype)
v = u / cupy.linalg.norm(u)
V[0] = v
# Lanczos iteration
u = _eigsh_lanczos_update(a, V, alpha, beta, 0, ncv)
iter = ncv
w, s = _eigsh_solve_ritz(alpha, beta, None, k, which)
x = V.T @ s
# Compute residual
beta_k = beta[-1] * s[-1, :]
res = cupy.linalg.norm(beta_k)
while res > tol:
# Setup for thick-restart
beta[:k] = 0
alpha[:k] = w
V[:k] = x.T
u = u - u.T @ V[:k].conj().T @ V[:k]
v = u / cupy.linalg.norm(u)
V[k] = v
u = a @ v
alpha[k] = v.conj().T @ u
u = u - alpha[k] * v
u = u - V[:k].T @ beta_k
u = u - u.T @ V[:k+1].conj().T @ V[:k+1]
beta[k] = cupy.linalg.norm(u)
v = u / beta[k]
V[k+1] = v
# Lanczos iteration
u = _eigsh_lanczos_update(a, V, alpha, beta, k+1, ncv)
iter += ncv - k
w, s = _eigsh_solve_ritz(alpha, beta, beta_k, k, which)
x = V.T @ s
# Compute residual
beta_k = beta[-1] * s[-1, :]
res = cupy.linalg.norm(beta_k)
if iter >= maxiter:
break
idx = cupy.argsort(w)
w = w[idx]
x = x[:, idx]
if return_eigenvectors:
return w, x
else:
return w
|
def eigsh(a, k=6, which='LM', ncv=None, maxiter=None, tol=0,
return_eigenvectors=True):
"""Finds ``k`` eigenvalues and eigenvectors of the real symmetric matrix.
Solves ``Ax = wx``, the standard eigenvalue problem for ``w`` eigenvalues
with corresponding eigenvectors ``x``.
Args:
a (cupy.ndarray or cupyx.scipy.sparse.csr_matrix): A symmetric square
matrix with dimension ``(n, n)``.
k (int): The number of eigenvalues and eigenvectors to compute. Must be
``1 <= k < n``.
which (str): 'LM' or 'LA'. 'LM': finds ``k`` largest (in magnitude)
eigenvalues. 'LA': finds ``k`` largest (algebraic) eigenvalues.
ncv (int): The number of Lanczos vectors generated. Must be
``k + 1 < ncv < n``. If ``None``, default value is used.
maxiter (int): Maximum number of Lanczos update iterations.
If ``None``, default value is used.
tol (float): Tolerance for residuals ``||Ax - wx||``. If ``0``, machine
precision is used.
return_eigenvectors (bool): If ``True``, returns eigenvectors in
addition to eigenvalues.
Returns:
tuple:
If ``return_eigenvectors is True``, it returns ``w`` and ``x``
where ``w`` is eigenvalues and ``x`` is eigenvectors. Otherwise,
it returns only ``w``.
.. seealso:: :func:`scipy.sparse.linalg.eigsh`
.. note::
This function uses the thick-restart Lanczos methos
(https://sdm.lbl.gov/~kewu/ps/trlan.html).
"""
n = a.shape[0]
if a.ndim != 2 or a.shape[0] != a.shape[1]:
raise ValueError('expected square matrix (shape: {})'.format(a.shape))
if a.dtype.char not in 'fdFD':
raise TypeError('unsupprted dtype (actual: {})'.format(a.dtype))
if k <= 0:
raise ValueError('k must be greater than 0 (actual: {})'.format(k))
if k >= n:
raise ValueError('k must be smaller than n (actual: {})'.format(k))
if which not in ('LM', 'LA'):
raise ValueError('which must be \'LM\' or \'LA\' (actual: {})'
''.format(which))
if ncv is None:
ncv = min(max(8 * k, 16), n - 1)
else:
ncv = min(max(ncv, k + 2), n - 1)
if maxiter is None:
maxiter = 10 * n
if tol == 0:
tol = numpy.finfo(a.dtype).eps
alpha = cupy.zeros((ncv, ), dtype=a.dtype)
beta = cupy.zeros((ncv, ), dtype=a.dtype)
V = cupy.empty((ncv, n), dtype=a.dtype)
# Set initial vector
u = cupy.random.random((n, )).astype(a.dtype)
v = u / cupy.linalg.norm(u)
V[0] = v
# Lanczos iteration
u = _eigsh_lanczos_update(a, V, alpha, beta, 0, ncv)
iter = ncv
w, s = _eigsh_solve_ritz(alpha, beta, None, k, which)
x = V.T @ s
# Compute residual
beta_k = beta[-1] * s[-1, :]
res = cupy.linalg.norm(beta_k)
while res > tol:
# Setup for thick-restart
beta[:k] = 0
alpha[:k] = w
V[:k] = x.T
u = u - u.T @ V[:k].conj().T @ V[:k]
v = u / cupy.linalg.norm(u)
V[k] = v
u = a @ v
alpha[k] = v.conj().T @ u
u -= alpha[k] * v
u -= V[:k].T @ beta_k
u -= u.T @ V[:k+1].conj().T @ V[:k+1]
beta[k] = cupy.linalg.norm(u)
v = u / beta[k]
V[k+1] = v
# Lanczos iteration
u = _eigsh_lanczos_update(a, V, alpha, beta, k+1, ncv)
iter += ncv - k
w, s = _eigsh_solve_ritz(alpha, beta, beta_k, k, which)
x = V.T @ s
# Compute residual
beta_k = beta[-1] * s[-1, :]
res = cupy.linalg.norm(beta_k)
if iter >= maxiter:
break
idx = cupy.argsort(w)
w = w[idx]
x = x[:, idx]
if return_eigenvectors:
return w, x
else:
return w
|
55,614 |
def Clustergram(
data,
generate_curves_dict=False,
return_computed_traces=False,
computed_traces=None,
row_labels=None,
row_colors=None,
column_labels=None,
column_colors=None,
hidden_labels=None,
standardize="none",
cluster="all",
row_dist="euclidean",
col_dist="euclidean",
dist_fun=scs.distance.pdist,
link_fun=None,
link_method=None,
color_threshold=None,
optimal_leaf_order=False,
color_map=None,
color_list=None,
display_range=3,
center_values=True,
log_transform=False,
display_ratio=0.2,
imputer_parameters=None,
row_group_marker=None, # group number, annotation, color
col_group_marker=None, # same as above
tick_font=None,
annotation_font=None,
line_width=0.5,
paper_bg_color="rgba(0,0,0,0)",
plot_bg_color="rgba(0,0,0,0)",
height=500,
width=500,
):
"""Return a Dash Bio Clustergram object.
Keyword arguments:
- data (2D array-like; required): Matrix or table of observations (dropping
columns of non-numeric dtype).
- generate_curves_dict (bool; default False): Whether or not to return a
dictionary containing information about the cluster number
associated with each curve number in the graph. (May be useful
for capturing the cluster number that is clicked.)
- return_computed_traces (bool; default False): Whether or not to return
the precomputed dendrogram traces. (May be useful if one wishes
to add, e.g., group markers to the figure without recalculating
the clustering in the entire figure.)
- computed_traces (dict; optional): The dendrogram traces from another
(precomputed) Clustergram component.
- row_labels (list; optional): List of row category labels
(observation labels).
- row_colors (list; optional): List of row colors
(observation colors).
- column_labels (list; optional): List of column category labels
(observation labels).
- column_colors (list; optional): List of column colors
(observation colors).
- hidden_labels (list; optional): List containing strings 'row' and/or 'col'
if row and/or column labels should be hidden on the final plot.
- standardize (string; default 'none'): The dimension for standardizing
values, so that the mean is 0 and the standard deviation is 1,
along the specified dimension: 'row', 'column', or 'none'.
- cluster (string; default 'all'): The dimension along which the data will
be clustered: 'row', 'column', or 'all'; 'all' means data will be
clustered along columns, then clustered along rows of
column-clustered data.
- row_dist (string; default 'euclidean'): Distance metric for rows.
Passed as argument `metric` to the function specified in `dist_fun`
when called for clustering along rows.
- col_dist (string; default 'euclidean'): Distance metric for columns.
Passed as argument `metric` to the function specified in `dist_fun`
when called for clustering along columns.
- dist_fun (function; default scipy.spatial.distance.pdist): Function
to compute the pairwise distance from the observations (see docs for
scipy.spatial.distance.pdist).
- link_fun (function; default scipy.cluster.hierarchy.linkage): Function to
compute the linkage matrix from the pairwise distances (see docs for
scipy.cluster.hierarchy.linkage).
- link_method (string; default 'complete'): The linkage algorithm to use
if link_fun not set. For method 'single', an optimized algorithm based
on minimum spanning, for methods 'complete', 'average', 'weighted' and
'ward', an algorithm called nearest-neighbors chain is implemented
(see docs for scipy.cluster.hierarchy.linkage).
- color_threshold (dict; default {'row': 0, 'col': 0}): Maximum
linkage value for which unique colors are assigned to clusters;
'row' for rows, and 'col' for columns.
- optimal_leaf_order (bool; default False): Whether to enable (True) or
disable (False) the option to determine leaf order that maximizes
similarity between neighboring leaves.
- color_map (list; default [[0.0, 'rgb(255,0,0)'], [0.5,
'rgb(0,0,0)'], [1.0, 'rgb(0,255,0)']]): Colorscale for the heatmap.
Top-level elements contain two elements, the first of which refers to
the percentile rank, and the second to the applied color. For instance,
[[0.0, 'white'], [0.5, 'gray'], [1.0, 'black']] means that cells in the
49th percentile would be white; cells at the 50th or higher percentiles,
excluding the 100th percentile, would be gray; and the cell(s) at the
100th percentile would be black.
- color_list (dict; optional): The list of colors to use for different
clusters in the dendrogram that have a root under the threshold for
each dimension. If there are fewer colors than there are clusters
along a specific dimension, the colors of the clusters will cycle
through the colors specified in the list. The keys are: 'row' (for
row clusters), 'col' (for column clusters), and 'bg' (for all
traces above the clustering threshold for both row and column).
- display_range (double; default 3.0): In the heatmap, standardized
values from the dataset that are below the negative of this value
will be colored with one shade, and the values that are above this
value will be colored with another.
- center_values (bool; default True): Whether or not to center the
values of the heatmap about zero.
- log_transform (bool; default False): Whether or not to transform
the data by taking the base-two logarithm of all values in the
dataset.
- display_ratio (list | number; default 0.2): The dendrograms' heights with
respect to the size of the heatmap; with one element, both the row
and column dendrograms have the same ratio; with two, the row
dendrogram ratio corresponds to the first element of the list and
the column dendrogram ratio corresponds to the second element of
the list.
- imputer_parameters (dict; optional): Specifies the parameters
'missing_values' and 'strategy' of the SimpleImputer class from
scikit-learn 0.20.1 (both of these parameters must be keys in the
dictionary). An additional parameter, 'axis', is used to specify
the direction along which to impute (a parameter of Imputer, which
was deprecated in scikit-learn 0.20.0): 'axis=0' indicates that
imputing should happen along columns, while 'axis=1' indicates
that it should happen along rows (see: https://scikit
-learn.org/stable/modules/generated/sklearn.preprocessing.Imputer.html).
- row_group_marker (list; optional): A list containing the annotations
for row clusters in the dendrogram. Each annotation is a
dictionary with the keys 'group_number' (the cluster number to
highlight), 'annotation' (a string containing the text of the
annotation), and 'color' (a string representation of the color of
the annotation).
- col_group_marker (list; optional): A list containing the annotations for
column clusters in the dendrogram. Each annotation is a dictionary
with the keys 'group_number' (the cluster number to highlight),
'annotation' (a string containing the text of the annotation), and
'color' (a string representation of the color of the
annotation).
- tick_font (dict; optional): The font options for ticks, as specified
in the Plotly graph_objects documentation (see:
https://plotly.com/python/reference/#bar-marker-colorbar-tickfont).
- annotation_font (dict; optional): The font options for annotations,
as specified in the Plotly graph_objects documentation (see:
https://plotly.cp,/python/reference/#layout-scene-annotations-items-annotation-font).
- line_width (list | number; default 0.5): The line width for the
dendrograms. If in list format, the first element corresponds to
the width of the row dendrogram traces, and the second corresponds
to the width of the column dendrogram traces.
- paper_bg_color (string; default 'rgba(0,0,0,0)'): The background
color of the paper on the graph.
- plot_bg_color (string; default 'rgba(0,0,0,0)'): The background
color of the subplots on the graph.
- height (number; default 500): The height of the graph, in px.
- width (number; default 500): The width of the graph, in px.
"""
if color_threshold is None:
color_threshold = dict(row=0, col=0)
# get rid of arguments that are not used by _Clustergram
kwargs = locals()
kwargs.pop("return_computed_traces")
kwargs.pop("computed_traces")
kwargs.pop("generate_curves_dict")
print(row_colors)
(fig, ct, curves_dict) = _Clustergram(**kwargs).figure(
computed_traces=computed_traces
)
return_values = [go.Figure(fig)]
if generate_curves_dict:
return_values.append(curves_dict)
if return_computed_traces:
return_values.append(ct)
# return only the figure by default
if len(return_values) == 1:
return return_values[0]
# otherwise, return all requested values
return tuple(return_values)
|
def Clustergram(
data,
generate_curves_dict=False,
return_computed_traces=False,
computed_traces=None,
row_labels=None,
row_colors=None,
column_labels=None,
column_colors=None,
hidden_labels=None,
standardize="none",
cluster="all",
row_dist="euclidean",
col_dist="euclidean",
dist_fun=scs.distance.pdist,
link_fun=None,
link_method=None,
color_threshold=None,
optimal_leaf_order=False,
color_map=None,
color_list=None,
display_range=3,
center_values=True,
log_transform=False,
display_ratio=0.2,
imputer_parameters=None,
row_group_marker=None, # group number, annotation, color
col_group_marker=None, # same as above
tick_font=None,
annotation_font=None,
line_width=0.5,
paper_bg_color="rgba(0,0,0,0)",
plot_bg_color="rgba(0,0,0,0)",
height=500,
width=500,
):
"""Return a Dash Bio Clustergram object.
Keyword arguments:
- data (2D array-like; required): Matrix or table of observations (dropping
columns of non-numeric dtype).
- generate_curves_dict (bool; default False): Whether or not to return a
dictionary containing information about the cluster number
associated with each curve number in the graph. (May be useful
for capturing the cluster number that is clicked.)
- return_computed_traces (bool; default False): Whether or not to return
the precomputed dendrogram traces. (May be useful if one wishes
to add, e.g., group markers to the figure without recalculating
the clustering in the entire figure.)
- computed_traces (dict; optional): The dendrogram traces from another
(precomputed) Clustergram component.
- row_labels (list; optional): List of row category labels
(observation labels).
- row_colors (list; optional): List of row colors
(observation colors).
- column_labels (list; optional): List of column category labels
(observation labels).
- column_colors (list; optional): List of column colors
(observation colors).
- hidden_labels (list; optional): List containing strings 'row' and/or 'col'
if row and/or column labels should be hidden on the final plot.
- standardize (string; default 'none'): The dimension for standardizing
values, so that the mean is 0 and the standard deviation is 1,
along the specified dimension: 'row', 'column', or 'none'.
- cluster (string; default 'all'): The dimension along which the data will
be clustered: 'row', 'column', or 'all'; 'all' means data will be
clustered along columns, then clustered along rows of
column-clustered data.
- row_dist (string; default 'euclidean'): Distance metric for rows.
Passed as argument `metric` to the function specified in `dist_fun`
when called for clustering along rows.
- col_dist (string; default 'euclidean'): Distance metric for columns.
Passed as argument `metric` to the function specified in `dist_fun`
when called for clustering along columns.
- dist_fun (function; default scipy.spatial.distance.pdist): Function
to compute the pairwise distance from the observations (see docs for
scipy.spatial.distance.pdist).
- link_fun (function; default scipy.cluster.hierarchy.linkage): Function to
compute the linkage matrix from the pairwise distances (see docs for
scipy.cluster.hierarchy.linkage).
- link_method (string; default 'complete'): The linkage algorithm to use
if link_fun not set. For method 'single', an optimized algorithm based
on minimum spanning, for methods 'complete', 'average', 'weighted' and
'ward', an algorithm called nearest-neighbors chain is implemented
(see docs for scipy.cluster.hierarchy.linkage).
- color_threshold (dict; default {'row': 0, 'col': 0}): Maximum
linkage value for which unique colors are assigned to clusters;
'row' for rows, and 'col' for columns.
- optimal_leaf_order (bool; default False): Whether to enable (True) or
disable (False) the option to determine leaf order that maximizes
similarity between neighboring leaves.
- color_map (list; default [[0.0, 'rgb(255,0,0)'], [0.5,
'rgb(0,0,0)'], [1.0, 'rgb(0,255,0)']]): Colorscale for the heatmap.
Top-level elements contain two elements, the first of which refers to
the percentile rank, and the second to the applied color. For instance,
[[0.0, 'white'], [0.5, 'gray'], [1.0, 'black']] means that cells in the
49th percentile would be white; cells at the 50th or higher percentiles,
excluding the 100th percentile, would be gray; and the cell(s) at the
100th percentile would be black.
- color_list (dict; optional): The list of colors to use for different
clusters in the dendrogram that have a root under the threshold for
each dimension. If there are fewer colors than there are clusters
along a specific dimension, the colors of the clusters will cycle
through the colors specified in the list. The keys are: 'row' (for
row clusters), 'col' (for column clusters), and 'bg' (for all
traces above the clustering threshold for both row and column).
- display_range (double; default 3.0): In the heatmap, standardized
values from the dataset that are below the negative of this value
will be colored with one shade, and the values that are above this
value will be colored with another.
- center_values (bool; default True): Whether or not to center the
values of the heatmap about zero.
- log_transform (bool; default False): Whether or not to transform
the data by taking the base-two logarithm of all values in the
dataset.
- display_ratio (list | number; default 0.2): The dendrograms' heights with
respect to the size of the heatmap; with one element, both the row
and column dendrograms have the same ratio; with two, the row
dendrogram ratio corresponds to the first element of the list and
the column dendrogram ratio corresponds to the second element of
the list.
- imputer_parameters (dict; optional): Specifies the parameters
'missing_values' and 'strategy' of the SimpleImputer class from
scikit-learn 0.20.1 (both of these parameters must be keys in the
dictionary). An additional parameter, 'axis', is used to specify
the direction along which to impute (a parameter of Imputer, which
was deprecated in scikit-learn 0.20.0): 'axis=0' indicates that
imputing should happen along columns, while 'axis=1' indicates
that it should happen along rows (see: https://scikit
-learn.org/stable/modules/generated/sklearn.preprocessing.Imputer.html).
- row_group_marker (list; optional): A list containing the annotations
for row clusters in the dendrogram. Each annotation is a
dictionary with the keys 'group_number' (the cluster number to
highlight), 'annotation' (a string containing the text of the
annotation), and 'color' (a string representation of the color of
the annotation).
- col_group_marker (list; optional): A list containing the annotations for
column clusters in the dendrogram. Each annotation is a dictionary
with the keys 'group_number' (the cluster number to highlight),
'annotation' (a string containing the text of the annotation), and
'color' (a string representation of the color of the
annotation).
- tick_font (dict; optional): The font options for ticks, as specified
in the Plotly graph_objects documentation (see:
https://plotly.com/python/reference/#bar-marker-colorbar-tickfont).
- annotation_font (dict; optional): The font options for annotations,
as specified in the Plotly graph_objects documentation (see:
https://plotly.cp,/python/reference/#layout-scene-annotations-items-annotation-font).
- line_width (list | number; default 0.5): The line width for the
dendrograms. If in list format, the first element corresponds to
the width of the row dendrogram traces, and the second corresponds
to the width of the column dendrogram traces.
- paper_bg_color (string; default 'rgba(0,0,0,0)'): The background
color of the paper on the graph.
- plot_bg_color (string; default 'rgba(0,0,0,0)'): The background
color of the subplots on the graph.
- height (number; default 500): The height of the graph, in px.
- width (number; default 500): The width of the graph, in px.
"""
if color_threshold is None:
color_threshold = dict(row=0, col=0)
# get rid of arguments that are not used by _Clustergram
kwargs = locals()
kwargs.pop("return_computed_traces")
kwargs.pop("computed_traces")
kwargs.pop("generate_curves_dict")
(fig, ct, curves_dict) = _Clustergram(**kwargs).figure(
computed_traces=computed_traces
)
return_values = [go.Figure(fig)]
if generate_curves_dict:
return_values.append(curves_dict)
if return_computed_traces:
return_values.append(ct)
# return only the figure by default
if len(return_values) == 1:
return return_values[0]
# otherwise, return all requested values
return tuple(return_values)
|
30,352 |
def dict_value_to_int(target_dict: Dict, key: str):
"""
:param target_dict: A dictionary which has the key param
:param key: The key that we need to convert it's value to integer
:return: The integer representation of the key's value in the dict params
"""
try:
if target_dict:
value = target_dict.get(key)
if value:
target_dict[key] = int(value)
return target_dict[key]
except ValueError:
raise Exception(f'This value for {key} must be an integer.')
|
def dict_value_to_int(target_dict: Dict, key: str):
"""
:param target_dict: A dictionary which has the key param
:param key: The key that we need to convert it's value to integer
:return: The integer representation of the key's value in the dict params
"""
try:
if target_dict:
value = target_dict.get(key)
if value:
target_dict[key] = int(value)
return target_dict[key]
except ValueError:
raise ValueError(f'The value for {key} must be an integer.')
|
9,047 |
def rate_channel(
rate: int,
message: typing.Optional[str] = None,
) -> typing.Callable:
"""Decorate a function to be rate-limited for a channel.
:param rate: seconds between permitted calls of this function in the same
channel, regardless of triggering user
:param message: optional; message send as notice when a user hits the limit
This decorator can be used alone or with the :func:`rate` decorator, as it
will always take precedence::
@rate(10, 10, 10)
@rate_channel(5, 'You hit the channel rate limit for this function.')
# channel limit will be set to 5, other to 10
# will send a NOTICE only when a user hits the channel limit
# as other rate limit don't have any message set
If you don't provide a message, the default message set (if any) by
:func:`rate` will be used instead.
.. versionadded:: 8.0
"""
def add_attribute(function):
function.channel_rate = rate
function.channel_rate_message = message
return function
return add_attribute
|
def rate_channel(
rate: int,
message: typing.Optional[str] = None,
) -> typing.Callable:
"""Decorate a function to be rate-limited for a channel.
:param rate: seconds between permitted calls of this function in the same
channel, regardless of triggering user
:param message: optional; message sent as NOTICE when a user hits the limit
This decorator can be used alone or with the :func:`rate` decorator, as it
will always take precedence::
@rate(10, 10, 10)
@rate_channel(5, 'You hit the channel rate limit for this function.')
# channel limit will be set to 5, other to 10
# will send a NOTICE only when a user hits the channel limit
# as other rate limit don't have any message set
If you don't provide a message, the default message set (if any) by
:func:`rate` will be used instead.
.. versionadded:: 8.0
"""
def add_attribute(function):
function.channel_rate = rate
function.channel_rate_message = message
return function
return add_attribute
|
2,046 |
def inplace_csr_column_scale(X, scale):
"""Inplace column scaling of a CSR matrix.
Scale each feature of the data matrix by multiplying with specific scale
provided by the caller assuming a (n_samples, n_features) shape.
Parameters
----------
X : CSR sparse matrix of shape (n_samples, n_features)
Matrix to normalize using the variance of the features.
scale : ndarray of float of shape (n_features,)
Array of precomputed feature-wise values to use for scaling.
"""
assert scale.shape[0] == X.shape[1]
X.data *= scale.take(X.indices, mode='clip')
|
def inplace_csr_column_scale(X, scale):
"""Inplace column scaling of a CSR matrix.
Scale each feature of the data matrix by multiplying with specific scale
provided by the caller assuming a (n_samples, n_features) shape.
Parameters
----------
X : CSR sparse matrix of shape (n_samples, n_features)
Matrix to normalize using the variance of the features.
scale : ndarray of shape (n_features,), dtype=float
Array of precomputed feature-wise values to use for scaling.
"""
assert scale.shape[0] == X.shape[1]
X.data *= scale.take(X.indices, mode='clip')
|
47,236 |
def contrastive_loss(logits: torch.Tensor) -> torch.Tensor:
return nn.functional.cross_entropy(logits, torch.arange(len(logits)).to(logits.device))
|
def contrastive_loss(logits: torch.Tensor) -> torch.Tensor:
return nn.functional.cross_entropy(logits, torch.arange(len(logits), device=logits.device))
|
43,962 |
def repulsion_tensor(basis_functions):
r"""Return a function that computes the repulsion tensor for a given set of basis functions.
Args:
basis_functions (list[BasisFunction]): basis functions
Returns:
function: function that computes the repulsion tensor
**Example**
>>> symbols = ['H', 'H']
>>> geometry = np.array([[0.0, 0.0, 0.0], [0.0, 0.0, 1.0]], requires_grad = False)
>>> alpha = np.array([[3.42525091, 0.62391373, 0.1688554],
>>> [3.42525091, 0.62391373, 0.1688554]], requires_grad=True),
>>> mol = Molecule(symbols, geometry, alpha=alpha)
>>> args = [alpha]
>>> repulsion_tensor(mol.basis_set)(*args)
array([[[[0.77460595, 0.56886144], [0.56886144, 0.65017747]],
[[0.56886144, 0.45590152], [0.45590152, 0.56886144]]],
[[[0.56886144, 0.45590152], [0.45590152, 0.56886144]],
[[0.65017747, 0.56886144],[0.56886144, 0.77460595]]]])
"""
def repulsion(*args):
r"""Construct the repulsion tensor for a given set of basis functions.
Args:
args (array[float]): initial values of the differentiable parameters
Returns:
array[float]: the repulsion tensor
"""
n = len(basis_functions)
e = anp.zeros((n, n, n, n))
e_calc = []
for i, a in enumerate(basis_functions):
for j, b in enumerate(basis_functions):
for k, c in enumerate(basis_functions):
for l, d in enumerate(basis_functions):
if [i, j, k, l] not in e_calc:
if args:
args_abcd = []
for m in range(len(args)):
args_abcd.append(args[m][[i, j, k, l]])
repulsion_integral = generate_repulsion(a, b, c, d)(*args_abcd)
else:
repulsion_integral = generate_repulsion(a, b, c, d)()
o = anp.zeros((n, n, n, n))
o[i, j, k, l] = o[k, l, i, j] = o[j, i, l, k] = o[l, k, j, i] = 1.0
o[j, i, k, l] = o[l, k, i, j] = o[i, j, l, k] = o[k, l, j, i] = 1.0
e = e + repulsion_integral * o
e_calc = e_calc + [
[i, j, k, l],
[k, l, i, j],
[j, i, l, k],
[l, k, j, i],
[j, i, k, l],
[l, k, i, j],
[i, j, l, k],
[k, l, j, i],
]
return e
return repulsion
|
def repulsion_tensor(basis_functions):
r"""Return a function that computes the electron repulsion tensor for a given set of basis functions.
Args:
basis_functions (list[BasisFunction]): basis functions
Returns:
function: function that computes the repulsion tensor
**Example**
>>> symbols = ['H', 'H']
>>> geometry = np.array([[0.0, 0.0, 0.0], [0.0, 0.0, 1.0]], requires_grad = False)
>>> alpha = np.array([[3.42525091, 0.62391373, 0.1688554],
>>> [3.42525091, 0.62391373, 0.1688554]], requires_grad=True),
>>> mol = Molecule(symbols, geometry, alpha=alpha)
>>> args = [alpha]
>>> repulsion_tensor(mol.basis_set)(*args)
array([[[[0.77460595, 0.56886144], [0.56886144, 0.65017747]],
[[0.56886144, 0.45590152], [0.45590152, 0.56886144]]],
[[[0.56886144, 0.45590152], [0.45590152, 0.56886144]],
[[0.65017747, 0.56886144],[0.56886144, 0.77460595]]]])
"""
def repulsion(*args):
r"""Construct the repulsion tensor for a given set of basis functions.
Args:
args (array[float]): initial values of the differentiable parameters
Returns:
array[float]: the repulsion tensor
"""
n = len(basis_functions)
e = anp.zeros((n, n, n, n))
e_calc = []
for i, a in enumerate(basis_functions):
for j, b in enumerate(basis_functions):
for k, c in enumerate(basis_functions):
for l, d in enumerate(basis_functions):
if [i, j, k, l] not in e_calc:
if args:
args_abcd = []
for m in range(len(args)):
args_abcd.append(args[m][[i, j, k, l]])
repulsion_integral = generate_repulsion(a, b, c, d)(*args_abcd)
else:
repulsion_integral = generate_repulsion(a, b, c, d)()
o = anp.zeros((n, n, n, n))
o[i, j, k, l] = o[k, l, i, j] = o[j, i, l, k] = o[l, k, j, i] = 1.0
o[j, i, k, l] = o[l, k, i, j] = o[i, j, l, k] = o[k, l, j, i] = 1.0
e = e + repulsion_integral * o
e_calc = e_calc + [
[i, j, k, l],
[k, l, i, j],
[j, i, l, k],
[l, k, j, i],
[j, i, k, l],
[l, k, i, j],
[i, j, l, k],
[k, l, j, i],
]
return e
return repulsion
|
37,042 |
def _text_checker(job, interval, _interval_set=False, quiet=False, to_file=None):
"""A text-based job status checker
Args:
job (BaseJob): The job to check.
interval (int): The interval at which to check.
_interval_set (bool): Was interval time set by user?
quiet (bool): If True, do not print status messages.
to_file (file): If file print status messages to it, else to stdout.
"""
_outstream = to_file if to_file else sys.stdout
status = job.status()
msg = status.value
prev_msg = msg
msg_len = len(msg)
if not quiet:
print('\r%s: %s' % ('Job Status', msg), end='', file=_outstream)
while status.name not in ['DONE', 'CANCELLED', 'ERROR']:
time.sleep(interval)
status = job.status()
msg = status.value
if status.name == 'QUEUED':
msg += ' (%s)' % job.queue_position()
if not _interval_set:
interval = max(job.queue_position(), 2)
else:
if not _interval_set:
interval = 2
# Adjust length of message so there are no artifacts
if len(msg) < msg_len:
msg += ' ' * (msg_len - len(msg))
elif len(msg) > msg_len:
msg_len = len(msg)
if msg != prev_msg and not quiet:
print('\r%s: %s' % ('Job Status', msg), end='', file=_outstream)
prev_msg = msg
if not quiet:
print('', file=_outstream)
|
def _text_checker(job, interval, _interval_set=False, quiet=False, to_file=None):
"""A text-based job status checker
Args:
job (BaseJob): The job to check.
interval (int): The interval at which to check.
_interval_set (bool): Was interval time set by user?
quiet (bool): If True, do not print status messages.
to_file (file): If file print status messages to it, else to stdout.
"""
_outstream = to_file if to_file else sys.stdout
status = job.status()
msg = status.value
prev_msg = msg
msg_len = len(msg)
if not quiet:
print('\r%s: %s' % ('Job Status', msg), end='', file=_outstream)
while status.name not in ['DONE', 'CANCELLED', 'ERROR']:
time.sleep(interval)
status = job.status()
msg = status.value
if status.name == 'QUEUED':
msg += ' (%s)' % job.queue_position()
if not _interval_set:
interval = max(job.queue_position(), 2)
else:
if not _interval_set:
interval = 2
# Adjust length of message so there are no artifacts
if len(msg) < msg_len:
msg += ' ' * (msg_len - len(msg))
elif len(msg) > msg_len:
msg_len = len(msg)
if msg != prev_msg and not quiet:
print('\r%s: %s' % ('Job Status', msg), end='', file=output)
prev_msg = msg
if not quiet:
print('', file=_outstream)
|
26,101 |
def send_notification(message, title):
""" Sends notification using the preferred service """
notification_settings = NotificationSetting.get_solo()
DATA_FORMAT = {
NotificationSetting.NOTIFICATION_PUSHOVER: {
'url': NotificationSetting.PUSHOVER_API_URL,
'data': {
'token': notification_settings.pushover_api_key,
'user': notification_settings.pushover_user_key,
'priority': '-1',
'title': title,
'message': message
}
},
NotificationSetting.NOTIFICATION_PROWL: {
'url': NotificationSetting.PROWL_API_URL,
'data': {
'apikey': notification_settings.prowl_api_key,
'priority': '-2',
'application': 'DSMR-Reader',
'event': title,
'description': message
}
},
NotificationSetting.NOTIFICATION_TELEGRAM: {
'url': NotificationSetting.TELEGRAM_API_URL + notification_settings.telegram_api_key + '/sendMessage',
'data': {
'chat_id': notification_settings.telegram_chat_id,
'disable_notification': 'true',
'text': message
}
},
}
response = requests.post(
**DATA_FORMAT[notification_settings.notification_service]
)
if response.status_code == 200:
return
# Invalid request, do not retry.
if str(response.status_code).startswith('4'):
logger.error(' - Notification API returned client error, wiping settings...')
NotificationSetting.objects.update(
notification_service=None,
pushover_api_key=None,
pushover_user_key=None,
prowl_api_key=None,
next_notification=None,
telegram_api_key=None,
telegram_chat_id=None
)
Notification.objects.create(
message='Notification API error, settings are reset. Error: {}'.format(response.text),
redirect_to='admin:dsmr_notification_notificationsetting_changelist'
)
# Server error, delay a bit.
elif str(response.status_code).startswith('5'):
logger.warning(' - Notification API returned server error, retrying later...')
NotificationSetting.objects.update(
next_notification=timezone.now() + timezone.timedelta(minutes=5)
)
raise AssertionError('Notify API call failed: {0} (HTTP {1})'.format(response.text, response.status_code))
|
def send_notification(message, title):
""" Sends notification using the preferred service """
notification_settings = NotificationSetting.get_solo()
DATA_FORMAT = {
NotificationSetting.NOTIFICATION_PUSHOVER: {
'url': NotificationSetting.PUSHOVER_API_URL,
'data': {
'token': notification_settings.pushover_api_key,
'user': notification_settings.pushover_user_key,
'priority': '-1',
'title': title,
'message': message
}
},
NotificationSetting.NOTIFICATION_PROWL: {
'url': NotificationSetting.PROWL_API_URL,
'data': {
'apikey': notification_settings.prowl_api_key,
'priority': '-2',
'application': 'DSMR-Reader',
'event': title,
'description': message
}
},
NotificationSetting.NOTIFICATION_TELEGRAM: {
'url': '{}{}/sendMessage'.format(NotificationSetting.TELEGRAM_API_URL, notification_settings.telegram_api_key),
'data': {
'chat_id': notification_settings.telegram_chat_id,
'disable_notification': 'true',
'text': message
}
},
}
response = requests.post(
**DATA_FORMAT[notification_settings.notification_service]
)
if response.status_code == 200:
return
# Invalid request, do not retry.
if str(response.status_code).startswith('4'):
logger.error(' - Notification API returned client error, wiping settings...')
NotificationSetting.objects.update(
notification_service=None,
pushover_api_key=None,
pushover_user_key=None,
prowl_api_key=None,
next_notification=None,
telegram_api_key=None,
telegram_chat_id=None
)
Notification.objects.create(
message='Notification API error, settings are reset. Error: {}'.format(response.text),
redirect_to='admin:dsmr_notification_notificationsetting_changelist'
)
# Server error, delay a bit.
elif str(response.status_code).startswith('5'):
logger.warning(' - Notification API returned server error, retrying later...')
NotificationSetting.objects.update(
next_notification=timezone.now() + timezone.timedelta(minutes=5)
)
raise AssertionError('Notify API call failed: {0} (HTTP {1})'.format(response.text, response.status_code))
|
46,388 |
def run_in_venv(caller_file, script_file: str, *args,
input_data: Union[dict, ns], dataset: Dataset, config: TaskConfig,
options: Union[None, dict, ns] = None,
process_results=None,
python_exec=None,
retain_tmp_env: bool = True):
here = dir_of(caller_file)
if python_exec is None: # use local virtual env by default
python_exec = venv_python_exec(here)
script_path = os.path.join(here, script_file)
cmd = f"{python_exec} {script_path}"
options = ns.from_dict(options) if options else ns()
ser_config = options['serialization']
env = options['env'] or ns()
# To honour any arguments about tmpdir, we copy over
# the TMP, TEMP and TMPDIR environment variables
if retain_tmp_env:
for env_var in ["TMP", "TEMP", "TMPDIR"]:
env[env_var] = os.environ.get(env_var)
with TemporaryDirectory() as tmpdir:
ds = _make_input_dataset(input_data, dataset, tmpdir, serialization=ser_config)
config.result_dir = tmpdir
config.result_file = mktemp(dir=tmpdir)
params = json_dumps(dict(dataset=ds, config=config, options=options), style='compact')
log.debug("Params passed to subprocess:\n%s", params)
cmon = rconfig().monitoring
monitor = (dict(interval_seconds=cmon.interval_seconds,
verbosity=cmon.verbosity)
if 'sub_proc_memory' in cmon.statistics
else None)
env = dict(
PATH=os.pathsep.join([
venv_bin(here),
os.environ['PATH']
]),
PYTHONPATH=os.pathsep.join([
rconfig().root_dir,
]),
AMLB_PATH=os.path.join(rconfig().root_dir, "amlb"),
AMLB_LOG_TRACE=str(logging.TRACE if hasattr(logging, 'TRACE') else ''),
**{k: str(v) for k, v in env}
)
with Timer() as proc_timer:
output, err = run_cmd(cmd, *args,
_input_str_=params,
_live_output_=True,
_error_level_=logging.DEBUG,
_env_=env,
_monitor_=monitor
)
res = ns(lambda: None)
if os.path.exists(config.result_file):
res = json_load(config.result_file, as_namespace=True)
log.debug("Result from subprocess:\n%s", res)
if not res:
raise NoResultError(f"Process crashed:\n{err}")
if res.error_message is not None:
raise NoResultError(res.error_message)
for name in ['predictions', 'truth', 'probabilities']:
res[name] = deserialize_data(res[name], config=ser_config) if res[name] is not None else None
if callable(process_results):
res = process_results(res)
if res.output_file:
save_predictions(dataset=dataset,
output_file=res.output_file,
predictions=as_vec(res.predictions),
truth=(as_vec(res.truth) if res.truth is not None
else dataset.test.y_enc if res.target_is_encoded
else dataset.test.y),
probabilities=res.probabilities,
probabilities_labels=res.probabilities_labels,
target_is_encoded=res.target_is_encoded)
return dict(
models_count=res.models_count if res.models_count is not None else 1,
training_duration=res.training_duration if res.training_duration is not None else proc_timer.duration,
predict_duration=res.predict_duration,
**res.others.__dict__
)
|
def run_in_venv(caller_file, script_file: str, *args,
input_data: Union[dict, ns], dataset: Dataset, config: TaskConfig,
options: Union[None, dict, ns] = None,
process_results=None,
python_exec=None,
retained_env_vars: List[str] = ['TMP', 'TEMP', 'TMPDIR']):
here = dir_of(caller_file)
if python_exec is None: # use local virtual env by default
python_exec = venv_python_exec(here)
script_path = os.path.join(here, script_file)
cmd = f"{python_exec} {script_path}"
options = ns.from_dict(options) if options else ns()
ser_config = options['serialization']
env = options['env'] or ns()
# To honour any arguments about tmpdir, we copy over
# the TMP, TEMP and TMPDIR environment variables
if retain_tmp_env:
for env_var in ["TMP", "TEMP", "TMPDIR"]:
env[env_var] = os.environ.get(env_var)
with TemporaryDirectory() as tmpdir:
ds = _make_input_dataset(input_data, dataset, tmpdir, serialization=ser_config)
config.result_dir = tmpdir
config.result_file = mktemp(dir=tmpdir)
params = json_dumps(dict(dataset=ds, config=config, options=options), style='compact')
log.debug("Params passed to subprocess:\n%s", params)
cmon = rconfig().monitoring
monitor = (dict(interval_seconds=cmon.interval_seconds,
verbosity=cmon.verbosity)
if 'sub_proc_memory' in cmon.statistics
else None)
env = dict(
PATH=os.pathsep.join([
venv_bin(here),
os.environ['PATH']
]),
PYTHONPATH=os.pathsep.join([
rconfig().root_dir,
]),
AMLB_PATH=os.path.join(rconfig().root_dir, "amlb"),
AMLB_LOG_TRACE=str(logging.TRACE if hasattr(logging, 'TRACE') else ''),
**{k: str(v) for k, v in env}
)
with Timer() as proc_timer:
output, err = run_cmd(cmd, *args,
_input_str_=params,
_live_output_=True,
_error_level_=logging.DEBUG,
_env_=env,
_monitor_=monitor
)
res = ns(lambda: None)
if os.path.exists(config.result_file):
res = json_load(config.result_file, as_namespace=True)
log.debug("Result from subprocess:\n%s", res)
if not res:
raise NoResultError(f"Process crashed:\n{err}")
if res.error_message is not None:
raise NoResultError(res.error_message)
for name in ['predictions', 'truth', 'probabilities']:
res[name] = deserialize_data(res[name], config=ser_config) if res[name] is not None else None
if callable(process_results):
res = process_results(res)
if res.output_file:
save_predictions(dataset=dataset,
output_file=res.output_file,
predictions=as_vec(res.predictions),
truth=(as_vec(res.truth) if res.truth is not None
else dataset.test.y_enc if res.target_is_encoded
else dataset.test.y),
probabilities=res.probabilities,
probabilities_labels=res.probabilities_labels,
target_is_encoded=res.target_is_encoded)
return dict(
models_count=res.models_count if res.models_count is not None else 1,
training_duration=res.training_duration if res.training_duration is not None else proc_timer.duration,
predict_duration=res.predict_duration,
**res.others.__dict__
)
|
17,927 |
def build_pat(node_json): # Ici node_json c'est le dossier 'parameters'
"""Construit le dictionnaire de barèmes des cotisations employeur à partir des paramètres de parameters"""
pat = ParameterNode("pat", data={}) # Génère pat
commun = ParameterNode("commun", data={}) # Génère commun
# Réindexation: nouveaux chemins
autres = node_json.prelevements_sociaux.autres_taxes_participations_assises_salaires
retraites = node_json.prelevements_sociaux.regimes_complementaires_retraite_secteur_prive
chom = node_json.prelevements_sociaux.cotisations_regime_assurance_chomage
cotiz = node_json.prelevements_sociaux.cotisations_securite_sociale_regime_general
public = node_json.prelevements_sociaux.cotisations_secteur_public
# Création de commun
# Apprentissage
commun.children['apprentissage'] = autres.apprentissage.children['apprentissage']
commun.children['apprentissage_add'] = autres.apprentissage.children['apprentissage_add']
commun.children['apprentissage'] = autres.apprentissage.children['apprentissage']
commun.children['apprentissage_alsace_moselle'] = autres.apprentissage.children['apprentissage_alsace_moselle']
# Formation
commun.children['formprof_09'] = autres.formation.children['formprof_09']
commun.children['formprof_1019'] = autres.formation.children['formprof_1019']
commun.children['formprof_20'] = autres.formation.children['formprof_20']
# Construction
commun.children['construction'] = autres.construction.children['construction_20']
commun.children['seuil'] = autres.construction.children['seuil']
# Reste
commun.children.update(chom.assedic.employeur.children)
commun.children.update(chom.chomfg.children)
commun.children.update(cotiz.csa.bareme.children) # À harmoniser !
commun.children.update(cotiz.famille.bareme.children) # À harmoniser !
commun.children.update(autres.fnal.children) # À harmoniser !
commun.children.update(autres.fin_syndic.children) # À harmoniser !
commun.children.update(cotiz.penibilite.bareme.children) # À harmoniser !
commun.children.update(cotiz.cnav.bareme.employeur.children) # À harmoniser !
commun.children.update(cotiz.mmid.bareme.employeur.children) # À harmoniser ! + Créer params depuis IPP
# Réindexation NonCadre
# Initialisation
noncadre = ParameterNode("noncadre", data={})
pat.add_child('noncadre', noncadre)
pat.children['noncadre'].children.update(retraites.employeur.noncadre.children)
pat.children['noncadre'].children.update(commun.children)
# Réindexation Cadre
# Initialisation
cadre = ParameterNode("cadre", data={})
pat.add_child('cadre', cadre)
pat.children['cadre'].children.update(retraites.employeur.cadre.children)
pat.children['cadre'].children.update(commun.children)
# Réindexation Fonc
# Initialisation
fonc = ParameterNode("fonc", data={})
pat.add_child('fonc', fonc)
fonc.add_child('colloc', ParameterNode("colloc", data={}))
fonc.add_child('etat', ParameterNode("etat", data={}))
fonc.add_child('contract', ParameterNode("contract", data={}))
# Contractuel
pat.children['fonc'].children['contract'] = public.ircantec.employeur
pat.children['fonc'].children['contract'].children.update(commun.children)
# Etat
pat.children['fonc'].children['etat'].children.update(public.mmid.etat.children)
pat.children['fonc'].children['etat'].children.update(public.retraite.ati.children)
pat.children['fonc'].children['etat'].children.update(public.rafp.employeur.children)
pat.children['fonc'].children['etat'].children.update(public.retraite.pension.employeur.children)
# Collectivités Locales
pat.children['fonc'].children['colloc'].children['hospitaliere'] = public.cnral.employeur.hospitaliere
pat.children['fonc'].children['colloc'].children['territoriale'] = public.cnral.employeur.territoriale
pat.children['fonc'].children['colloc'].children.update(public.cnral.employeur.children)
pat.children['fonc'].children['colloc'].children.update(public.mmid.colloc.children)
pat.children['fonc'].children['colloc'].children.update(public.rafp.employeur.children)
# Renaming
pat.children['prive_non_cadre'] = pat.children.pop('noncadre')
pat.children['prive_cadre'] = pat.children.pop('cadre')
# Rework commun to deal with public employees
for var in ["apprentissage", "apprentissage_add", "apprentissage_alsace_moselle", "assedic", "chomfg", "construction", "maladie", "formprof_09",
"formprof_1019", "formprof_20", "vieillesse_deplafonnee", "vieillesse_plafonnee"]:
del commun.children[var]
for var in ["apprentissage", "apprentissage_add", "apprentissage_alsace_moselle", "formprof_09", "formprof_1019", "formprof_20", "chomfg",
"construction", "assedic"]:
del pat.children['fonc'].children['contract'].children[var]
pat.children['fonc'].children['etat'].children.update(commun.children)
pat.children['fonc'].children['colloc'].children.update(commun.children)
pat.children['etat_t'] = pat.children['fonc'].children['etat']
pat.children['colloc_t'] = pat.children['fonc'].children['colloc']
pat.children['contract'] = pat.children['fonc'].children['contract']
for var in ['etat', 'colloc', 'contract']:
del pat.children['fonc'].children[var]
# Renaming
pat.children['public_titulaire_etat'] = pat.children.pop('etat_t')
# del pat.children['public_titulaire_etat'].children['rafp']
pat.children['public_titulaire_territoriale'] = pat.children.pop('colloc_t')
pat.children['public_titulaire_hospitaliere'] = copy.deepcopy(pat.children['public_titulaire_territoriale'])
for category in ['territoriale', 'hospitaliere']:
for name, bareme in pat.children['public_titulaire_' + category].children[category].children.items():
pat.children['public_titulaire_{}'.format(category)].children[name] = bareme
for category in ['territoriale', 'hospitaliere']:
del pat.children['public_titulaire_territoriale'].children[category]
del pat.children['public_titulaire_hospitaliere'].children[category]
pat.children['public_non_titulaire'] = pat.children.pop('contract')
return pat
|
def build_pat(node_json): # Ici node_json c'est le dossier 'parameters'
"""Construit le dictionnaire de barèmes des cotisations employeur à partir des paramètres de parameters"""
pat = ParameterNode("pat", data={}) # Génère pat
commun = ParameterNode("commun", data={}) # Génère commun
# Réindexation: nouveaux chemins
autres = node_json.prelevements_sociaux.autres_taxes_participations_assises_salaires
retraites = node_json.prelevements_sociaux.regimes_complementaires_retraite_secteur_prive
chom = node_json.prelevements_sociaux.cotisations_regime_assurance_chomage
cotiz = node_json.prelevements_sociaux.cotisations_securite_sociale_regime_general
public = node_json.prelevements_sociaux.cotisations_secteur_public
# Création de commun
# Apprentissage
commun.children['apprentissage'] = autres.apprentissage.children['apprentissage']
commun.children['apprentissage_add'] = autres.apprentissage.children['apprentissage_add']
commun.children['apprentissage'] = autres.apprentissage.children['apprentissage']
commun.children['apprentissage_alsace_moselle'] = autres.apprentissage.children['apprentissage_alsace_moselle']
# Formation
commun.children['formprof_09'] = autres.formation.children['formprof_09']
commun.children['formprof_1019'] = autres.formation.children['formprof_1019']
commun.children['formprof_20'] = autres.formation.children['formprof_20']
# Construction
commun.children['construction'] = autres.construction.children['construction_20']
commun.children['seuil'] = autres.construction.children['seuil']
# Autres thématiques
commun.children.update(chom.assedic.employeur.children)
commun.children.update(chom.chomfg.children)
commun.children.update(cotiz.csa.bareme.children) # À harmoniser !
commun.children.update(cotiz.famille.bareme.children) # À harmoniser !
commun.children.update(autres.fnal.children) # À harmoniser !
commun.children.update(autres.fin_syndic.children) # À harmoniser !
commun.children.update(cotiz.penibilite.bareme.children) # À harmoniser !
commun.children.update(cotiz.cnav.bareme.employeur.children) # À harmoniser !
commun.children.update(cotiz.mmid.bareme.employeur.children) # À harmoniser ! + Créer params depuis IPP
# Réindexation NonCadre
# Initialisation
noncadre = ParameterNode("noncadre", data={})
pat.add_child('noncadre', noncadre)
pat.children['noncadre'].children.update(retraites.employeur.noncadre.children)
pat.children['noncadre'].children.update(commun.children)
# Réindexation Cadre
# Initialisation
cadre = ParameterNode("cadre", data={})
pat.add_child('cadre', cadre)
pat.children['cadre'].children.update(retraites.employeur.cadre.children)
pat.children['cadre'].children.update(commun.children)
# Réindexation Fonc
# Initialisation
fonc = ParameterNode("fonc", data={})
pat.add_child('fonc', fonc)
fonc.add_child('colloc', ParameterNode("colloc", data={}))
fonc.add_child('etat', ParameterNode("etat", data={}))
fonc.add_child('contract', ParameterNode("contract", data={}))
# Contractuel
pat.children['fonc'].children['contract'] = public.ircantec.employeur
pat.children['fonc'].children['contract'].children.update(commun.children)
# Etat
pat.children['fonc'].children['etat'].children.update(public.mmid.etat.children)
pat.children['fonc'].children['etat'].children.update(public.retraite.ati.children)
pat.children['fonc'].children['etat'].children.update(public.rafp.employeur.children)
pat.children['fonc'].children['etat'].children.update(public.retraite.pension.employeur.children)
# Collectivités Locales
pat.children['fonc'].children['colloc'].children['hospitaliere'] = public.cnral.employeur.hospitaliere
pat.children['fonc'].children['colloc'].children['territoriale'] = public.cnral.employeur.territoriale
pat.children['fonc'].children['colloc'].children.update(public.cnral.employeur.children)
pat.children['fonc'].children['colloc'].children.update(public.mmid.colloc.children)
pat.children['fonc'].children['colloc'].children.update(public.rafp.employeur.children)
# Renaming
pat.children['prive_non_cadre'] = pat.children.pop('noncadre')
pat.children['prive_cadre'] = pat.children.pop('cadre')
# Rework commun to deal with public employees
for var in ["apprentissage", "apprentissage_add", "apprentissage_alsace_moselle", "assedic", "chomfg", "construction", "maladie", "formprof_09",
"formprof_1019", "formprof_20", "vieillesse_deplafonnee", "vieillesse_plafonnee"]:
del commun.children[var]
for var in ["apprentissage", "apprentissage_add", "apprentissage_alsace_moselle", "formprof_09", "formprof_1019", "formprof_20", "chomfg",
"construction", "assedic"]:
del pat.children['fonc'].children['contract'].children[var]
pat.children['fonc'].children['etat'].children.update(commun.children)
pat.children['fonc'].children['colloc'].children.update(commun.children)
pat.children['etat_t'] = pat.children['fonc'].children['etat']
pat.children['colloc_t'] = pat.children['fonc'].children['colloc']
pat.children['contract'] = pat.children['fonc'].children['contract']
for var in ['etat', 'colloc', 'contract']:
del pat.children['fonc'].children[var]
# Renaming
pat.children['public_titulaire_etat'] = pat.children.pop('etat_t')
# del pat.children['public_titulaire_etat'].children['rafp']
pat.children['public_titulaire_territoriale'] = pat.children.pop('colloc_t')
pat.children['public_titulaire_hospitaliere'] = copy.deepcopy(pat.children['public_titulaire_territoriale'])
for category in ['territoriale', 'hospitaliere']:
for name, bareme in pat.children['public_titulaire_' + category].children[category].children.items():
pat.children['public_titulaire_{}'.format(category)].children[name] = bareme
for category in ['territoriale', 'hospitaliere']:
del pat.children['public_titulaire_territoriale'].children[category]
del pat.children['public_titulaire_hospitaliere'].children[category]
pat.children['public_non_titulaire'] = pat.children.pop('contract')
return pat
|
46,080 |
def main(args, unknown_args):
args, config = parse_args_uargs(args, unknown_args)
pprint(args)
pprint(config)
set_global_seeds(args.seed)
modules = prepare_modules(model_dir=args.model_dir)
datasource = modules["data"].DataSource()
data_params = config.get("data_params", {}) or {}
loaders = datasource.prepare_loaders(
mode="infer",
n_workers=args.workers,
batch_size=args.batch_size,
**data_params
)
model = modules["model"].prepare_model(config)
runner = modules["model"].ModelRunner(model=model)
callbacks_params = config.get("callbacks_params", {}) or {}
callbacks = runner.prepare_callbacks(
mode="infer",
resume=args.resume,
out_prefix=args.out_prefix,
**callbacks_params
)
runner.infer(loaders=loaders, callbacks=callbacks, verbose=args.verbose)
|
def main(args, unknown_args):
args, config = parse_args_uargs(args, unknown_args)
pprint(args)
pprint(config)
set_global_seeds(args.seed)
modules = prepare_modules(model_dir=args.model_dir)
datasource = modules["data"].DataSource()
data_params = config.get("data_params", {})
loaders = datasource.prepare_loaders(
mode="infer",
n_workers=args.workers,
batch_size=args.batch_size,
**data_params
)
model = modules["model"].prepare_model(config)
runner = modules["model"].ModelRunner(model=model)
callbacks_params = config.get("callbacks_params", {}) or {}
callbacks = runner.prepare_callbacks(
mode="infer",
resume=args.resume,
out_prefix=args.out_prefix,
**callbacks_params
)
runner.infer(loaders=loaders, callbacks=callbacks, verbose=args.verbose)
|
10,413 |
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
operation=dict(required=True, choices=['run', 'start', 'stop']),
cluster=dict(required=False, type='str'), # R S P
task_definition=dict(required=False, type='str'), # R* S*
overrides=dict(required=False, type='dict'), # R S
count=dict(required=False, type='int'), # R
task=dict(required=False, type='str'), # P*
container_instances=dict(required=False, type='list'), # S*
started_by=dict(required=False, type='str'), # R S
network_configuration=dict(required=False, type='dict', options=dict(
subnets=dict(type='list'),
security_groups=dict(type='list'),
assign_public_ip=dict(type='bool')
)),
launch_type=dict(required=False, choices=['EC2', 'FARGATE'])
))
module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True,
required_if=[('launch_type', 'FARGATE', ['network_configuration'])])
# Validate Inputs
if module.params['operation'] == 'run':
if 'task_definition' not in module.params and module.params['task_definition'] is None:
module.fail_json(msg="To run a task, a task_definition must be specified")
task_to_list = module.params['task_definition']
status_type = "RUNNING"
if module.params['operation'] == 'start':
if 'task_definition' not in module.params and module.params['task_definition'] is None:
module.fail_json(msg="To start a task, a task_definition must be specified")
if 'container_instances' not in module.params and module.params['container_instances'] is None:
module.fail_json(msg="To start a task, container instances must be specified")
task_to_list = module.params['task']
status_type = "RUNNING"
if module.params['operation'] == 'stop':
if 'task' not in module.params and module.params['task'] is None:
module.fail_json(msg="To stop a task, a task must be specified")
if 'task_definition' not in module.params and module.params['task_definition'] is None:
module.fail_json(msg="To stop a task, a task definition must be specified")
task_to_list = module.params['task_definition']
status_type = "STOPPED"
service_mgr = EcsExecManager(module)
if module.params['network_configuration'] and not service_mgr.ecs_api_handles_network_configuration():
module.fail_json(msg='botocore needs to be version 1.7.44 or higher to use network configuration')
if module.params['launch_type'] and not service_mgr.ecs_api_handles_launch_type():
module.fail_json(msg='botocore needs to be version 1.8.4 or higher to use launch type')
existing = service_mgr.list_tasks(module.params['cluster'], task_to_list, status_type)
results = dict(changed=False)
if module.params['operation'] == 'run':
if existing:
# TBD - validate the rest of the details
results['task'] = existing
else:
if not module.check_mode:
results['task'] = service_mgr.run_task(
module.params['cluster'],
module.params['task_definition'],
module.params['overrides'],
module.params['count'],
module.params['started_by'],
module.params['launch_type'])
results['changed'] = True
elif module.params['operation'] == 'start':
if existing:
# TBD - validate the rest of the details
results['task'] = existing
else:
if not module.check_mode:
results['task'] = service_mgr.start_task(
module.params['cluster'],
module.params['task_definition'],
module.params['overrides'],
module.params['container_instances'],
module.params['started_by']
)
results['changed'] = True
elif module.params['operation'] == 'stop':
if existing:
results['task'] = existing
else:
if not module.check_mode:
# it exists, so we should delete it and mark changed.
# return info about the cluster deleted
results['task'] = service_mgr.stop_task(
module.params['cluster'],
module.params['task']
)
results['changed'] = True
module.exit_json(**results)
|
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
operation=dict(required=True, choices=['run', 'start', 'stop']),
cluster=dict(required=False, type='str'), # R S P
task_definition=dict(required=False, type='str'), # R* S*
overrides=dict(required=False, type='dict'), # R S
count=dict(required=False, type='int'), # R
task=dict(required=False, type='str'), # P*
container_instances=dict(required=False, type='list'), # S*
started_by=dict(required=False, type='str'), # R S
network_configuration=dict(required=False, type='dict', options=dict(
subnets=dict(type='list', elements='str'),
security_groups=dict(type='list'),
assign_public_ip=dict(type='bool')
)),
launch_type=dict(required=False, choices=['EC2', 'FARGATE'])
))
module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True,
required_if=[('launch_type', 'FARGATE', ['network_configuration'])])
# Validate Inputs
if module.params['operation'] == 'run':
if 'task_definition' not in module.params and module.params['task_definition'] is None:
module.fail_json(msg="To run a task, a task_definition must be specified")
task_to_list = module.params['task_definition']
status_type = "RUNNING"
if module.params['operation'] == 'start':
if 'task_definition' not in module.params and module.params['task_definition'] is None:
module.fail_json(msg="To start a task, a task_definition must be specified")
if 'container_instances' not in module.params and module.params['container_instances'] is None:
module.fail_json(msg="To start a task, container instances must be specified")
task_to_list = module.params['task']
status_type = "RUNNING"
if module.params['operation'] == 'stop':
if 'task' not in module.params and module.params['task'] is None:
module.fail_json(msg="To stop a task, a task must be specified")
if 'task_definition' not in module.params and module.params['task_definition'] is None:
module.fail_json(msg="To stop a task, a task definition must be specified")
task_to_list = module.params['task_definition']
status_type = "STOPPED"
service_mgr = EcsExecManager(module)
if module.params['network_configuration'] and not service_mgr.ecs_api_handles_network_configuration():
module.fail_json(msg='botocore needs to be version 1.7.44 or higher to use network configuration')
if module.params['launch_type'] and not service_mgr.ecs_api_handles_launch_type():
module.fail_json(msg='botocore needs to be version 1.8.4 or higher to use launch type')
existing = service_mgr.list_tasks(module.params['cluster'], task_to_list, status_type)
results = dict(changed=False)
if module.params['operation'] == 'run':
if existing:
# TBD - validate the rest of the details
results['task'] = existing
else:
if not module.check_mode:
results['task'] = service_mgr.run_task(
module.params['cluster'],
module.params['task_definition'],
module.params['overrides'],
module.params['count'],
module.params['started_by'],
module.params['launch_type'])
results['changed'] = True
elif module.params['operation'] == 'start':
if existing:
# TBD - validate the rest of the details
results['task'] = existing
else:
if not module.check_mode:
results['task'] = service_mgr.start_task(
module.params['cluster'],
module.params['task_definition'],
module.params['overrides'],
module.params['container_instances'],
module.params['started_by']
)
results['changed'] = True
elif module.params['operation'] == 'stop':
if existing:
results['task'] = existing
else:
if not module.check_mode:
# it exists, so we should delete it and mark changed.
# return info about the cluster deleted
results['task'] = service_mgr.stop_task(
module.params['cluster'],
module.params['task']
)
results['changed'] = True
module.exit_json(**results)
|
999 |
def _types_simplenamespace_pprint(obj, p, cycle):
"""The pprint function for regular expression patterns."""
name = 'namespace'
with p.group(len(name) + 1, name + '(', ')'):
if cycle:
p.text('...')
else:
for idx, (attr, value) in enumerate(obj.__dict__.items()):
if idx:
p.text(',')
p.breakable()
attr_kwarg = '{}='.format(attr)
with p.group(len(attr_kwarg), attr_kwarg):
p.pretty(value)
|
def _types_simplenamespace_pprint(obj, p, cycle):
"""The pprint function for types.SimpleNamespace."""
name = 'namespace'
with p.group(len(name) + 1, name + '(', ')'):
if cycle:
p.text('...')
else:
for idx, (attr, value) in enumerate(obj.__dict__.items()):
if idx:
p.text(',')
p.breakable()
attr_kwarg = '{}='.format(attr)
with p.group(len(attr_kwarg), attr_kwarg):
p.pretty(value)
|
29,757 |
def get_listening_activity_week():
""" Get the weekly listening activity for all users """
current_app.logger.debug("Calculating listening_activity_week")
date = get_latest_listen_ts()
to_date = get_last_monday(date)
from_date = adjust_days(to_date, 7)
time_range = []
for offset in range(0, 7):
day = adjust_days(from_date, offset, shift_backwards=False)
time_range.append([day.strftime('%A'), day, datetime(day.year, day.month, day.day, hour=23, minute=59, second=59)])
time_range_df = session.createDataFrame(time_range, time_range_schema)
time_range_df.createOrReplaceTempView('time_range')
listens_df = get_listens(from_date, to_date, path=LISTENBRAINZ_DATA_DIRECTORY)
listens_df.createOrReplaceTempView('listens')
data = get_listening_activity('week')
messages = create_messages(data)
current_app.logger.debug("Done!")
return messages
|
def get_listening_activity_week():
""" Get the weekly listening activity for all users """
current_app.logger.debug("Calculating listening_activity_week")
date = get_latest_listen_ts()
to_date = get_last_monday(date)
from_date = adjust_days(to_date, 7)
time_range = []
for offset in range(0, 7):
day = adjust_days(from_date, offset, shift_backwards=False)
time_range.append([day.strftime('%A'), day, _get_next_day(day)])
time_range_df = session.createDataFrame(time_range, time_range_schema)
time_range_df.createOrReplaceTempView('time_range')
listens_df = get_listens(from_date, to_date, path=LISTENBRAINZ_DATA_DIRECTORY)
listens_df.createOrReplaceTempView('listens')
data = get_listening_activity('week')
messages = create_messages(data)
current_app.logger.debug("Done!")
return messages
|
12,400 |
def _pprint_key_entries(user, key_fn, key_entries, hash_meth='sha256',
prefix='ci-info: '):
if not key_entries:
message = ("%sno authorized SSH keys fingerprints found for user %s.\n"
% (prefix, user))
util.multi_log(message, console=True, stderr=False)
return
tbl_fields = ['Keytype', 'Fingerprint (%s)' % (hash_meth), 'Options',
'Comment']
tbl = SimpleTable(tbl_fields)
for entry in key_entries:
if _is_printable_key(entry):
row = [entry.keytype or '-',
_gen_fingerprint(entry.base64, hash_meth) or '-',
entry.options or '-',
entry.comment or '-']
tbl.add_row(row)
authtbl_s = tbl.get_string()
authtbl_lines = authtbl_s.splitlines()
max_len = len(max(authtbl_lines, key=len))
lines = [
util.center("Authorized keys from %s for user %s" %
(key_fn, user), "+", max_len),
]
lines.extend(authtbl_lines)
for line in lines:
util.multi_log(text="%s%s\n" % (prefix, line),
stderr=False, console=True)
|
def _pprint_key_entries(user, key_fn, key_entries, hash_meth='sha256',
prefix='ci-info: '):
if not key_entries:
message = ("%sno authorized SSH keys fingerprints found for user %s.\n"
% (prefix, user))
util.multi_log(message, console=False, stderr=True)
return
tbl_fields = ['Keytype', 'Fingerprint (%s)' % (hash_meth), 'Options',
'Comment']
tbl = SimpleTable(tbl_fields)
for entry in key_entries:
if _is_printable_key(entry):
row = [entry.keytype or '-',
_gen_fingerprint(entry.base64, hash_meth) or '-',
entry.options or '-',
entry.comment or '-']
tbl.add_row(row)
authtbl_s = tbl.get_string()
authtbl_lines = authtbl_s.splitlines()
max_len = len(max(authtbl_lines, key=len))
lines = [
util.center("Authorized keys from %s for user %s" %
(key_fn, user), "+", max_len),
]
lines.extend(authtbl_lines)
for line in lines:
util.multi_log(text="%s%s\n" % (prefix, line),
stderr=False, console=True)
|
8,671 |
def load_settings(options):
"""Load Sopel's settings using the command line's ``options``.
:param options: parsed arguments
:return: sopel configuration
:rtype: :class:`sopel.config.Config`
:raise sopel.config.ConfigurationNotFound: raised when configuration file
is not found
:raise sopel.config.ConfigurationError: raised when configuration is
invalid
This function loads Sopel's settings from one of these sources:
* value of ``options.config``, if given,
* ``SOPEL_CONFIG`` environ variable, if no option is given,
* otherwise the ``default`` configuration is loaded,
then loads the settings and returns it as a :class:`~sopel.config.Config`
object.
If the configuration file can not be found, a
:exc:`sopel.config.ConfigurationNotFound` error will be raised.
.. note::
To use this function effectively, the
:func:`sopel.cli.utils.add_common_arguments` function should be used to
add the proper option to the argument parser.
"""
# Default if no options.config or no env var or if they are empty
name = 'default'
if options.config:
name = options.config
elif 'SOPEL_CONFIG' in os.environ:
name = os.environ['SOPEL_CONFIG'] or name # use default if empty
filename = find_config(config.DEFAULT_HOMEDIR, name)
if not os.path.isfile(filename):
raise config.ConfigurationNotFound(filename=filename)
return config.Config(filename)
|
def load_settings(options):
"""Load Sopel's settings using the command line's ``options``.
:param options: parsed arguments
:return: sopel configuration
:rtype: :class:`sopel.config.Config`
:raise sopel.config.ConfigurationNotFound: raised when configuration file
is not found
:raise sopel.config.ConfigurationError: raised when configuration is
invalid
This function loads Sopel's settings from one of these sources:
* value of ``options.config``, if given,
* ``SOPEL_CONFIG`` environment variable, if no option is given,
* otherwise the ``default`` configuration is loaded,
then loads the settings and returns it as a :class:`~sopel.config.Config`
object.
If the configuration file can not be found, a
:exc:`sopel.config.ConfigurationNotFound` error will be raised.
.. note::
To use this function effectively, the
:func:`sopel.cli.utils.add_common_arguments` function should be used to
add the proper option to the argument parser.
"""
# Default if no options.config or no env var or if they are empty
name = 'default'
if options.config:
name = options.config
elif 'SOPEL_CONFIG' in os.environ:
name = os.environ['SOPEL_CONFIG'] or name # use default if empty
filename = find_config(config.DEFAULT_HOMEDIR, name)
if not os.path.isfile(filename):
raise config.ConfigurationNotFound(filename=filename)
return config.Config(filename)
|
32,615 |
def team_cymru_bulk_whois(client: Client, bulk: List[str]) -> Optional[Dict[str, Any]]:
"""Perform lookups by bulk of ip addresses, returning a dictionary of ip -> record (ASN, Country Code, and Netblock Owner.)
:type client: ``Client``
:param client: cymruwhois client to use
:type bulk: ``list``
:param bulk: list of ip addresses
:return: dict containing the result of the lookupmany action as returned from the API (asn, cc, owner, etc.)
:rtype: Dict[str, Dict[str, str]]
"""
raw_result = client.lookupmany_dict(bulk)
return {k: vars(raw_result[k]) for k in raw_result} if raw_result else None
|
def team_cymru_bulk_whois(client: Client, bulk: List[str]) -> Optional[Dict[str, Any]]:
"""Performs bulk IP address searches, returning a dictionary of IP records (ASN, Country Code, and Netblock Owner.)
:type client: ``Client``
:param client: cymruwhois client to use
:type bulk: ``list``
:param bulk: list of ip addresses
:return: dict containing the result of the lookupmany action as returned from the API (asn, cc, owner, etc.)
:rtype: Dict[str, Dict[str, str]]
"""
raw_result = client.lookupmany_dict(bulk)
return {k: vars(raw_result[k]) for k in raw_result} if raw_result else None
|
32,059 |
def wait_and_complete_task_command(args: Dict[str, Any]) -> CommandResults:
"""
Args:
args: Script arguments
Returns:
CompletedTask - Tasks that was completed by script
FoundTasks - Tasks that was found by script, and already completed, not by this script
"""
task_states = argToList(args.get('task_states'))
if not all(state in POSSIBLE_STATES for state in task_states):
raise Exception(f'task_states are bad. Possible values: {POSSIBLE_STATES}')
complete_option = args.get('complete_option')
incident_id = args.get('incident_id')
if not incident_id:
incident = demisto.incidents()[0]
incident_id = incident.get('id')
task_name = args.get('task_name')
complete_task = argToBoolean(args.get('complete_task', 'true'))
max_timeout = arg_to_number(args.get('max_timeout', 60))
interval_between_tries = arg_to_number(args.get('interval_between_tries', 3))
completed_tasks = []
found_tasks = []
start_time = time.time()
while True:
tasks_by_states = get_incident_tasks_by_state(incident_id, task_states)
requested_task = None
# find task to complete if was given task name
if task_name:
for task in tasks_by_states:
if task['name'] == task_name:
requested_task = task
break
if requested_task and complete_task:
# complete the requested task
complete_task_by_id(
requested_task.get('id'),
requested_task.get('parentPlaybookID'),
incident_id,
complete_option
)
completed_tasks.append(requested_task.get('name'))
break
elif requested_task:
# just validate that task was found and not complete it
found_tasks.append(requested_task.get('name'))
break
elif not task_name and tasks_by_states and complete_task:
# complete all tasks, which state is task_states
for task in tasks_by_states:
complete_res = complete_task_by_id(
task.get('id'),
task.get('parentPlaybookID'),
incident_id,
complete_option
)
if 'Task is completed already' in complete_res:
found_tasks.append(task.get('name'))
else:
completed_tasks.append(task.get('name'))
break
elif not task_name and tasks_by_states:
# just validate that task was found and not complete it
found_tasks.extend(task.get('name') for task in tasks_by_states)
break
if time.time() - start_time > max_timeout: # type: ignore[operator]
break
sleep(float(interval_between_tries)) # type: ignore[arg-type]
if not completed_tasks and not found_tasks:
if task_name and task_states:
raise Exception(f'The task "{task_name}" did not reach the {" or ".join(task_states)} state.')
elif task_name:
raise Exception(f'The task "{task_name}" was not found by script.')
elif task_states:
raise Exception(f'None of the tasks reached the {" or ".join(task_states)} state.')
else:
raise Exception('No tasks were found.')
return CommandResults(
outputs_prefix='WaitAndCompleteTask',
outputs_key_field='',
outputs={'CompletedTask': completed_tasks,
'FoundTasks': found_tasks},
)
|
def wait_and_complete_task_command(args: Dict[str, Any]) -> CommandResults:
"""
Args:
args: Script arguments
Returns:
CompletedTask - Tasks that was completed by script
FoundTasks - Tasks that was found by script, and already completed, not by this script
"""
task_states = argToList(args.get('task_states'))
if not all(state in POSSIBLE_STATES for state in task_states):
raise Exception(f'task_states are bad. Possible values: {POSSIBLE_STATES}')
complete_option = args.get('complete_option')
incident_id = args.get('incident_id')
if not incident_id:
incident = demisto.incidents()[0]
incident_id = incident.get('id')
task_name = args.get('task_name')
complete_task = argToBoolean(args.get('complete_task', 'true'))
max_timeout = arg_to_number(args.get('max_timeout', 60))
interval_between_tries = arg_to_number(args.get('interval_between_tries', 3))
completed_tasks = []
found_tasks = []
start_time = time.time()
while True:
tasks_by_states = get_incident_tasks_by_state(incident_id, task_states)
requested_task = None
# find task to complete if was given task name
if task_name:
for task in tasks_by_states:
if task['name'] == task_name:
requested_task = task
break
if requested_task and complete_task:
# complete the requested task
complete_task_by_id(
requested_task.get('id'),
requested_task.get('parentPlaybookID'),
incident_id,
complete_option
)
completed_tasks.append(requested_task.get('name'))
break
elif requested_task:
# just validate that task was found and not complete it
found_tasks.append(requested_task.get('name'))
break
elif not task_name and tasks_by_states and complete_task:
# complete all tasks, which state is task_states
for task in tasks_by_states:
complete_res = complete_task_by_id(
task.get('id'),
task.get('parentPlaybookID'),
incident_id,
complete_option
)
if 'Task is completed already' in complete_res:
found_tasks.append(task.get('name'))
else:
completed_tasks.append(task.get('name'))
break
elif not task_name and tasks_by_states:
# just validate that task was found and not complete it
found_tasks.extend(task.get('name') for task in tasks_by_states)
break
if time.time() - start_time > max_timeout: # type: ignore[operator]
break
sleep(float(interval_between_tries)) # type: ignore[arg-type]
if not completed_tasks and not found_tasks:
if task_name and task_states:
raise Exception(f'The task "{task_name}" did not reach the {" or ".join(task_states)} state.')
elif task_name:
raise Exception(f'The task "{task_name}" was not found by script.')
elif task_states:
raise Exception(f'None of the tasks reached the {" or ".join(task_states)} state.')
else:
raise Exception('No tasks were found.')
return CommandResults(
outputs_prefix='WaitAndCompleteTask',
outputs_key_field='',
outputs={'CompletedTask': completed_tasks,
'FoundTask': found_tasks},
)
|
8,167 |
def get_timerange_from_exdict(exdict):
"""
Function to get URL's timerange using extracted metadata.
Parameters
----------
exdict : `dict`
Metadata extracted from the file's url.
Returns
-------
file_timerange: `~sunpy.time.TimeRange`
The time range of the file.
"""
datetypes = ['year', 'month', 'day']
timetypes = ['hour', 'minute', 'second', 'millisecond']
dtlist = []
for d in datetypes:
dtlist.append(int(exdict.get(d, 1)))
for t in timetypes:
dtlist.append(int(exdict.get(t, 0)))
startTime = Time(datetime.datetime(*dtlist))
tdelta = 1*u.millisecond
if "year" in exdict:
if calendar.isleap(int(exdict['year'])):
tdelta = 366*u.day
else:
tdelta = 365*u.day
if "month" in exdict:
days_in_month = calendar.monthrange(int(exdict['year']), int(exdict['month']))[1]
tdelta = days_in_month*u.day
if "day" in exdict:
tdelta = 1*u.day
if "hour" in exdict:
tdelta = 1*u.hour
if "minute" in exdict:
tdelta = 1*u.minute
if "second" in exdict:
tdelta = 1*u.second
endTime = startTime + TimeDelta(tdelta - 1*u.millisecond)
file_timerange = TimeRange(startTime, endTime)
return file_timerange
|
def get_timerange_from_exdict(exdict):
"""
Function to get URL's timerange using extracted metadata.
Parameters
----------
exdict : `dict`
Metadata extracted from the file's url.
Returns
-------
`~sunpy.time.TimeRange`
The time range of the file.
"""
datetypes = ['year', 'month', 'day']
timetypes = ['hour', 'minute', 'second', 'millisecond']
dtlist = []
for d in datetypes:
dtlist.append(int(exdict.get(d, 1)))
for t in timetypes:
dtlist.append(int(exdict.get(t, 0)))
startTime = Time(datetime.datetime(*dtlist))
tdelta = 1*u.millisecond
if "year" in exdict:
if calendar.isleap(int(exdict['year'])):
tdelta = 366*u.day
else:
tdelta = 365*u.day
if "month" in exdict:
days_in_month = calendar.monthrange(int(exdict['year']), int(exdict['month']))[1]
tdelta = days_in_month*u.day
if "day" in exdict:
tdelta = 1*u.day
if "hour" in exdict:
tdelta = 1*u.hour
if "minute" in exdict:
tdelta = 1*u.minute
if "second" in exdict:
tdelta = 1*u.second
endTime = startTime + TimeDelta(tdelta - 1*u.millisecond)
file_timerange = TimeRange(startTime, endTime)
return file_timerange
|
17,877 |
def _mantel_stats_spearman(x, y, permutations):
"""Compute original and permuted stats using spearmanr.
Parameters
----------
x, y : DistanceMatrix
Input distance matrices to compare.
permutations : int
Number of times to randomly permute `x` when assessing statistical
significance. Must be greater than or equal to zero. If zero,
statistical significance calculations will be skipped and
permuted_stats will be an empty array.
Returns
-------
orig_stat : 1D array_like
Correlation coefficient of the test.
permuted_stats : 1D array_like
Permutted correlation coefficients of the test.
"""
x_flat = x.condensed_form()
y_flat = y.condensed_form()
# If an input is constant, the correlation coefficient is not defined.
if (x_flat == x_flat[0]).all() or (y_flat == y_flat[0]).all():
warnings.warn(SpearmanRConstantInputWarning())
return np.nan, []
y_rank = scipy.stats.rankdata(y_flat)
del y_flat
x_rank = scipy.stats.rankdata(x_flat)
del x_flat
x_rank_matrix = DistanceMatrix(x_rank, x.ids)
del x_rank
# for our purposes, spearman is just pearson on rankdata
return _mantel_stats_pearson_flat(x_rank_matrix, y_rank, permutations)
|
def _mantel_stats_spearman(x, y, permutations):
"""Compute original and permuted stats using spearmanr.
Parameters
----------
x, y : DistanceMatrix
Input distance matrices to compare.
permutations : int
Number of times to randomly permute `x` when assessing statistical
significance. Must be greater than or equal to zero. If zero,
statistical significance calculations will be skipped and
permuted_stats will be an empty array.
Returns
-------
orig_stat : 1D array_like
Correlation coefficient of the test.
permuted_stats : 1D array_like
Permuted correlation coefficients of the test.
"""
x_flat = x.condensed_form()
y_flat = y.condensed_form()
# If an input is constant, the correlation coefficient is not defined.
if (x_flat == x_flat[0]).all() or (y_flat == y_flat[0]).all():
warnings.warn(SpearmanRConstantInputWarning())
return np.nan, []
y_rank = scipy.stats.rankdata(y_flat)
del y_flat
x_rank = scipy.stats.rankdata(x_flat)
del x_flat
x_rank_matrix = DistanceMatrix(x_rank, x.ids)
del x_rank
# for our purposes, spearman is just pearson on rankdata
return _mantel_stats_pearson_flat(x_rank_matrix, y_rank, permutations)
|
52,698 |
def cvxopt_check():
global cvxopt_installed
if cvxopt_installed is None:
try:
import cvxopt
cvxopt_installed = True
except:
cvxopt_installed = False
return cvxopt_installed
|
def cvxopt_check():
global cvxopt_installed
if cvxopt_installed is None:
try:
import cvxopt
cvxopt_installed = True
except:
cvxopt_installed = False
return cvxopt_installed
|
42,124 |
def _log_failed_trial(
trial: FrozenTrial,
message: Union[str, Warning],
exc_info: Any = None,
value_or_values: Any = None,
) -> None:
_logger.warning(
"Trial {} failed because of the following error: {}".format(trial.number, message),
exc_info=exc_info,
)
_logger.warning("Trial {} failed with value {}.".format(trial.number, value_or_values))
|
def _log_failed_trial(
trial: FrozenTrial,
message: Union[str, Warning],
exc_info: Any = None,
value_or_values: Any = None,
) -> None:
_logger.warning(
"Trial {} failed because of the following error: {}".format(trial.number, message),
exc_info=exc_info,
)
_logger.warning("Trial {} failed with value {}.".format(trial.number, repr(value_or_values)))
|
7,174 |
def _get_fourier_filter(size, filter_name):
"""Construct the Fourier filter
This computation lessens artifacts and removes a small bias as
explained in [1], Chap 3. Equation 61
Parameters
----------
size: int
filter size.
filter_name: str, optional
Filter used in frequency domain filtering. Ramp filter used by
default. Filters available: ramp, shepp-logan, cosine,
hamming, hann. Assign None to use no filter.
Returns
-------
fourier_filter: ndarray
The computed Fourier filter.
References
----------
.. [1] AC Kak, M Slaney, "Principles of Computerized Tomographic
Imaging", IEEE Press 1988.
"""
n = np.concatenate((np.arange(1, size / 2 + 1, 2, dtype=np.int),
np.arange(size / 2 - 1, 0, -2, dtype=np.int)))
f = np.zeros(size)
f[0] = 0.25
f[1::2] = -1 / (np.pi * n) ** 2
# Computing the ramp filter from the fourier transform of its
# frequency domain representation lessens artifacts and removes a
# small bias as explained in [1], Chap 3. Equation 61
fourier_filter = 2 * np.real(fft(f)) # ramp filter
if filter_name == "ramp":
pass
elif filter_name == "shepp-logan":
# Start from first element to avoid divide by zero
omega = np.pi * fftmodule.fftfreq(size)[1:]
fourier_filter[1:] *= np.sin(omega) / omega
elif filter_name == "cosine":
freq = np.pi * np.linspace(0, 1, size, endpoint=False)
cosine_filter = fftmodule.fftshift(np.sin(freq))
fourier_filter *= cosine_filter
elif filter_name == "hamming":
fourier_filter *= fftmodule.fftshift(np.hamming(size))
elif filter_name == "hann":
fourier_filter *= fftmodule.fftshift(np.hanning(size))
elif filter_name is None:
fourier_filter[:] = 1
return fourier_filter[:, np.newaxis]
|
def _get_fourier_filter(size, filter_name):
"""Construct the Fourier filter.
This computation lessens artifacts and removes a small bias as
explained in [1], Chap 3. Equation 61
Parameters
----------
size: int
filter size.
filter_name: str, optional
Filter used in frequency domain filtering. Ramp filter used by
default. Filters available: ramp, shepp-logan, cosine,
hamming, hann. Assign None to use no filter.
Returns
-------
fourier_filter: ndarray
The computed Fourier filter.
References
----------
.. [1] AC Kak, M Slaney, "Principles of Computerized Tomographic
Imaging", IEEE Press 1988.
"""
n = np.concatenate((np.arange(1, size / 2 + 1, 2, dtype=np.int),
np.arange(size / 2 - 1, 0, -2, dtype=np.int)))
f = np.zeros(size)
f[0] = 0.25
f[1::2] = -1 / (np.pi * n) ** 2
# Computing the ramp filter from the fourier transform of its
# frequency domain representation lessens artifacts and removes a
# small bias as explained in [1], Chap 3. Equation 61
fourier_filter = 2 * np.real(fft(f)) # ramp filter
if filter_name == "ramp":
pass
elif filter_name == "shepp-logan":
# Start from first element to avoid divide by zero
omega = np.pi * fftmodule.fftfreq(size)[1:]
fourier_filter[1:] *= np.sin(omega) / omega
elif filter_name == "cosine":
freq = np.pi * np.linspace(0, 1, size, endpoint=False)
cosine_filter = fftmodule.fftshift(np.sin(freq))
fourier_filter *= cosine_filter
elif filter_name == "hamming":
fourier_filter *= fftmodule.fftshift(np.hamming(size))
elif filter_name == "hann":
fourier_filter *= fftmodule.fftshift(np.hanning(size))
elif filter_name is None:
fourier_filter[:] = 1
return fourier_filter[:, np.newaxis]
|
17,500 |
def _put_attrs(zarr_obj, attrs):
for key, value in attrs.items():
try:
zarr_obj.attrs[key] = value
except TypeError as e:
raise TypeError(f"Invalid attr {key!r}: {value!r}. {e!s}") from e
return zarr_obj
|
def _put_attrs(zarr_obj, attrs):
zarr_obj.attrs.update(attrs)
|
5,153 |
def test_colorbar_set_formatter_locator():
# check that the locator properties echo what is on the axis:
fig, ax = plt.subplots()
pc = ax.pcolormesh(np.random.randn(10, 10))
cb = fig.colorbar(pc)
cb.ax.yaxis.set_major_locator(FixedLocator(np.arange(10)))
cb.ax.yaxis.set_minor_locator(FixedLocator(np.arange(0, 10, 0.2)))
assert cb.locator == cb.ax.yaxis.get_major_locator()
assert cb.minorlocator == cb.ax.yaxis.get_minor_locator()
cb.ax.yaxis.set_major_formatter(LogFormatter())
cb.ax.yaxis.set_minor_formatter(LogFormatter())
assert cb.formatter == cb.ax.yaxis.get_major_formatter()
assert cb.minorformatter == cb.ax.yaxis.get_minor_formatter()
# check that the setter works as expected:
loc = FixedLocator(np.arange(7))
cb.locator = loc
assert cb.ax.yaxis.get_major_locator() == loc
loc = FixedLocator(np.arange(0, 7, 0.1))
cb.minorlocator = loc
assert cb.ax.yaxis.get_minor_locator() == loc
fmt = LogFormatter()
cb.formatter = fmt
assert cb.ax.yaxis.get_major_formatter() == fmt
fmt = LogFormatter()
cb.minorformatter = fmt
assert cb.ax.yaxis.get_minor_formatter() == fmt
|
def test_colorbar_set_formatter_locator():
# check that the locator properties echo what is on the axis:
fig, ax = plt.subplots()
pc = ax.pcolormesh(np.random.randn(10, 10))
cb = fig.colorbar(pc)
cb.ax.yaxis.set_major_locator(FixedLocator(np.arange(10)))
cb.ax.yaxis.set_minor_locator(FixedLocator(np.arange(0, 10, 0.2)))
assert cb.locator == cb.ax.yaxis.get_major_locator()
assert cb.minorlocator == cb.ax.yaxis.get_minor_locator()
cb.ax.yaxis.set_major_formatter(LogFormatter())
cb.ax.yaxis.set_minor_formatter(LogFormatter())
assert cb.formatter == cb.ax.yaxis.get_major_formatter()
assert cb.minorformatter == cb.ax.yaxis.get_minor_formatter()
# check that the setter works as expected:
loc = FixedLocator(np.arange(7))
cb.locator = loc
assert cb.ax.yaxis.get_major_locator() is loc
loc = FixedLocator(np.arange(0, 7, 0.1))
cb.minorlocator = loc
assert cb.ax.yaxis.get_minor_locator() == loc
fmt = LogFormatter()
cb.formatter = fmt
assert cb.ax.yaxis.get_major_formatter() == fmt
fmt = LogFormatter()
cb.minorformatter = fmt
assert cb.ax.yaxis.get_minor_formatter() == fmt
|
37,283 |
def control(operation: Union[Gate, ControlledGate],
num_ctrl_qubits: Optional[int] = 1,
label: Optional[Union[None, str]] = None,
ctrl_state: Optional[Union[None, int, str]] = None) -> ControlledGate:
"""Return controlled version of gate using controlled rotations. This function
first checks the name of the operation to see if it knows of a method from which
to generate a controlled version. Currently these are `x`, `rx`, `ry`, and `rz`.
If a method is not directly known, it calls the unroller to convert to `x`, `y`,
`z`, `h`, `rx`, `ry`, `swap`, `ccx`, `u1`, `u3` and `cx` gates.
Args:
operation: The gate used to create the ControlledGate.
num_ctrl_qubits: The number of controls to add to gate (default=1).
label: An optional gate label.
ctrl_state: The control state in decimal or as
a bitstring (e.g. '111'). If specified as a bitstring the length
must equal num_ctrl_qubits, MSB on left. If None, use
2**num_ctrl_qubits-1.
Returns:
Controlled version of gate.
Raises:
CircuitError: gate contains non-gate in definition
"""
from math import pi
# pylint: disable=cyclic-import
import qiskit.circuit.controlledgate as controlledgate
# pylint: disable=unused-import
import qiskit.circuit.library.standard_gates.multi_control_rotation_gates
q_control = QuantumRegister(num_ctrl_qubits, name='control')
q_target = QuantumRegister(operation.num_qubits, name='target')
q_ancillae = None # TODO: add
qc = QuantumCircuit(q_control, q_target)
if operation.name == 'x' or (
isinstance(operation, controlledgate.ControlledGate) and
operation.base_gate.name == 'x'):
qc.mct(q_control[:] + q_target[:-1], q_target[-1], q_ancillae)
elif operation.name == 'rx':
qc.mcrx(operation.definition.data[0][0].params[0], q_control, q_target[0],
use_basis_gates=True)
elif operation.name == 'ry':
qc.mcry(operation.definition.data[0][0].params[0], q_control, q_target[0],
q_ancillae, mode='noancilla', use_basis_gates=True)
elif operation.name == 'rz':
qc.mcrz(operation.definition.data[0][0].params[0], q_control, q_target[0],
use_basis_gates=True)
else:
basis_gates = ['x', 'y', 'z', 'h', 'rx', 'ry', 'swap', 'ccx', 'u1', 'u3', 'cx']
bgate = _unroll_gate(operation, basis_gates)
# now we have a bunch of single qubit rotation gates and cx
for rule in bgate.definition.data:
if rule[0].name == 'u3':
theta, phi, lamb = rule[0].params
if phi == -pi / 2 and lamb == pi / 2:
qc.mcrx(theta, q_control, q_target[rule[1][0].index],
use_basis_gates=True)
elif phi == 0 and lamb == 0:
qc.mcry(theta, q_control, q_target[rule[1][0].index],
q_ancillae, use_basis_gates=True)
elif theta == 0 and phi == 0:
qc.mcrz(lamb, q_control, q_target[rule[1][0].index],
use_basis_gates=True)
else:
qc.mcrz(lamb, q_control, q_target[rule[1][0].index],
use_basis_gates=True)
qc.mcry(theta, q_control, q_target[rule[1][0].index],
q_ancillae, use_basis_gates=True)
qc.mcrz(phi, q_control, q_target[rule[1][0].index],
use_basis_gates=True)
elif rule[0].name == 'u1':
qc.mcu1(rule[0].params[0], q_control, q_target[rule[1][0].index])
elif rule[0].name == 'cx' or rule[0].name == 'ccx':
additional_control_bits = [bit.index for bit in rule[1][:-1]]
qc.mct(q_control[:] + q_target[additional_control_bits],
q_target[rule[1][-1].index],
q_ancillae)
elif rule[0].name == 'x':
qc.mct(q_control[:], q_target[rule[1][0].index], q_ancillae)
elif rule[0].name == 'z':
from qiskit.circuit.library.standard_gates import ZGate
mcz = ZGate().control(num_ctrl_qubits)
qargs = q_control[:] + q_target[[bit.index for bit in rule[1]]]
qc.append(mcz, qargs)
elif rule[0].name == 'y':
from qiskit.circuit.library.standard_gates import YGate
mcy = YGate().control(num_ctrl_qubits)
qargs = q_control[:] + q_target[[bit.index for bit in rule[1]]]
qc.append(mcy, qargs)
elif rule[0].name == 'h':
from qiskit.circuit.library.standard_gates import HGate
mch = HGate().control(num_ctrl_qubits)
qargs = q_control[:] + q_target[[bit.index for bit in rule[1]]]
qc.append(mch, qargs)
elif rule[0].name == 'rx':
from qiskit.circuit.library.standard_gates import RXGate
mcrx = RXGate(rule[0].params[0]).control(num_ctrl_qubits)
qargs = q_control[:] + q_target[[bit.index for bit in rule[1]]]
qc.append(mcrx, qargs)
elif rule[0].name == 'ry':
from qiskit.circuit.library.standard_gates import RYGate
mcry = RYGate(rule[0].params[0]).control(num_ctrl_qubits)
qargs = q_control[:] + q_target[[bit.index for bit in rule[1]]]
qc.append(mcry, qargs)
elif rule[0].name == 'swap':
from qiskit.circuit.library.standard_gates import SwapGate
mcswap = SwapGate().control(num_ctrl_qubits)
qargs = q_control[:] + q_target[[bit.index for bit in rule[1]]]
qc.append(mcswap, qargs)
else:
raise CircuitError('gate contains non-controllable instructions')
if isinstance(operation, controlledgate.ControlledGate):
new_num_ctrl_qubits = num_ctrl_qubits + operation.num_ctrl_qubits
new_ctrl_state = operation.ctrl_state << num_ctrl_qubits | ctrl_state
base_name = operation.base_gate.name
base_gate = operation.base_gate
else:
new_num_ctrl_qubits = num_ctrl_qubits
new_ctrl_state = ctrl_state
base_name = operation.name
base_gate = operation
# In order to maintain some backward compatibility with gate names this
# uses a naming convention where if the number of controls is <=2 the gate
# is named like "cc<base_gate.name>", else it is named like
# "c<num_ctrl_qubits><base_name>".
if new_num_ctrl_qubits > 2:
ctrl_substr = 'c{0:d}'.format(new_num_ctrl_qubits)
else:
ctrl_substr = ('{0}' * new_num_ctrl_qubits).format('c')
new_name = '{0}{1}'.format(ctrl_substr, base_name)
cgate = controlledgate.ControlledGate(new_name,
qc.num_qubits,
operation.params,
label=label,
num_ctrl_qubits=new_num_ctrl_qubits,
definition=qc,
ctrl_state=new_ctrl_state)
cgate.base_gate = base_gate
return cgate
|
def control(operation: Union[Gate, ControlledGate],
num_ctrl_qubits: Optional[int] = 1,
label: Optional[Union[None, str]] = None,
ctrl_state: Optional[Union[None, int, str]] = None) -> ControlledGate:
"""Return controlled version of gate using controlled rotations. This function
first checks the name of the operation to see if it knows of a method from which
to generate a controlled version. Currently these are `x`, `rx`, `ry`, and `rz`.
If a method is not directly known, it calls the unroller to convert to `x`, `y`,
`z`, `h`, `rx`, `ry`, `swap`, `ccx`, `u1`, `u3` and `cx` gates.
Args:
operation: The gate used to create the ControlledGate.
num_ctrl_qubits: The number of controls to add to gate (default=1).
label: An optional gate label.
ctrl_state: The control state in decimal or as
a bitstring (e.g. '111'). If specified as a bitstring the length
must equal num_ctrl_qubits, MSB on left. If None, use
2**num_ctrl_qubits-1.
Returns:
Controlled version of gate.
Raises:
CircuitError: gate contains non-gate in definition
"""
from math import pi
# pylint: disable=cyclic-import
import qiskit.circuit.controlledgate as controlledgate
# pylint: disable=unused-import
import qiskit.circuit.library.standard_gates.multi_control_rotation_gates
q_control = QuantumRegister(num_ctrl_qubits, name='control')
q_target = QuantumRegister(operation.num_qubits, name='target')
q_ancillae = None # TODO: add
qc = QuantumCircuit(q_control, q_target)
if operation.name == 'x' or (
isinstance(operation, controlledgate.ControlledGate) and
operation.base_gate.name == 'x'):
qc.mct(q_control[:] + q_target[:-1], q_target[-1], q_ancillae)
elif operation.name == 'rx':
qc.mcrx(operation.definition.data[0][0].params[0], q_control, q_target[0],
use_basis_gates=True)
elif operation.name == 'ry':
qc.mcry(operation.definition.data[0][0].params[0], q_control, q_target[0],
q_ancillae, mode='noancilla', use_basis_gates=True)
elif operation.name == 'rz':
qc.mcrz(operation.definition.data[0][0].params[0], q_control, q_target[0],
use_basis_gates=True)
else:
basis_gates = ['x', 'y', 'z', 'h', 'rx', 'ry', 'swap', 'ccx', 'u1', 'u3', 'cx']
bgate = _unroll_gate(operation, basis_gates)
# now we have a bunch of single qubit rotation gates and cx
for rule in bgate.definition.data:
if rule[0].name == 'u3':
theta, phi, lamb = rule[0].params
if phi == -pi / 2 and lamb == pi / 2:
qc.mcrx(theta, q_control, q_target[rule[1][0].index],
use_basis_gates=True)
elif phi == 0 and lamb == 0:
qc.mcry(theta, q_control, q_target[rule[1][0].index],
q_ancillae, use_basis_gates=True)
elif theta == 0 and phi == 0:
qc.mcrz(lamb, q_control, q_target[rule[1][0].index],
use_basis_gates=True)
else:
qc.mcrz(lamb, q_control, q_target[rule[1][0].index],
use_basis_gates=True)
qc.mcry(theta, q_control, q_target[rule[1][0].index],
q_ancillae, use_basis_gates=True)
qc.mcrz(phi, q_control, q_target[rule[1][0].index],
use_basis_gates=True)
elif rule[0].name == 'u1':
qc.mcu1(rule[0].params[0], q_control, q_target[rule[1][0].index])
elif rule[0].name == 'cx' or rule[0].name == 'ccx':
additional_control_bits = [bit.index for bit in rule[1][:-1]]
qc.mcx(q_control[:] + q_target[additional_control_bits],
q_target[rule[1][-1].index],
q_ancillae)
elif rule[0].name == 'x':
qc.mct(q_control[:], q_target[rule[1][0].index], q_ancillae)
elif rule[0].name == 'z':
from qiskit.circuit.library.standard_gates import ZGate
mcz = ZGate().control(num_ctrl_qubits)
qargs = q_control[:] + q_target[[bit.index for bit in rule[1]]]
qc.append(mcz, qargs)
elif rule[0].name == 'y':
from qiskit.circuit.library.standard_gates import YGate
mcy = YGate().control(num_ctrl_qubits)
qargs = q_control[:] + q_target[[bit.index for bit in rule[1]]]
qc.append(mcy, qargs)
elif rule[0].name == 'h':
from qiskit.circuit.library.standard_gates import HGate
mch = HGate().control(num_ctrl_qubits)
qargs = q_control[:] + q_target[[bit.index for bit in rule[1]]]
qc.append(mch, qargs)
elif rule[0].name == 'rx':
from qiskit.circuit.library.standard_gates import RXGate
mcrx = RXGate(rule[0].params[0]).control(num_ctrl_qubits)
qargs = q_control[:] + q_target[[bit.index for bit in rule[1]]]
qc.append(mcrx, qargs)
elif rule[0].name == 'ry':
from qiskit.circuit.library.standard_gates import RYGate
mcry = RYGate(rule[0].params[0]).control(num_ctrl_qubits)
qargs = q_control[:] + q_target[[bit.index for bit in rule[1]]]
qc.append(mcry, qargs)
elif rule[0].name == 'swap':
from qiskit.circuit.library.standard_gates import SwapGate
mcswap = SwapGate().control(num_ctrl_qubits)
qargs = q_control[:] + q_target[[bit.index for bit in rule[1]]]
qc.append(mcswap, qargs)
else:
raise CircuitError('gate contains non-controllable instructions')
if isinstance(operation, controlledgate.ControlledGate):
new_num_ctrl_qubits = num_ctrl_qubits + operation.num_ctrl_qubits
new_ctrl_state = operation.ctrl_state << num_ctrl_qubits | ctrl_state
base_name = operation.base_gate.name
base_gate = operation.base_gate
else:
new_num_ctrl_qubits = num_ctrl_qubits
new_ctrl_state = ctrl_state
base_name = operation.name
base_gate = operation
# In order to maintain some backward compatibility with gate names this
# uses a naming convention where if the number of controls is <=2 the gate
# is named like "cc<base_gate.name>", else it is named like
# "c<num_ctrl_qubits><base_name>".
if new_num_ctrl_qubits > 2:
ctrl_substr = 'c{0:d}'.format(new_num_ctrl_qubits)
else:
ctrl_substr = ('{0}' * new_num_ctrl_qubits).format('c')
new_name = '{0}{1}'.format(ctrl_substr, base_name)
cgate = controlledgate.ControlledGate(new_name,
qc.num_qubits,
operation.params,
label=label,
num_ctrl_qubits=new_num_ctrl_qubits,
definition=qc,
ctrl_state=new_ctrl_state)
cgate.base_gate = base_gate
return cgate
|
8,958 |
def find_entry_point_plugins(group='sopel.plugins'):
"""List plugins from a setuptools entry point group.
:param str group: setuptools entry point group to look for
(defaults to ``sopel.plugins``)
:return: yield instance of :class:`~.handlers.EntryPointPlugin`
created from setuptools entry point given ``group``
This function finds plugins declared under a setuptools entry point; by
default it uses the ``sopel.plugins`` entry point.
"""
for entry_point in pkg_resources.iter_entry_points(group):
yield handlers.EntryPointPlugin(entry_point)
|
def find_entry_point_plugins(group='sopel.plugins'):
"""List plugins from a setuptools entry point group.
:param str group: setuptools entry point group to look for
(defaults to ``sopel.plugins``)
:return: yield instances of :class:`~.handlers.EntryPointPlugin`
created from setuptools entry point given ``group``
This function finds plugins declared under a setuptools entry point; by
default it uses the ``sopel.plugins`` entry point.
"""
for entry_point in pkg_resources.iter_entry_points(group):
yield handlers.EntryPointPlugin(entry_point)
|
1,611 |
def permutation_test_score(estimator, X, y, groups=None, cv=None,
n_permutations=100, n_jobs=None, random_state=0,
verbose=0, scoring=None):
"""Evaluate the significance of a cross-validated score with permutations
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
estimator : estimator object implementing 'fit'
The object to use to fit the data.
X : array-like of shape at least 2D
The data to fit.
y : array-like
The target variable to try to predict in the case of
supervised learning.
groups : array-like, with shape (n_samples,), default=None
Labels to constrain permutation within groups, i.e. ``y`` values
are permuted among samples with the same group identifier.
When not specified, ``y`` values are permuted among all samples.
When a grouped cross-validator is used, the group labels are
also passed on to the ``split`` method of the cross-validator. The
cross-validator uses them for grouping the samples while splitting
the dataset into train/test set.
scoring : str, callable or None, default=None
A single str (see :ref:`scoring_parameter`) or a callable
(see :ref:`scoring`) to evaluate the predictions on the test set.
If None the estimator's score method is used.
cv : int, cross-validation generator or an iterable, default=None
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 5-fold cross validation,
- int, to specify the number of folds in a `(Stratified)KFold`,
- :term:`CV splitter`,
- An iterable yielding (train, test) splits as arrays of indices.
For int/None inputs, if the estimator is a classifier and ``y`` is
either binary or multiclass, :class:`StratifiedKFold` is used. In all
other cases, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
.. versionchanged:: 0.22
``cv`` default value if None changed from 3-fold to 5-fold.
n_permutations : int, default=100
Number of times to permute ``y``.
n_jobs : int or None, default=None
The number of CPUs to use to do the computation.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
random_state : int, RandomState instance or None, default=0
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
verbose : int, default=0
The verbosity level.
Returns
-------
score : float
The true score without permuting targets.
permutation_scores : array, shape (n_permutations,)
The scores obtained for each permutations.
pvalue : float
The p-value, which approximates the probability that the score would
be obtained by chance. This is calculated as:
`(C + 1) / (n_permutations + 1)`
Where C is the number of permutations whose score >= the true score.
The best possible p-value is 1/(n_permutations + 1), the worst is 1.0.
Notes
-----
This function implements Test 1 in:
Ojala and Garriga. Permutation Tests for Studying Classifier
Performance. The Journal of Machine Learning Research (2010)
vol. 11
"""
X, y, groups = indexable(X, y, groups)
cv = check_cv(cv, y, classifier=is_classifier(estimator))
scorer = check_scoring(estimator, scoring=scoring)
random_state = check_random_state(random_state)
# We clone the estimator to make sure that all the folds are
# independent, and that it is pickle-able.
score = _permutation_test_score(clone(estimator), X, y, groups, cv, scorer)
permutation_scores = Parallel(n_jobs=n_jobs, verbose=verbose)(
delayed(_permutation_test_score)(
clone(estimator), X, _shuffle(y, groups, random_state),
groups, cv, scorer)
for _ in range(n_permutations))
permutation_scores = np.array(permutation_scores)
pvalue = (np.sum(permutation_scores >= score) + 1.0) / (n_permutations + 1)
return score, permutation_scores, pvalue
|
def permutation_test_score(estimator, X, y, groups=None, cv=None,
n_permutations=100, n_jobs=None, random_state=0,
verbose=0, scoring=None):
"""Evaluate the significance of a cross-validated score with permutations
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
estimator : estimator object implementing 'fit'
The object to use to fit the data.
X : array-like of shape at least 2D
The data to fit.
y : array-like
The target variable to try to predict in the case of
supervised learning.
groups : array-like of shape (n_samples,), default=None
Labels to constrain permutation within groups, i.e. ``y`` values
are permuted among samples with the same group identifier.
When not specified, ``y`` values are permuted among all samples.
When a grouped cross-validator is used, the group labels are
also passed on to the ``split`` method of the cross-validator. The
cross-validator uses them for grouping the samples while splitting
the dataset into train/test set.
scoring : str, callable or None, default=None
A single str (see :ref:`scoring_parameter`) or a callable
(see :ref:`scoring`) to evaluate the predictions on the test set.
If None the estimator's score method is used.
cv : int, cross-validation generator or an iterable, default=None
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 5-fold cross validation,
- int, to specify the number of folds in a `(Stratified)KFold`,
- :term:`CV splitter`,
- An iterable yielding (train, test) splits as arrays of indices.
For int/None inputs, if the estimator is a classifier and ``y`` is
either binary or multiclass, :class:`StratifiedKFold` is used. In all
other cases, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
.. versionchanged:: 0.22
``cv`` default value if None changed from 3-fold to 5-fold.
n_permutations : int, default=100
Number of times to permute ``y``.
n_jobs : int or None, default=None
The number of CPUs to use to do the computation.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
random_state : int, RandomState instance or None, default=0
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
verbose : int, default=0
The verbosity level.
Returns
-------
score : float
The true score without permuting targets.
permutation_scores : array, shape (n_permutations,)
The scores obtained for each permutations.
pvalue : float
The p-value, which approximates the probability that the score would
be obtained by chance. This is calculated as:
`(C + 1) / (n_permutations + 1)`
Where C is the number of permutations whose score >= the true score.
The best possible p-value is 1/(n_permutations + 1), the worst is 1.0.
Notes
-----
This function implements Test 1 in:
Ojala and Garriga. Permutation Tests for Studying Classifier
Performance. The Journal of Machine Learning Research (2010)
vol. 11
"""
X, y, groups = indexable(X, y, groups)
cv = check_cv(cv, y, classifier=is_classifier(estimator))
scorer = check_scoring(estimator, scoring=scoring)
random_state = check_random_state(random_state)
# We clone the estimator to make sure that all the folds are
# independent, and that it is pickle-able.
score = _permutation_test_score(clone(estimator), X, y, groups, cv, scorer)
permutation_scores = Parallel(n_jobs=n_jobs, verbose=verbose)(
delayed(_permutation_test_score)(
clone(estimator), X, _shuffle(y, groups, random_state),
groups, cv, scorer)
for _ in range(n_permutations))
permutation_scores = np.array(permutation_scores)
pvalue = (np.sum(permutation_scores >= score) + 1.0) / (n_permutations + 1)
return score, permutation_scores, pvalue
|
40,810 |
def _join(*content: Tuple[str], sep: str = " ") -> str:
return sep.join(map(str, content))
|
def _join(*content: str, sep: str = " ") -> str:
return sep.join(map(str, content))
|
44,302 |
def basis_rotation(one_electron, two_electron, tol_factor):
r"""Return the grouped coefficients and observables of a molecular Hamiltonian and the basis
rotation unitaries obtained with the basis rotation grouping method.
Args:
one_electron (array[float]): one-electron integral matrix in the molecular orbital basis
two_electron (array[array[float]]): two-electron integral tensor in the molecular orbital
basis arranged in chemist notation
tol_factor (float): threshold error value for discarding the negligible factors
Returns:
tuple(list[array[float]], list[list[Observable]], list[array[float]]): tuple containing the
grouped coefficients and grouped opservables of a Hamiltonian and the basis rotation
unitaries obtained with the basis rotation grouping method
**Example**
>>> symbols = ['H', 'H']
>>> geometry = np.array([[0.0, 0.0, 0.0], [1.398397361, 0.0, 0.0]], requires_grad = False)
>>> mol = qml.qchem.Molecule(symbols, geometry)
>>> core, one, two = qml.qchem.electron_integrals(mol)()
>>> coeffs, ops, eigvecs = basis_rotation(one, two, tol_factor=1.0e-5)
>>> print(coeffs)
[array([-1.29789639, 0.84064639, 0.45725000]),
array([-0.00019476, -0.01100037, 0.02239026, -0.01119513]),
array([ 0.36242096, -0.18121048, -0.18121048]),
array([-1.36155423, 2.03646071, -1.34981296, 0.67490648])]
.. details::
:title: Theory
A second-quantized molecular Hamiltonian can be constructed in the
`chemist notation <http://vergil.chemistry.gatech.edu/notes/permsymm/permsymm.pdf>`_ format
following Eq. (1) of
[`PRX Quantum 2, 030305, 2021 <https://journals.aps.org/prxquantum/abstract/10.1103/PRXQuantum.2.030305>`_]
as
.. math::
H = \sum_{\alpha \in \{\uparrow, \downarrow \} } \sum_{pq} T_{pq} a_{p,\alpha}^{\dagger}
a_{q, \alpha} + \frac{1}{2} \sum_{\alpha, \beta \in \{\uparrow, \downarrow \} } \sum_{pqrs}
V_{pqrs} a_{p, \alpha}^{\dagger} a_{q, \alpha} a_{r, \beta}^{\dagger} a_{s, \beta},
where :math:`V_{pqrs}` denotes a two-electron integral in the chemist notation and
:math:`T_{pq}` is obtained from the one- and two electron integrals, :math:`h_{pq}` and
:math:`h_{pssq}`, as
.. math::
T_{pq} = h_{pq} - \frac{1}{2} \sum_s h_{pssq}.
The tensor :math:`V` can be converted to a matrix which is indexed by the indices :math:`pq`
and :math:`rs` and eigendecomposed up to a rank :math:`R` to give
.. math::
V_{pqrs} = \sum_r^R L_{pq}^{(r)} L_{rs}^{(r) T},
where :math:`L` denotes the eigenvectors of the matrix. The molecular Hamiltonian can then
be rewritten following Eq. (7) of
[`Phys. Rev. Research 3, 033055, 2021 <https://journals.aps.org/prresearch/abstract/10.1103/PhysRevResearch.3.033055>`_]
as
.. math::
H = \sum_{\alpha \in \{\uparrow, \downarrow \} } \sum_{pq} T_{pq} a_{p,\alpha}^{\dagger}
a_{q, \alpha} + \frac{1}{2} \sum_r^R \left ( \sum_{\alpha, \beta \in \{\uparrow, \downarrow \} } \sum_{pq}
L_{pq}^{(r)} a_{p, \alpha}^{\dagger} a_{q, \alpha} \right )^2.
The orbital basis can be rotated such that each :math:`T` and :math:`L^{(r)}` matrix is
diagonal. The Hamiltonian can then be written following Eq. (2) of
[`npj Quantum Information, 7, 23 (2021) <https://www.nature.com/articles/s41534-020-00341-7>`_]
as
.. math::
H = U_0 \left ( \sum_p d_p n_p \right ) U_0^{\dagger} + \sum_r^R U_r \left ( \sum_{pq}
d_{pq}^{(r)} n_p n_q \right ) U_r^{\dagger}
where the coefficients :math:`d` are obtained by diagonalizing the :math:`T` and
:math:`L^{(r)}` matrices. The number operators :math:`n_p = a_p^{\dagger} a_p` can be
converted to qubit operators using
.. math::
n_p = \frac{1-Z_p}{2}
where :math:`Z_p` is the Pauli :math:`Z` operator applied to qubit :math:`p`. This gives
the qubit Hamiltonian
.. math::
H = U_0 \left ( \sum_p O_p^{(0)} \right ) U_0^{\dagger} + \sum_r^R U_r \left ( \sum_{q} O_q^{(r)} \right ) U_r^{\dagger},
where :math:`O = \sum_i c_i P_i` is a linear combination of Pauli words :math:`P_` that are
a tensor product of Pauli :math:`Z` and Identity operators. This allows all the Pauli words
in each of the :math:`O` terms to be measured simultaneously. This function returns the
coefficients and the Pauli words grouped for each of the :math:`O` terms as well as the
eigenvectors of the :math:`T` and :math:`L^{(r)}` matrices that can be used to construct the
basis rotation unitaries :math:`U`.
"""
two_electron = np.swapaxes(two_electron, 1, 3)
_, eigvals_m, eigvecs_m = qml.qchem.factorize(two_electron, tol_factor, 0.0)
t_matrix = one_electron - 0.5 * np.einsum("illj", two_electron)
t_eigvals, t_eigvecs = np.linalg.eigh(t_matrix)
eigvals = [np.array(t_eigvals)] + [np.outer(x, x).flatten() * 0.5 for x in eigvals_m]
eigvecs = [t_eigvecs] + eigvecs_m
ops_t = 0.0
for i in range(len(eigvals[0])):
ops_t += 0.5 * eigvals[0][i] * qml.Identity(i) - 0.5 * eigvals[0][i] * qml.PauliZ(i)
ops_l = []
for coeff in eigvals[1:]:
ops_l_ = 0.0
for i in range(len(coeff) // 2):
for j in range(len(coeff) // 2):
cc = coeff[i + j]
if i == j:
ops_l_ += cc * (
qml.Identity(i) - qml.PauliZ(i) - qml.PauliZ(j) + qml.Identity(i)
)
else:
ops_l_ += cc * (
qml.Identity(i)
- qml.PauliZ(i)
- qml.PauliZ(j)
+ qml.grouping.pauli_mult_with_phase(qml.PauliZ(i), qml.PauliZ(j))[0]
)
ops_l.append(ops_l_.tolist())
ops = [ops_t.tolist()] + ops_l
c_group = [op.coeffs for op in ops]
o_group = [op.ops for op in ops]
return c_group, o_group, eigvecs
|
def basis_rotation(one_electron, two_electron, tol_factor):
r"""Return the grouped coefficients and observables of a molecular Hamiltonian and the basis
rotation unitaries obtained with the basis rotation grouping method.
Args:
one_electron (array[float]): one-electron integral matrix in the molecular orbital basis
two_electron (array[array[float]]): two-electron integral tensor in the molecular orbital
basis arranged in chemist notation
tol_factor (float): threshold error value for discarding the negligible factors
Returns:
tuple(list[array[float]], list[list[Observable]], list[array[float]]): tuple containing the
grouped coefficients and grouped opservables of a Hamiltonian and the basis rotation
unitaries obtained with the basis rotation grouping method
**Example**
>>> symbols = ['H', 'H']
>>> geometry = np.array([[0.0, 0.0, 0.0], [1.398397361, 0.0, 0.0]], requires_grad = False)
>>> mol = qml.qchem.Molecule(symbols, geometry)
>>> core, one, two = qml.qchem.electron_integrals(mol)()
>>> coeffs, ops, eigvecs = basis_rotation(one, two, tol_factor=1.0e-5)
>>> print(coeffs)
[array([-1.29789639, 0.84064639, 0.45725000]),
array([-0.00019476, -0.01100037, 0.02239026, -0.01119513]),
array([ 0.36242096, -0.18121048, -0.18121048]),
array([-1.36155423, 2.03646071, -1.34981296, 0.67490648])]
.. details::
:title: Theory
A second-quantized molecular Hamiltonian can be constructed in the
`chemist notation <http://vergil.chemistry.gatech.edu/notes/permsymm/permsymm.pdf>`_ format
following Eq. (1) of
[`PRX Quantum 2, 030305, 2021 <https://journals.aps.org/prxquantum/abstract/10.1103/PRXQuantum.2.030305>`_]
as
.. math::
H = \sum_{\alpha \in \{\uparrow, \downarrow \} } \sum_{pq} T_{pq} a_{p,\alpha}^{\dagger}
a_{q, \alpha} + \frac{1}{2} \sum_{\alpha, \beta \in \{\uparrow, \downarrow \} } \sum_{pqrs}
V_{pqrs} a_{p, \alpha}^{\dagger} a_{q, \alpha} a_{r, \beta}^{\dagger} a_{s, \beta},
where :math:`V_{pqrs}` denotes a two-electron integral in the chemist notation and
:math:`T_{pq}` is obtained from the one- and two electron integrals, :math:`h_{pq}` and
:math:`h_{pssq}`, as
.. math::
T_{pq} = h_{pq} - \frac{1}{2} \sum_s h_{pssq}.
The tensor :math:`V` can be converted to a matrix which is indexed by the indices :math:`pq`
and :math:`rs` and eigendecomposed up to a rank :math:`R` to give
.. math::
V_{pqrs} = \sum_r^R L_{pq}^{(r)} L_{rs}^{(r) T},
where :math:`L` denotes the eigenvectors of the matrix. The molecular Hamiltonian can then
be rewritten following Eq. (7) of
[`Phys. Rev. Research 3, 033055, 2021 <https://journals.aps.org/prresearch/abstract/10.1103/PhysRevResearch.3.033055>`_]
as
.. math::
H = \sum_{\alpha \in \{\uparrow, \downarrow \} } \sum_{pq} T_{pq} a_{p,\alpha}^{\dagger}
a_{q, \alpha} + \frac{1}{2} \sum_r^R \left ( \sum_{\alpha, \beta \in \{\uparrow, \downarrow \} } \sum_{pq}
L_{pq}^{(r)} a_{p, \alpha}^{\dagger} a_{q, \alpha} \right )^2.
The orbital basis can be rotated such that each :math:`T` and :math:`L^{(r)}` matrix is
diagonal. The Hamiltonian can then be written following Eq. (2) of
[`npj Quantum Information, 7, 23 (2021) <https://www.nature.com/articles/s41534-020-00341-7>`_]
as
.. math::
H = U_0 \left ( \sum_p d_p n_p \right ) U_0^{\dagger} + \sum_r^R U_r \left ( \sum_{pq}
d_{pq}^{(r)} n_p n_q \right ) U_r^{\dagger},
where the coefficients :math:`d` are obtained by diagonalizing the :math:`T` and
:math:`L^{(r)}` matrices. The number operators :math:`n_p = a_p^{\dagger} a_p` can be
converted to qubit operators using
.. math::
n_p = \frac{1-Z_p}{2}
where :math:`Z_p` is the Pauli :math:`Z` operator applied to qubit :math:`p`. This gives
the qubit Hamiltonian
.. math::
H = U_0 \left ( \sum_p O_p^{(0)} \right ) U_0^{\dagger} + \sum_r^R U_r \left ( \sum_{q} O_q^{(r)} \right ) U_r^{\dagger},
where :math:`O = \sum_i c_i P_i` is a linear combination of Pauli words :math:`P_` that are
a tensor product of Pauli :math:`Z` and Identity operators. This allows all the Pauli words
in each of the :math:`O` terms to be measured simultaneously. This function returns the
coefficients and the Pauli words grouped for each of the :math:`O` terms as well as the
eigenvectors of the :math:`T` and :math:`L^{(r)}` matrices that can be used to construct the
basis rotation unitaries :math:`U`.
"""
two_electron = np.swapaxes(two_electron, 1, 3)
_, eigvals_m, eigvecs_m = qml.qchem.factorize(two_electron, tol_factor, 0.0)
t_matrix = one_electron - 0.5 * np.einsum("illj", two_electron)
t_eigvals, t_eigvecs = np.linalg.eigh(t_matrix)
eigvals = [np.array(t_eigvals)] + [np.outer(x, x).flatten() * 0.5 for x in eigvals_m]
eigvecs = [t_eigvecs] + eigvecs_m
ops_t = 0.0
for i in range(len(eigvals[0])):
ops_t += 0.5 * eigvals[0][i] * qml.Identity(i) - 0.5 * eigvals[0][i] * qml.PauliZ(i)
ops_l = []
for coeff in eigvals[1:]:
ops_l_ = 0.0
for i in range(len(coeff) // 2):
for j in range(len(coeff) // 2):
cc = coeff[i + j]
if i == j:
ops_l_ += cc * (
qml.Identity(i) - qml.PauliZ(i) - qml.PauliZ(j) + qml.Identity(i)
)
else:
ops_l_ += cc * (
qml.Identity(i)
- qml.PauliZ(i)
- qml.PauliZ(j)
+ qml.grouping.pauli_mult_with_phase(qml.PauliZ(i), qml.PauliZ(j))[0]
)
ops_l.append(ops_l_.tolist())
ops = [ops_t.tolist()] + ops_l
c_group = [op.coeffs for op in ops]
o_group = [op.ops for op in ops]
return c_group, o_group, eigvecs
|
50,324 |
def test_list_project_users(gl, resp_list_users):
user = gl.projects.get(1).users.list()[0]
assert isinstance(user, ProjectUser)
assert user.id == 1
assert user.name == "first"
assert user.state == "active"
|
def test_list_project_users(project, resp_list_users):
user = project.users.list()[0]
assert isinstance(user, ProjectUser)
assert user.id == 1
assert user.name == "first"
assert user.state == "active"
|
29,829 |
def _get_tzdata(timezone_filepath=os.path.join(os.path.sep, "etc", "timezone")) -> str:
"""Return the host's timezone from timezon_filepath or Etc/UTC on error."""
try:
with open(timezone_filepath) as timezone_file:
timezone = timezone_file.read().strip()
except FileNotFoundError:
timezone = "Etc/UTC"
return timezone
|
def _get_tzdata(timezone_filepath=os.path.join(os.path.sep, "etc", "timezone")) -> str:
"""Return the host's timezone from timezone_filepath or Etc/UTC on error."""
try:
with open(timezone_filepath) as timezone_file:
timezone = timezone_file.read().strip()
except FileNotFoundError:
timezone = "Etc/UTC"
return timezone
|
12,971 |
def resolve_checkouts(channel_slug):
queryset = models.Checkout.objects.all()
if channel_slug:
queryset = queryset.filter(channel__slug=str(channel_slug))
return queryset
|
def resolve_checkouts(channel_slug):
queryset = models.Checkout.objects.all()
if channel_slug:
queryset = queryset.filter(channel__slug=channel_slug)
return queryset
|
33,122 |
def Id_Obs(Nx):
"""Specify identity observations of entire state.
It is not a function of time.
Parameters
----------
Nx: int
Number of total length of state vector
Returns
-------
Obs: dict
Observation operator including size of the observation space,
observation operator/model and tangent linear observation operator
"""
return partial_Id_Obs(Nx, np.arange(Nx))
|
def Id_Obs(Nx):
"""Specify identity observations of entire state.
It is not a function of time.
Parameters
----------
Nx: int
Length of state vector
Returns
-------
Obs: dict
Observation operator including size of the observation space,
observation operator/model and tangent linear observation operator
"""
return partial_Id_Obs(Nx, np.arange(Nx))
|
3,067 |
def _maybe_asobject(dtype, klass, data, copy: bool, name: Label, **kwargs):
"""
If and object dtype was specified, create the non-object Index
and then convert it to object.
Parameters
----------
dtype : np.dtype, ExtensionDtype, str
klass : Index subclass
data : list-like
copy : bool
name : hashable
**kwargs
Returns
-------
Index
Notes
-----
We assume that calling .astype(object) on this klass will make a copy.
"""
# GH#23524 passing `dtype=object` to DatetimeIndex is invalid,
# will raise in the where `data` is already tz-aware. So
# we leave it out of this step and cast to object-dtype after
# the DatetimeIndex construction.
if is_dtype_equal(_o_dtype, dtype):
# Note we can pass copy=False because the .astype below
# will always make a copy
index = klass(data, copy=False, name=name, **kwargs)
return index.astype(object)
return klass(data, dtype=dtype, copy=copy, name=name, **kwargs)
|
def _maybe_asobject(dtype, klass, data, copy: bool, name: Label, **kwargs):
"""
If an object dtype was specified, create the non-object Index
and then convert it to object.
Parameters
----------
dtype : np.dtype, ExtensionDtype, str
klass : Index subclass
data : list-like
copy : bool
name : hashable
**kwargs
Returns
-------
Index
Notes
-----
We assume that calling .astype(object) on this klass will make a copy.
"""
# GH#23524 passing `dtype=object` to DatetimeIndex is invalid,
# will raise in the where `data` is already tz-aware. So
# we leave it out of this step and cast to object-dtype after
# the DatetimeIndex construction.
if is_dtype_equal(_o_dtype, dtype):
# Note we can pass copy=False because the .astype below
# will always make a copy
index = klass(data, copy=False, name=name, **kwargs)
return index.astype(object)
return klass(data, dtype=dtype, copy=copy, name=name, **kwargs)
|
56,639 |
def normalize_ddc(ddc):
"""
:param str ddc:
:rtype: list of str
"""
ddc = collapse_multiple_space(ddc.strip()).replace('/', '').replace("'", '')
results = []
for match in DDC_RE.finditer(ddc):
parts = match.groupdict()
prefix = ''
suffix = ''
# DDCs should start at word boundaries
start = match.start()
if start > 0 and re.search(r'\b', ddc[start - 1]):
continue
# And end at them
end = match.end()
if end < (len(ddc) - 1) and re.search(r'\b', ddc[end]):
continue
# Some old standard which isn't used anymore; might need to filter these
# out, but they should sort OK so let's keep them.
if parts['neg']:
prefix += '-'
# Juvenile prefix
if parts['j']:
prefix += 'j'
# Star should be at end
if parts['prestar'] or parts['poststar']:
suffix = '*'
# Series suffix
if parts['s']:
suffix += ' s'
# Biographical
if parts['B']:
suffix += ' B'
# Not at all sure
if parts['ninetwo']:
suffix += parts['ninetwo']
# And now the actual number!
if parts['number']:
# Numbers in parenthesis are "series" numbers
end = match.end('number')
if end < len(ddc) and ddc[end] == ')':
suffix += ' s'
# pad the integer part of the number
number_parts = parts['number'].split('.')
integer = number_parts[0]
# Copy decimal without losing precision
decimal = '.' + number_parts[1] if len(number_parts) > 1 else ''
number = '%03d%s' % (int(integer), decimal)
# Handle [Fic] or [E]
elif parts['fic']:
number = '[%s]' % parts['fic'].title()
else:
continue
results.append(prefix + number + suffix)
return results
|
def normalize_ddc(ddc):
"""
:param str ddc:
:rtype: list of str
"""
ddc = collapse_multiple_space(ddc.strip()).replace('/', '').replace("'", '')
results = []
for match in DDC_RE.finditer(ddc):
parts = match.groupdict()
prefix = ''
suffix = ''
# DDCs should start at word boundaries
start = match.start()
if start > 0 and re.search(r'\b', ddc[start - 1]):
continue
# And end at them
end = match.end()
if end < (len(ddc) - 1) and re.search(r'\b', ddc[end]):
continue
# Some old standard which isn't used anymore; might need to filter these
# out, but they should sort OK so let's keep them.
if parts['neg']:
prefix += '-'
# Juvenile prefix
if parts['j']:
prefix += 'j'
# Star should be at end
if parts['prestar'] or parts['poststar']:
suffix = '*'
# Series suffix
if parts['s']:
suffix += ' s'
# Biographical
if parts['B']:
suffix += ' B'
# Not at all sure
if parts['ninetwo']:
suffix += parts['ninetwo']
# And now the actual number!
if parts['number']:
# Numbers in parenthesis are "series" numbers
end = match.end('number')
if end < len(ddc) and ddc[end] == ')':
suffix += ' s'
# pad the integer part of the number
integer, _, decimal = parts['number'].partition('.')
# Copy decimal without losing precision
if decimal:
decimal = '.' + decimal
number = '%03d%s' % (int(integer), decimal)
# Handle [Fic] or [E]
elif parts['fic']:
number = '[%s]' % parts['fic'].title()
else:
continue
results.append(prefix + number + suffix)
return results
|
19,281 |
def test_TermStatusLine():
# Smoke test for TermStatusLine; to actually test output it would be great
# to pass a StringIO instance, but we use tty.msg() internally which does not
# accept that. `with log_output(buf)` doesn't really work because it trims output
# and we actually want to test for escape sequences etc.
x = inst.TermStatusLine(enabled=True)
x.add("a")
x.add("b")
x.clear()
|
def test_term_status_line():
# Smoke test for TermStatusLine; to actually test output it would be great
# to pass a StringIO instance, but we use tty.msg() internally which does not
# accept that. `with log_output(buf)` doesn't really work because it trims output
# and we actually want to test for escape sequences etc.
x = inst.TermStatusLine(enabled=True)
x.add("a")
x.add("b")
x.clear()
|
54,342 |
def dropout_input(seg_pair):
"""Applies input-level dropout: zero to all channels minus one will be randomly set to zeros. This function verifies
if some channels are already empty. Always at least one input channel will be kept.
Args:
seg_pair (dict): Batch containing torch tensors (input and gt) and metadata.
Return:
seg_pair (dict): Batch containing torch tensors (input and gt) and metadata with channel(s) dropped.
"""
n_channels = seg_pair['input'].size(0)
# Verify if the input is multichannel
if n_channels > 1:
# Verify if some channels are already empty
n_unique_values = [len(torch.unique(input_data)) > 1 for input_data in seg_pair['input']]
idx_empty = np.where(np.invert(n_unique_values))[0]
# Select how many channels will be dropped between 0 and n_channels - 1 (keep at least one input)
n_dropped = random.randint(0, n_channels - 1)
if n_dropped > len(idx_empty):
# Remove empty channel to the number of channels to drop
n_dropped = n_dropped - len(idx_empty)
# Select which channels will be dropped
idx_dropped = []
while len(idx_dropped) != n_dropped:
idx = random.randint(0, n_channels - 1)
# Don't include the empty channel in the dropped channels
if idx not in idx_empty:
idx_dropped.append(idx)
else:
idx_dropped = idx_empty
seg_pair['input'][idx_dropped] = torch.zeros_like(seg_pair['input'][idx_dropped])
else:
logger.warning("\n Impossible to apply input-level dropout since input is not multi-channel.")
return seg_pair
|
def dropout_input(seg_pair):
"""Applies input-level dropout: zero to all channels minus one will be randomly set to zeros. This function verifies
if some channels are already empty. Always at least one input channel will be kept.
Args:
seg_pair (dict): Batch containing torch tensors (input and gt) and metadata.
Return:
seg_pair (dict): Batch containing torch tensors (input and gt) and metadata with channel(s) dropped.
"""
n_channels = seg_pair['input'].size(0)
# Verify if the input is multichannel
if n_channels > 1:
# Verify if some channels are already empty
n_unique_values = [len(torch.unique(input_data)) > 1 for input_data in seg_pair['input']]
idx_empty = np.where(np.invert(n_unique_values))[0]
# Select how many channels will be dropped between 0 and n_channels - 1 (keep at least one input)
n_dropped = random.randint(0, n_channels - 1)
if n_dropped > len(idx_empty):
# Remove empty channel to the number of channels to drop
n_dropped = n_dropped - len(idx_empty)
# Select which channels will be dropped
idx_dropped = []
while len(idx_dropped) != n_dropped:
idx = random.randint(0, n_channels - 1)
# Don't include the empty channel in the dropped channels
if idx not in idx_empty:
idx_dropped.append(idx)
else:
idx_dropped = idx_empty
seg_pair['input'][idx_dropped] = torch.zeros_like(seg_pair['input'][idx_dropped])
else:
logger.warning("\n Impossible to apply input-level dropout since input is not multi-channel.")
return seg_pair
|
30,522 |
def main():
commands = {
'domain': domain_command,
'email': email_command,
'file': file_command,
'ip': ip_command,
'url': url_command,
'illuminate-enrich-string': illuminate_enrich_string_command,
'illuminate-enrich-ipv6': illuminate_enrich_ipv6_command,
'illuminate-enrich-mutex': illuminate_enrich_mutex_command,
'illuminate-enrich-http-request': illuminate_enrich_http_request_command
}
command: str = demisto.command()
LOG(f'command is {command}')
try:
client = build_client(demisto.params())
if command == 'test-module':
perform_test_module(client)
demisto.results('ok')
elif command in commands:
enrichment_output: EnrichmentOutput = commands[command](client, demisto.args())
enrichment_output.return_outputs()
except Exception as e:
err_msg = f'Error in {INTEGRATION_NAME} Integration [{e}]'
return_error(err_msg, error=e)
|
def main():
commands = {
'domain': domain_command,
'email': email_command,
'file': file_command,
'ip': ip_command,
'url': url_command,
'illuminate-enrich-string': illuminate_enrich_string_command,
'illuminate-enrich-ipv6': illuminate_enrich_ipv6_command,
'illuminate-enrich-mutex': illuminate_enrich_mutex_command,
'illuminate-enrich-http-request': illuminate_enrich_http_request_command
}
command: str = demisto.command()
LOG(f'command is {command}')
try:
client = build_client(demisto.params())
if command == 'test-module':
perform_test_module(client)
demisto.results('ok')
elif command in commands:
enrichment_output: EnrichmentOutput = commands[command](client, demisto.args())
enrichment_output.return_outputs()
except Exception as e:
err_msg = f'Error in {INTEGRATION_NAME} Integration [{e}]\nTrace:\n{traceback.format_exc()}'
return_error(err_msg, error=e)
|
36,281 |
def calibrate_observable_estimates(qc: QuantumComputer, expt_results: List[ExperimentResult],
n_shots: int = 500, symm_type: int = -1,
noisy_program: Optional[Program] = None,
active_reset: bool = False,
show_progress_bar: bool = False) \
-> Iterable[ExperimentResult]:
"""
Calibrates the expectation and std_err of the input expt_results and updates those estimates.
The input expt_results should be estimated with symmetrized readout error for this to work
properly. Calibration is done by measuring expectation values of eigenstates of the
observable, which ideally should yield either +/- 1 but in practice will have magnitude less
than 1. For default exhaustive_symmetrization the calibration expectation magnitude
averaged over all eigenvectors is recorded as calibration_expectation. The original
expectation is moved to raw_expectation and replaced with the old value scaled by the inverse
calibration expectation.
:param qc: a quantum computer object on which to run the programs necessary to calibrate each
result.
:param expt_results: a list of results, each of which will be separately calibrated.
:param n_shots: the number of shots to run for each eigenvector
:param symm_type: the type of symmetrization
* -1 -- exhaustive symmetrization uses every possible combination of flips; this option
is the default since it ensures proper calibration, but is exponential in the
weight of each observable.
* 0 -- no symmetrization
* 1 -- symmetrization using an OA with strength 1
* 2 -- symmetrization using an OA with strength 2
* 3 -- symmetrization using an OA with strength 3
TODO: accomodate calibration for weight > symmetrization strength (symm_type)
Currently, the symmetrization type must be at least the maximum weight of any observable
estimated and also match the symmetrization type used to estimate the observables of the
input ExperimentResults.
:param noisy_program: an optional program from which to inherit a noise model; only relevant
for running on a QVM
:param active_reset: whether or not to begin the program by actively resetting. If true,
execution of each of the returned programs in a loop on the QPU will generally be faster.
:param show_progress_bar: displays a progress bar via tqdm if true.
:return: a copy of the input results with updated estimates and calibration results.
"""
# get unique observables that will need to be calibrated
observables = {copy(res.setting.out_operator) for res in expt_results}
calibrations = {}
for obs in tqdm(observables, disable=not show_progress_bar):
prog = get_calibration_program(obs, noisy_program, active_reset)
meas_qs = obs.get_qubits()
results = qc.run_symmetrized_readout(prog, n_shots, symm_type, meas_qs)
# Obtain statistics from result of experiment
# TODO: we have to fabricate an ExperimentSetting to pass to _stats_from_measurements
# even though it only needs the observable.
setting = ExperimentSetting(zeros_state(meas_qs), obs)
obs_mean, obs_var = _stats_from_measurements(results,
{q: idx for idx, q in enumerate(meas_qs)},
setting, len(results))
calibrations[obs.operations_as_set()] = (obs_mean, obs_var, len(results))
for expt_result in expt_results:
# TODO: allow weight > symm_type
if -1 < symm_type < len(expt_result.setting.out_operator.get_qubits()):
warnings.warn(f'Calibration of observable {expt_result.setting.out_operator} '
f'currently not supported since it acts on more qubits than the '
f'symm_type {symm_type}.')
# get the calibration data for this observable
cal_data = calibrations[expt_result.setting.out_operator.operations_as_set()]
obs_mean, obs_var, counts = cal_data
# Use the calibration to correct the mean and var
result_mean = expt_result.expectation
result_var = expt_result.std_err ** 2
corrected_mean = result_mean / obs_mean
corrected_var = ratio_variance(result_mean, result_var, obs_mean, obs_var)
yield ExperimentResult(
setting=expt_result.setting,
expectation=corrected_mean,
std_err=np.sqrt(corrected_var),
total_counts=expt_result.total_counts,
raw_expectation=result_mean,
raw_std_err=expt_result.std_err,
calibration_expectation=obs_mean,
calibration_std_err=np.sqrt(obs_var),
calibration_counts=counts
)
|
def calibrate_observable_estimates(qc: QuantumComputer, expt_results: List[ExperimentResult],
n_shots: int = 500, symm_type: int = -1,
noisy_program: Optional[Program] = None,
active_reset: bool = False,
show_progress_bar: bool = False) \
-> Iterable[ExperimentResult]:
"""
Calibrates the expectation and std_err of the input ``expt_results`` and updates those estimates.
The input expt_results should be estimated with symmetrized readout error for this to work
properly. Calibration is done by measuring expectation values of eigenstates of the
observable, which ideally should yield either +/- 1 but in practice will have magnitude less
than 1. For default exhaustive_symmetrization the calibration expectation magnitude
averaged over all eigenvectors is recorded as calibration_expectation. The original
expectation is moved to raw_expectation and replaced with the old value scaled by the inverse
calibration expectation.
:param qc: a quantum computer object on which to run the programs necessary to calibrate each
result.
:param expt_results: a list of results, each of which will be separately calibrated.
:param n_shots: the number of shots to run for each eigenvector
:param symm_type: the type of symmetrization
* -1 -- exhaustive symmetrization uses every possible combination of flips; this option
is the default since it ensures proper calibration, but is exponential in the
weight of each observable.
* 0 -- no symmetrization
* 1 -- symmetrization using an OA with strength 1
* 2 -- symmetrization using an OA with strength 2
* 3 -- symmetrization using an OA with strength 3
TODO: accomodate calibration for weight > symmetrization strength (symm_type)
Currently, the symmetrization type must be at least the maximum weight of any observable
estimated and also match the symmetrization type used to estimate the observables of the
input ExperimentResults.
:param noisy_program: an optional program from which to inherit a noise model; only relevant
for running on a QVM
:param active_reset: whether or not to begin the program by actively resetting. If true,
execution of each of the returned programs in a loop on the QPU will generally be faster.
:param show_progress_bar: displays a progress bar via tqdm if true.
:return: a copy of the input results with updated estimates and calibration results.
"""
# get unique observables that will need to be calibrated
observables = {copy(res.setting.out_operator) for res in expt_results}
calibrations = {}
for obs in tqdm(observables, disable=not show_progress_bar):
prog = get_calibration_program(obs, noisy_program, active_reset)
meas_qs = obs.get_qubits()
results = qc.run_symmetrized_readout(prog, n_shots, symm_type, meas_qs)
# Obtain statistics from result of experiment
# TODO: we have to fabricate an ExperimentSetting to pass to _stats_from_measurements
# even though it only needs the observable.
setting = ExperimentSetting(zeros_state(meas_qs), obs)
obs_mean, obs_var = _stats_from_measurements(results,
{q: idx for idx, q in enumerate(meas_qs)},
setting, len(results))
calibrations[obs.operations_as_set()] = (obs_mean, obs_var, len(results))
for expt_result in expt_results:
# TODO: allow weight > symm_type
if -1 < symm_type < len(expt_result.setting.out_operator.get_qubits()):
warnings.warn(f'Calibration of observable {expt_result.setting.out_operator} '
f'currently not supported since it acts on more qubits than the '
f'symm_type {symm_type}.')
# get the calibration data for this observable
cal_data = calibrations[expt_result.setting.out_operator.operations_as_set()]
obs_mean, obs_var, counts = cal_data
# Use the calibration to correct the mean and var
result_mean = expt_result.expectation
result_var = expt_result.std_err ** 2
corrected_mean = result_mean / obs_mean
corrected_var = ratio_variance(result_mean, result_var, obs_mean, obs_var)
yield ExperimentResult(
setting=expt_result.setting,
expectation=corrected_mean,
std_err=np.sqrt(corrected_var),
total_counts=expt_result.total_counts,
raw_expectation=result_mean,
raw_std_err=expt_result.std_err,
calibration_expectation=obs_mean,
calibration_std_err=np.sqrt(obs_var),
calibration_counts=counts
)
|
57,731 |
def main() -> None:
try:
entity = demisto.args().get('indicator').get('CustomFields', {}).get('chronicleassetip', '')
potentially_isolated = demisto.args().get('indicator').get('CustomFields', {}) \
.get('chroniclepotentiallyblockedip', 'No')
html = get_html_representation(entity, potentially_isolated)
demisto.results({
"Type": 1,
"ContentsFormat": formats["html"],
"Contents": html
})
except Exception as e:
demisto.error(traceback.format_exc())
return_error(f'Could not load widget:\n{e}')
|
def main() -> None:
try:
indicator = args['indicator']
entity = indicator.get('CustomFields', {}).get('chronicleassetip', '')
potentially_isolated = indicator.get('CustomFields', {}).get('chroniclepotentiallyblockedip', 'No')
html = get_html_representation(entity, potentially_isolated)
demisto.results({
"Type": 1,
"ContentsFormat": formats["html"],
"Contents": html
})
except Exception as e:
demisto.error(traceback.format_exc())
return_error(f'Could not load widget:\n{e}')
|
33,794 |
def disconnect():
"""This command is idempotent."""
return ray.disconnect()
|
def disconnect():
"""Disconnects from server; is idempotent."""
return ray.disconnect()
|
5,349 |
def add_tags(
domain_name=None,
arn=None,
tags=None,
region=None,
key=None,
keyid=None,
profile=None,
):
"""
Attaches tags to an existing Elasticsearch domain.
Tags are a set of case-sensitive key value pairs.
An Elasticsearch domain may have up to 10 tags.
:param str domain_name: The name of the Elasticsearch domain you want to add tags to.
:param str arn: The ARN of the Elasticsearch domain you want to add tags to.
Specifying this overrides ``domain_name``.
:param dict tags: The dict of tags to add to the Elasticsearch domain.
:rtype: dict
:return: Dictionary with key 'result' and as value a boolean denoting success or failure.
Upon failure, also contains a key 'error' with the error message as value.
.. versionadded:: Natrium
CLI Example:
.. code-block:: bash
salt myminion boto3_elasticsearch.add_tags domain_name=mydomain tags='{"foo": "bar", "baz": "qux"}'
"""
if not any((arn, domain_name)):
raise SaltInvocationError(
"At least one of domain_name or arn must be specified."
)
ret = {"result": False}
if arn is None:
res = describe_elasticsearch_domain(
domain_name=domain_name,
region=region,
key=key,
keyid=keyid,
profile=profile,
)
if "error" in res:
ret.update(res)
elif not res["result"]:
ret.update(
{
"error": 'The domain with name "{}" does not exist.'.format(
domain_name
)
}
)
else:
arn = res["response"].get("ARN")
if arn:
boto_params = {
"ARN": arn,
"TagList": [
{"Key": k, "Value": value} for k, value in tags.items() or {}.items()
],
}
try:
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
conn.add_tags(**boto_params)
ret["result"] = True
except (ParamValidationError, ClientError) as exp:
ret.update({"error": __utils__["boto3.get_error"](exp)["message"]})
return ret
|
def add_tags(
domain_name=None,
arn=None,
tags=None,
region=None,
key=None,
keyid=None,
profile=None,
):
"""
Attaches tags to an existing Elasticsearch domain.
Tags are a set of case-sensitive key value pairs.
An Elasticsearch domain may have up to 10 tags.
:param str domain_name: The name of the Elasticsearch domain you want to add tags to.
:param str arn: The ARN of the Elasticsearch domain you want to add tags to.
Specifying this overrides ``domain_name``.
:param dict tags: The dict of tags to add to the Elasticsearch domain.
:rtype: dict
:return: Dictionary with key 'result' and as value a boolean denoting success or failure.
Upon failure, also contains a key 'error' with the error message as value.
.. versionadded:: Natrium
CLI Example:
.. code-block:: bash
salt myminion boto3_elasticsearch.add_tags domain_name=mydomain tags='{"foo": "bar", "baz": "qux"}'
"""
if not any((arn, domain_name)):
raise SaltInvocationError(
"At least one of domain_name or arn must be specified."
)
ret = {"result": False}
if arn is None:
res = describe_elasticsearch_domain(
domain_name=domain_name,
region=region,
key=key,
keyid=keyid,
profile=profile,
)
if "error" in res:
ret.update(res)
elif not res["result"]:
ret.update(
{
"error": 'The domain with name "{}" does not exist.'.format(
domain_name
)
}
)
else:
arn = res["response"].get("ARN")
if arn:
boto_params = {
"ARN": arn,
"TagList": [
{"Key": k, "Value": value} for k, value in (tags or {}).items()
],
}
try:
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
conn.add_tags(**boto_params)
ret["result"] = True
except (ParamValidationError, ClientError) as exp:
ret.update({"error": __utils__["boto3.get_error"](exp)["message"]})
return ret
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.