id (int64) | original (string) | modified (string)
---|---|---|
37,951 |
def load_sample_bathymetry():
"""
Load a table of ship observations of bathymetry off Baja California as a
pandas.DataFrame.
This is the ``@tut_ship.xyz`` dataset used in the GMT tutorials.
The data are downloaded to a cache directory (usually ``~/.gmt/cache``) the
first time you invoke this function. Afterwards, it will load the data from
the cache. So you'll need an internet connection the first time around.
Returns
-------
data : pandas.DataFrame
The data table. Columns are longitude, latitude, and bathymetry.
"""
fname = which("@tut_ship.xyz", download="c")
data = pd.read_csv(
fname, sep="\t", header=None, names=["longitude", "latitude", "bathymetry"]
)
return data
|
def load_sample_bathymetry():
"""
Load a table of ship observations of bathymetry off Baja California as a
pandas.DataFrame.
This is the ``@tut_ship.xyz`` dataset used in the GMT tutorials.
The data are downloaded to a cache directory (usually ``~/.gmt/cache``) the
first time you invoke this function. Afterwards, it will load the data from
the cache. So you'll need an internet connection the first time around.
Returns
-------
data : pandas.DataFrame
The data table. Columns are longitude, latitude, and bathymetry.
"""
fname = which("@tut_ship.xyz", download="c")
data = pd.read_csv(
fname, sep="\t", header=None, names=["longitude", "latitude", "bathymetry"]
)
return data
|
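A minimal usage sketch for the row above, assuming the function is exposed as in ``pygmt.datasets`` (the ``@tut_ship.xyz`` reference points at the GMT/PyGMT sample-data machinery); the first call needs an internet connection to fill the cache:

```python
# Hedged sketch: assumes pygmt.datasets exposes load_sample_bathymetry as shown above.
from pygmt.datasets import load_sample_bathymetry

bathymetry = load_sample_bathymetry()
print(list(bathymetry.columns))  # ['longitude', 'latitude', 'bathymetry']
print(bathymetry.head())
```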
51,559 |
def imshow(
img,
zmin=None,
zmax=None,
origin=None,
labels={},
x=None,
y=None,
animation_frame=None,
facet_col=None,
facet_col_wrap=None,
facet_col_spacing=None,
facet_row_spacing=None,
color_continuous_scale=None,
color_continuous_midpoint=None,
range_color=None,
title=None,
template=None,
width=None,
height=None,
aspect=None,
contrast_rescaling=None,
binary_string=None,
binary_backend="auto",
binary_compression_level=4,
binary_format="png",
text_auto=False,
):
"""
Display an image, i.e. data on a 2D regular raster.
Parameters
----------
img: array-like image, or xarray
The image data. Supported array shapes are
- (M, N): an image with scalar data. The data is visualized
using a colormap.
- (M, N, 3): an image with RGB values.
- (M, N, 4): an image with RGBA values, i.e. including transparency.
zmin, zmax : scalar or iterable, optional
zmin and zmax define the scalar range that the colormap covers. By default,
zmin and zmax correspond to the min and max values of the datatype for integer
datatypes (ie [0-255] for uint8 images, [0, 65535] for uint16 images, etc.). For
a multichannel image of floats, the max of the image is computed and zmax is the
smallest power of 256 (1, 255, 65535) greater than this max value,
with a 5% tolerance. For a single-channel image, the max of the image is used.
Overridden by range_color.
origin : str, 'upper' or 'lower' (default 'upper')
position of the [0, 0] pixel of the image array, in the upper left or lower left
corner. The convention 'upper' is typically used for matrices and images.
labels : dict with str keys and str values (default `{}`)
Sets names used in the figure for axis titles (keys ``x`` and ``y``),
colorbar title and hoverlabel (key ``color``). The values should correspond
to the desired label to be displayed. If ``img`` is an xarray, dimension
names are used for axis titles, and long name for the colorbar title
(unless overridden in ``labels``). Possible keys are: x, y, and color.
x, y: list-like, optional
x and y are used to label the axes of single-channel heatmap visualizations and
their lengths must match the lengths of the second and first dimensions of the
img argument. They are auto-populated if the input is an xarray.
animation_frame: int or str, optional (default None)
axis number along which the image array is sliced to create an animation plot.
If `img` is an xarray, `animation_frame` can be the name of one of the dimensions.
facet_col: int or str, optional (default None)
axis number along which the image array is sliced to create a facetted plot.
If `img` is an xarray, `facet_col` can be the name of one of the dimensions.
facet_col_wrap: int
Maximum number of facet columns. Wraps the column variable at this width,
so that the column facets span multiple rows.
Ignored if `facet_col` is None.
facet_col_spacing: float between 0 and 1
Spacing between facet columns, in paper units. Default is 0.02.
facet_row_spacing: float between 0 and 1
Spacing between facet rows created when ``facet_col_wrap`` is used, in
paper units. Default is 0.07.
color_continuous_scale : str or list of str
colormap used to map scalar data to colors (for a 2D image). This parameter is
not used for RGB or RGBA images. If a string is provided, it should be the name
of a known color scale, and if a list is provided, it should be a list of CSS-
compatible colors.
color_continuous_midpoint : number
If set, computes the bounds of the continuous color scale to have the desired
midpoint. Overridden by range_color or zmin and zmax.
range_color : list of two numbers
If provided, overrides auto-scaling on the continuous color scale, including
overriding `color_continuous_midpoint`. Also overrides zmin and zmax. Used only
for single-channel images.
title : str
The figure title.
template : str or dict or plotly.graph_objects.layout.Template instance
The figure template name or definition.
width : number
The figure width in pixels.
height: number
The figure height in pixels.
aspect: 'equal', 'auto', or None
- 'equal': Ensures an aspect ratio of 1 for pixels (square pixels)
- 'auto': The axes are kept fixed and the aspect ratio of pixels is
adjusted so that the data fit in the axes. In general, this will
result in non-square pixels.
- if None, 'equal' is used for numpy arrays and 'auto' for xarrays
(which have typically heterogeneous coordinates)
contrast_rescaling: 'minmax', 'infer', or None
how to determine data values corresponding to the bounds of the color
range, when zmin or zmax are not passed. If `minmax`, the min and max
values of the image are used. If `infer`, a heuristic based on the image
data type is used.
binary_string: bool, default None
if True, the image data are first rescaled and encoded as uint8 and
then passed to plotly.js as a b64 PNG string. If False, data are passed
unchanged as a numerical array. Setting to True may lead to performance
gains, at the cost of a loss of precision depending on the original data
type. If None, use_binary_string is set to True for multichannel (e.g. RGB)
arrays, and to False for single-channel (2D) arrays. 2D arrays are
represented as grayscale and with no colorbar if use_binary_string is
True.
binary_backend: str, 'auto' (default), 'pil' or 'pypng'
Third-party package for the transformation of numpy arrays to
png b64 strings. If 'auto', Pillow is used if installed, otherwise
pypng.
binary_compression_level: int, between 0 and 9 (default 4)
png compression level to be passed to the backend when transforming an
array to a png b64 string. Increasing `binary_compression` decreases the
size of the png string, but the compression step takes more time. For most
images it is not worth using levels greater than 5, but it's possible to
test `len(fig.data[0].source)` and to time the execution of `imshow` to
tune the level of compression. 0 means no compression (not recommended).
binary_format: str, 'png' (default) or 'jpg'
compression format used to generate b64 string. 'png' is recommended
since it uses lossless compression, but 'jpg' (lossy) compression can
result in smaller binary strings for natural images.
text_auto: bool or str (default `False`)
If `True` or a string, single-channel `img` values will be displayed as text",
A string like `'.2f'`, it will be interpreted as a `texttemplate` numeric formatting directive.
Returns
-------
fig : graph_objects.Figure containing the displayed image
See also
--------
plotly.graph_objects.Image : image trace
plotly.graph_objects.Heatmap : heatmap trace
Notes
-----
In order to update and customize the returned figure, use
`go.Figure.update_traces` or `go.Figure.update_layout`.
If an xarray is passed, dimensions names and coordinates are used for
axes labels and ticks.
"""
args = locals()
apply_default_cascade(args)
labels = labels.copy()
nslices_facet = 1
if facet_col is not None:
if isinstance(facet_col, str):
facet_col = img.dims.index(facet_col)
nslices_facet = img.shape[facet_col]
facet_slices = range(nslices_facet)
ncols = int(facet_col_wrap) if facet_col_wrap is not None else nslices_facet
nrows = (
nslices_facet // ncols + 1
if nslices_facet % ncols
else nslices_facet // ncols
)
else:
nrows = 1
ncols = 1
if animation_frame is not None:
if isinstance(animation_frame, str):
animation_frame = img.dims.index(animation_frame)
nslices_animation = img.shape[animation_frame]
animation_slices = range(nslices_animation)
slice_dimensions = (facet_col is not None) + (
animation_frame is not None
) # 0, 1, or 2
facet_label = None
animation_label = None
img_is_xarray = False
# ----- Define x and y, set labels if img is an xarray -------------------
if xarray_imported and isinstance(img, xarray.DataArray):
dims = list(img.dims)
img_is_xarray = True
if facet_col is not None:
facet_slices = img.coords[img.dims[facet_col]].values
_ = dims.pop(facet_col)
facet_label = img.dims[facet_col]
if animation_frame is not None:
animation_slices = img.coords[img.dims[animation_frame]].values
_ = dims.pop(animation_frame)
animation_label = img.dims[animation_frame]
y_label, x_label = dims[0], dims[1]
# np.datetime64 is not handled correctly by go.Heatmap
for ax in [x_label, y_label]:
if np.issubdtype(img.coords[ax].dtype, np.datetime64):
img.coords[ax] = img.coords[ax].astype(str)
if x is None:
x = img.coords[x_label].values
if y is None:
y = img.coords[y_label].values
if aspect is None:
aspect = "auto"
if labels.get("x", None) is None:
labels["x"] = x_label
if labels.get("y", None) is None:
labels["y"] = y_label
if labels.get("animation_frame", None) is None:
labels["animation_frame"] = animation_label
if labels.get("facet_col", None) is None:
labels["facet_col"] = facet_label
if labels.get("color", None) is None:
labels["color"] = xarray.plot.utils.label_from_attrs(img)
labels["color"] = labels["color"].replace("\n", "<br>")
else:
if hasattr(img, "columns") and hasattr(img.columns, "__len__"):
if x is None:
x = img.columns
if labels.get("x", None) is None and hasattr(img.columns, "name"):
labels["x"] = img.columns.name or ""
if hasattr(img, "index") and hasattr(img.index, "__len__"):
if y is None:
y = img.index
if labels.get("y", None) is None and hasattr(img.index, "name"):
labels["y"] = img.index.name or ""
if labels.get("x", None) is None:
labels["x"] = ""
if labels.get("y", None) is None:
labels["y"] = ""
if labels.get("color", None) is None:
labels["color"] = ""
if aspect is None:
aspect = "equal"
# --- Set the value of binary_string (forbidden for pandas)
if isinstance(img, pd.DataFrame):
if binary_string:
raise ValueError("Binary strings cannot be used with pandas arrays")
is_dataframe = True
else:
is_dataframe = False
# --------------- Starting from here img is always a numpy array --------
img = np.asanyarray(img)
# Reshape array so that animation dimension comes first, then facets, then images
if facet_col is not None:
img = np.moveaxis(img, facet_col, 0)
if animation_frame is not None and animation_frame < facet_col:
animation_frame += 1
facet_col = True
if animation_frame is not None:
img = np.moveaxis(img, animation_frame, 0)
animation_frame = True
args["animation_frame"] = (
"animation_frame"
if labels.get("animation_frame") is None
else labels["animation_frame"]
)
iterables = ()
if animation_frame is not None:
iterables += (range(nslices_animation),)
if facet_col is not None:
iterables += (range(nslices_facet),)
# Default behaviour of binary_string: True for RGB images, False for 2D
if binary_string is None:
binary_string = img.ndim >= (3 + slice_dimensions) and not is_dataframe
# Cast bools to uint8 (also one byte)
if img.dtype == np.bool:
img = 255 * img.astype(np.uint8)
if range_color is not None:
zmin = range_color[0]
zmax = range_color[1]
# -------- Contrast rescaling: either minmax or infer ------------------
if contrast_rescaling is None:
contrast_rescaling = "minmax" if img.ndim == (2 + slice_dimensions) else "infer"
# We try to set zmin and zmax only if necessary, because traces have good defaults
if contrast_rescaling == "minmax":
# When using binary_string and minmax we need to set zmin and zmax to rescale the image
if (zmin is not None or binary_string) and zmax is None:
zmax = img.max()
if (zmax is not None or binary_string) and zmin is None:
zmin = img.min()
else:
# For uint8 data and infer we let zmin and zmax to be None if passed as None
if zmax is None and img.dtype != np.uint8:
zmax = _infer_zmax_from_type(img)
if zmin is None and zmax is not None:
zmin = 0
# For 2d data, use Heatmap trace, unless binary_string is True
if img.ndim == 2 + slice_dimensions and not binary_string:
y_index = slice_dimensions
if y is not None and img.shape[y_index] != len(y):
raise ValueError(
"The length of the y vector must match the length of the first "
+ "dimension of the img matrix."
)
x_index = slice_dimensions + 1
if x is not None and img.shape[x_index] != len(x):
raise ValueError(
"The length of the x vector must match the length of the second "
+ "dimension of the img matrix."
)
if text_auto is True:
texttemplate = "%{z}"
elif text_auto is not False:
texttemplate = "%{z:" + text_auto + "}"
traces = [
go.Heatmap(
x=x,
y=y,
z=img[index_tup],
coloraxis="coloraxis1",
name=str(i),
texttemplate=texttemplate,
)
for i, index_tup in enumerate(itertools.product(*iterables))
]
autorange = True if origin == "lower" else "reversed"
layout = dict(yaxis=dict(autorange=autorange))
if aspect == "equal":
layout["xaxis"] = dict(scaleanchor="y", constrain="domain")
layout["yaxis"]["constrain"] = "domain"
colorscale_validator = ColorscaleValidator("colorscale", "imshow")
layout["coloraxis1"] = dict(
colorscale=colorscale_validator.validate_coerce(
args["color_continuous_scale"]
),
cmid=color_continuous_midpoint,
cmin=zmin,
cmax=zmax,
)
if labels["color"]:
layout["coloraxis1"]["colorbar"] = dict(title_text=labels["color"])
# For 2D+RGB data, use Image trace
elif (
img.ndim >= 3
and (img.shape[-1] in [3, 4] or slice_dimensions and binary_string)
) or (img.ndim == 2 and binary_string):
rescale_image = True # to check whether image has been modified
if zmin is not None and zmax is not None:
zmin, zmax = (
_vectorize_zvalue(zmin, mode="min"),
_vectorize_zvalue(zmax, mode="max"),
)
x0, y0, dx, dy = (None,) * 4
error_msg_xarray = (
"Non-numerical coordinates were passed with xarray `img`, but "
"the Image trace cannot handle it. Please use `binary_string=False` "
"for 2D data or pass instead the numpy array `img.values` to `px.imshow`."
)
if x is not None:
x = np.asanyarray(x)
if np.issubdtype(x.dtype, np.number):
x0 = x[0]
dx = x[1] - x[0]
else:
error_msg = (
error_msg_xarray
if img_is_xarray
else (
"Only numerical values are accepted for the `x` parameter "
"when an Image trace is used."
)
)
raise ValueError(error_msg)
if y is not None:
y = np.asanyarray(y)
if np.issubdtype(y.dtype, np.number):
y0 = y[0]
dy = y[1] - y[0]
else:
error_msg = (
error_msg_xarray
if img_is_xarray
else (
"Only numerical values are accepted for the `y` parameter "
"when an Image trace is used."
)
)
raise ValueError(error_msg)
if binary_string:
if zmin is None and zmax is None: # no rescaling, faster
img_rescaled = img
rescale_image = False
elif img.ndim == 2 + slice_dimensions: # single-channel image
img_rescaled = rescale_intensity(
img, in_range=(zmin[0], zmax[0]), out_range=np.uint8
)
else:
img_rescaled = np.stack(
[
rescale_intensity(
img[..., ch],
in_range=(zmin[ch], zmax[ch]),
out_range=np.uint8,
)
for ch in range(img.shape[-1])
],
axis=-1,
)
img_str = [
image_array_to_data_uri(
img_rescaled[index_tup],
backend=binary_backend,
compression=binary_compression_level,
ext=binary_format,
)
for index_tup in itertools.product(*iterables)
]
traces = [
go.Image(source=img_str_slice, name=str(i), x0=x0, y0=y0, dx=dx, dy=dy)
for i, img_str_slice in enumerate(img_str)
]
else:
colormodel = "rgb" if img.shape[-1] == 3 else "rgba256"
traces = [
go.Image(
z=img[index_tup],
zmin=zmin,
zmax=zmax,
colormodel=colormodel,
x0=x0,
y0=y0,
dx=dx,
dy=dy,
)
for index_tup in itertools.product(*iterables)
]
layout = {}
if origin == "lower" or (dy is not None and dy < 0):
layout["yaxis"] = dict(autorange=True)
if dx is not None and dx < 0:
layout["xaxis"] = dict(autorange="reversed")
else:
raise ValueError(
"px.imshow only accepts 2D single-channel, RGB or RGBA images. "
"An image of shape %s was provided. "
"Alternatively, 3- or 4-D single or multichannel datasets can be "
"visualized using the `facet_col` or/and `animation_frame` arguments."
% str(img.shape)
)
# Now build figure
col_labels = []
if facet_col is not None:
slice_label = (
"facet_col" if labels.get("facet_col") is None else labels["facet_col"]
)
col_labels = ["%s=%d" % (slice_label, i) for i in facet_slices]
fig = init_figure(args, "xy", [], nrows, ncols, col_labels, [])
for attr_name in ["height", "width"]:
if args[attr_name]:
layout[attr_name] = args[attr_name]
if args["title"]:
layout["title_text"] = args["title"]
elif args["template"].layout.margin.t is None:
layout["margin"] = {"t": 60}
frame_list = []
for index, trace in enumerate(traces):
if (facet_col and index < nrows * ncols) or index == 0:
fig.add_trace(trace, row=nrows - index // ncols, col=index % ncols + 1)
if animation_frame is not None:
for i, index in zip(range(nslices_animation), animation_slices):
frame_list.append(
dict(
data=traces[nslices_facet * i : nslices_facet * (i + 1)],
layout=layout,
name=str(index),
)
)
if animation_frame:
fig.frames = frame_list
fig.update_layout(layout)
# Hover name, z or color
if binary_string and rescale_image and not np.all(img == img_rescaled):
# we rescaled the image, hence z is not displayed in hover since it does
# not correspond to img values
hovertemplate = "%s: %%{x}<br>%s: %%{y}<extra></extra>" % (
labels["x"] or "x",
labels["y"] or "y",
)
else:
if trace["type"] == "heatmap":
hover_name = "%{z}"
elif img.ndim == 2:
hover_name = "%{z[0]}"
elif img.ndim == 3 and img.shape[-1] == 3:
hover_name = "[%{z[0]}, %{z[1]}, %{z[2]}]"
else:
hover_name = "%{z}"
hovertemplate = "%s: %%{x}<br>%s: %%{y}<br>%s: %s<extra></extra>" % (
labels["x"] or "x",
labels["y"] or "y",
labels["color"] or "color",
hover_name,
)
fig.update_traces(hovertemplate=hovertemplate)
if labels["x"]:
fig.update_xaxes(title_text=labels["x"], row=1)
if labels["y"]:
fig.update_yaxes(title_text=labels["y"], col=1)
configure_animation_controls(args, go.Image, fig)
fig.update_layout(template=args["template"], overwrite=True)
return fig
|
def imshow(
img,
zmin=None,
zmax=None,
origin=None,
labels={},
x=None,
y=None,
animation_frame=None,
facet_col=None,
facet_col_wrap=None,
facet_col_spacing=None,
facet_row_spacing=None,
color_continuous_scale=None,
color_continuous_midpoint=None,
range_color=None,
title=None,
template=None,
width=None,
height=None,
aspect=None,
contrast_rescaling=None,
binary_string=None,
binary_backend="auto",
binary_compression_level=4,
binary_format="png",
text_auto=False,
):
"""
Display an image, i.e. data on a 2D regular raster.
Parameters
----------
img: array-like image, or xarray
The image data. Supported array shapes are
- (M, N): an image with scalar data. The data is visualized
using a colormap.
- (M, N, 3): an image with RGB values.
- (M, N, 4): an image with RGBA values, i.e. including transparency.
zmin, zmax : scalar or iterable, optional
zmin and zmax define the scalar range that the colormap covers. By default,
zmin and zmax correspond to the min and max values of the datatype for integer
datatypes (ie [0-255] for uint8 images, [0, 65535] for uint16 images, etc.). For
a multichannel image of floats, the max of the image is computed and zmax is the
smallest power of 256 (1, 255, 65535) greater than this max value,
with a 5% tolerance. For a single-channel image, the max of the image is used.
Overridden by range_color.
origin : str, 'upper' or 'lower' (default 'upper')
position of the [0, 0] pixel of the image array, in the upper left or lower left
corner. The convention 'upper' is typically used for matrices and images.
labels : dict with str keys and str values (default `{}`)
Sets names used in the figure for axis titles (keys ``x`` and ``y``),
colorbar title and hoverlabel (key ``color``). The values should correspond
to the desired label to be displayed. If ``img`` is an xarray, dimension
names are used for axis titles, and long name for the colorbar title
(unless overridden in ``labels``). Possible keys are: x, y, and color.
x, y: list-like, optional
x and y are used to label the axes of single-channel heatmap visualizations and
their lengths must match the lengths of the second and first dimensions of the
img argument. They are auto-populated if the input is an xarray.
animation_frame: int or str, optional (default None)
axis number along which the image array is sliced to create an animation plot.
If `img` is an xarray, `animation_frame` can be the name of one of the dimensions.
facet_col: int or str, optional (default None)
axis number along which the image array is sliced to create a facetted plot.
If `img` is an xarray, `facet_col` can be the name of one of the dimensions.
facet_col_wrap: int
Maximum number of facet columns. Wraps the column variable at this width,
so that the column facets span multiple rows.
Ignored if `facet_col` is None.
facet_col_spacing: float between 0 and 1
Spacing between facet columns, in paper units. Default is 0.02.
facet_row_spacing: float between 0 and 1
Spacing between facet rows created when ``facet_col_wrap`` is used, in
paper units. Default is 0.07.
color_continuous_scale : str or list of str
colormap used to map scalar data to colors (for a 2D image). This parameter is
not used for RGB or RGBA images. If a string is provided, it should be the name
of a known color scale, and if a list is provided, it should be a list of CSS-
compatible colors.
color_continuous_midpoint : number
If set, computes the bounds of the continuous color scale to have the desired
midpoint. Overridden by range_color or zmin and zmax.
range_color : list of two numbers
If provided, overrides auto-scaling on the continuous color scale, including
overriding `color_continuous_midpoint`. Also overrides zmin and zmax. Used only
for single-channel images.
title : str
The figure title.
template : str or dict or plotly.graph_objects.layout.Template instance
The figure template name or definition.
width : number
The figure width in pixels.
height: number
The figure height in pixels.
aspect: 'equal', 'auto', or None
- 'equal': Ensures an aspect ratio of 1 for pixels (square pixels)
- 'auto': The axes are kept fixed and the aspect ratio of pixels is
adjusted so that the data fit in the axes. In general, this will
result in non-square pixels.
- if None, 'equal' is used for numpy arrays and 'auto' for xarrays
(which have typically heterogeneous coordinates)
contrast_rescaling: 'minmax', 'infer', or None
how to determine data values corresponding to the bounds of the color
range, when zmin or zmax are not passed. If `minmax`, the min and max
values of the image are used. If `infer`, a heuristic based on the image
data type is used.
binary_string: bool, default None
if True, the image data are first rescaled and encoded as uint8 and
then passed to plotly.js as a b64 PNG string. If False, data are passed
unchanged as a numerical array. Setting to True may lead to performance
gains, at the cost of a loss of precision depending on the original data
type. If None, use_binary_string is set to True for multichannel (e.g. RGB)
arrays, and to False for single-channel (2D) arrays. 2D arrays are
represented as grayscale and with no colorbar if use_binary_string is
True.
binary_backend: str, 'auto' (default), 'pil' or 'pypng'
Third-party package for the transformation of numpy arrays to
png b64 strings. If 'auto', Pillow is used if installed, otherwise
pypng.
binary_compression_level: int, between 0 and 9 (default 4)
png compression level to be passed to the backend when transforming an
array to a png b64 string. Increasing `binary_compression` decreases the
size of the png string, but the compression step takes more time. For most
images it is not worth using levels greater than 5, but it's possible to
test `len(fig.data[0].source)` and to time the execution of `imshow` to
tune the level of compression. 0 means no compression (not recommended).
binary_format: str, 'png' (default) or 'jpg'
compression format used to generate b64 string. 'png' is recommended
since it uses lossless compression, but 'jpg' (lossy) compression can
result in smaller binary strings for natural images.
text_auto: bool or str (default `False`)
If `True` or a string, single-channel `img` values will be displayed as text.
A string like `'.2f'` will be interpreted as a `texttemplate` numeric formatting directive.
Returns
-------
fig : graph_objects.Figure containing the displayed image
See also
--------
plotly.graph_objects.Image : image trace
plotly.graph_objects.Heatmap : heatmap trace
Notes
-----
In order to update and customize the returned figure, use
`go.Figure.update_traces` or `go.Figure.update_layout`.
If an xarray is passed, dimensions names and coordinates are used for
axes labels and ticks.
"""
args = locals()
apply_default_cascade(args)
labels = labels.copy()
nslices_facet = 1
if facet_col is not None:
if isinstance(facet_col, str):
facet_col = img.dims.index(facet_col)
nslices_facet = img.shape[facet_col]
facet_slices = range(nslices_facet)
ncols = int(facet_col_wrap) if facet_col_wrap is not None else nslices_facet
nrows = (
nslices_facet // ncols + 1
if nslices_facet % ncols
else nslices_facet // ncols
)
else:
nrows = 1
ncols = 1
if animation_frame is not None:
if isinstance(animation_frame, str):
animation_frame = img.dims.index(animation_frame)
nslices_animation = img.shape[animation_frame]
animation_slices = range(nslices_animation)
slice_dimensions = (facet_col is not None) + (
animation_frame is not None
) # 0, 1, or 2
facet_label = None
animation_label = None
img_is_xarray = False
# ----- Define x and y, set labels if img is an xarray -------------------
if xarray_imported and isinstance(img, xarray.DataArray):
dims = list(img.dims)
img_is_xarray = True
if facet_col is not None:
facet_slices = img.coords[img.dims[facet_col]].values
_ = dims.pop(facet_col)
facet_label = img.dims[facet_col]
if animation_frame is not None:
animation_slices = img.coords[img.dims[animation_frame]].values
_ = dims.pop(animation_frame)
animation_label = img.dims[animation_frame]
y_label, x_label = dims[0], dims[1]
# np.datetime64 is not handled correctly by go.Heatmap
for ax in [x_label, y_label]:
if np.issubdtype(img.coords[ax].dtype, np.datetime64):
img.coords[ax] = img.coords[ax].astype(str)
if x is None:
x = img.coords[x_label].values
if y is None:
y = img.coords[y_label].values
if aspect is None:
aspect = "auto"
if labels.get("x", None) is None:
labels["x"] = x_label
if labels.get("y", None) is None:
labels["y"] = y_label
if labels.get("animation_frame", None) is None:
labels["animation_frame"] = animation_label
if labels.get("facet_col", None) is None:
labels["facet_col"] = facet_label
if labels.get("color", None) is None:
labels["color"] = xarray.plot.utils.label_from_attrs(img)
labels["color"] = labels["color"].replace("\n", "<br>")
else:
if hasattr(img, "columns") and hasattr(img.columns, "__len__"):
if x is None:
x = img.columns
if labels.get("x", None) is None and hasattr(img.columns, "name"):
labels["x"] = img.columns.name or ""
if hasattr(img, "index") and hasattr(img.index, "__len__"):
if y is None:
y = img.index
if labels.get("y", None) is None and hasattr(img.index, "name"):
labels["y"] = img.index.name or ""
if labels.get("x", None) is None:
labels["x"] = ""
if labels.get("y", None) is None:
labels["y"] = ""
if labels.get("color", None) is None:
labels["color"] = ""
if aspect is None:
aspect = "equal"
# --- Set the value of binary_string (forbidden for pandas)
if isinstance(img, pd.DataFrame):
if binary_string:
raise ValueError("Binary strings cannot be used with pandas arrays")
is_dataframe = True
else:
is_dataframe = False
# --------------- Starting from here img is always a numpy array --------
img = np.asanyarray(img)
# Reshape array so that animation dimension comes first, then facets, then images
if facet_col is not None:
img = np.moveaxis(img, facet_col, 0)
if animation_frame is not None and animation_frame < facet_col:
animation_frame += 1
facet_col = True
if animation_frame is not None:
img = np.moveaxis(img, animation_frame, 0)
animation_frame = True
args["animation_frame"] = (
"animation_frame"
if labels.get("animation_frame") is None
else labels["animation_frame"]
)
iterables = ()
if animation_frame is not None:
iterables += (range(nslices_animation),)
if facet_col is not None:
iterables += (range(nslices_facet),)
# Default behaviour of binary_string: True for RGB images, False for 2D
if binary_string is None:
binary_string = img.ndim >= (3 + slice_dimensions) and not is_dataframe
# Cast bools to uint8 (also one byte)
if img.dtype == np.bool:
img = 255 * img.astype(np.uint8)
if range_color is not None:
zmin = range_color[0]
zmax = range_color[1]
# -------- Contrast rescaling: either minmax or infer ------------------
if contrast_rescaling is None:
contrast_rescaling = "minmax" if img.ndim == (2 + slice_dimensions) else "infer"
# We try to set zmin and zmax only if necessary, because traces have good defaults
if contrast_rescaling == "minmax":
# When using binary_string and minmax we need to set zmin and zmax to rescale the image
if (zmin is not None or binary_string) and zmax is None:
zmax = img.max()
if (zmax is not None or binary_string) and zmin is None:
zmin = img.min()
else:
# For uint8 data and infer we let zmin and zmax to be None if passed as None
if zmax is None and img.dtype != np.uint8:
zmax = _infer_zmax_from_type(img)
if zmin is None and zmax is not None:
zmin = 0
# For 2d data, use Heatmap trace, unless binary_string is True
if img.ndim == 2 + slice_dimensions and not binary_string:
y_index = slice_dimensions
if y is not None and img.shape[y_index] != len(y):
raise ValueError(
"The length of the y vector must match the length of the first "
+ "dimension of the img matrix."
)
x_index = slice_dimensions + 1
if x is not None and img.shape[x_index] != len(x):
raise ValueError(
"The length of the x vector must match the length of the second "
+ "dimension of the img matrix."
)
if text_auto is True:
texttemplate = "%{z}"
elif text_auto is not False:
texttemplate = "%{z:" + text_auto + "}"
traces = [
go.Heatmap(
x=x,
y=y,
z=img[index_tup],
coloraxis="coloraxis1",
name=str(i),
texttemplate=texttemplate,
)
for i, index_tup in enumerate(itertools.product(*iterables))
]
autorange = True if origin == "lower" else "reversed"
layout = dict(yaxis=dict(autorange=autorange))
if aspect == "equal":
layout["xaxis"] = dict(scaleanchor="y", constrain="domain")
layout["yaxis"]["constrain"] = "domain"
colorscale_validator = ColorscaleValidator("colorscale", "imshow")
layout["coloraxis1"] = dict(
colorscale=colorscale_validator.validate_coerce(
args["color_continuous_scale"]
),
cmid=color_continuous_midpoint,
cmin=zmin,
cmax=zmax,
)
if labels["color"]:
layout["coloraxis1"]["colorbar"] = dict(title_text=labels["color"])
# For 2D+RGB data, use Image trace
elif (
img.ndim >= 3
and (img.shape[-1] in [3, 4] or slice_dimensions and binary_string)
) or (img.ndim == 2 and binary_string):
rescale_image = True # to check whether image has been modified
if zmin is not None and zmax is not None:
zmin, zmax = (
_vectorize_zvalue(zmin, mode="min"),
_vectorize_zvalue(zmax, mode="max"),
)
x0, y0, dx, dy = (None,) * 4
error_msg_xarray = (
"Non-numerical coordinates were passed with xarray `img`, but "
"the Image trace cannot handle it. Please use `binary_string=False` "
"for 2D data or pass instead the numpy array `img.values` to `px.imshow`."
)
if x is not None:
x = np.asanyarray(x)
if np.issubdtype(x.dtype, np.number):
x0 = x[0]
dx = x[1] - x[0]
else:
error_msg = (
error_msg_xarray
if img_is_xarray
else (
"Only numerical values are accepted for the `x` parameter "
"when an Image trace is used."
)
)
raise ValueError(error_msg)
if y is not None:
y = np.asanyarray(y)
if np.issubdtype(y.dtype, np.number):
y0 = y[0]
dy = y[1] - y[0]
else:
error_msg = (
error_msg_xarray
if img_is_xarray
else (
"Only numerical values are accepted for the `y` parameter "
"when an Image trace is used."
)
)
raise ValueError(error_msg)
if binary_string:
if zmin is None and zmax is None: # no rescaling, faster
img_rescaled = img
rescale_image = False
elif img.ndim == 2 + slice_dimensions: # single-channel image
img_rescaled = rescale_intensity(
img, in_range=(zmin[0], zmax[0]), out_range=np.uint8
)
else:
img_rescaled = np.stack(
[
rescale_intensity(
img[..., ch],
in_range=(zmin[ch], zmax[ch]),
out_range=np.uint8,
)
for ch in range(img.shape[-1])
],
axis=-1,
)
img_str = [
image_array_to_data_uri(
img_rescaled[index_tup],
backend=binary_backend,
compression=binary_compression_level,
ext=binary_format,
)
for index_tup in itertools.product(*iterables)
]
traces = [
go.Image(source=img_str_slice, name=str(i), x0=x0, y0=y0, dx=dx, dy=dy)
for i, img_str_slice in enumerate(img_str)
]
else:
colormodel = "rgb" if img.shape[-1] == 3 else "rgba256"
traces = [
go.Image(
z=img[index_tup],
zmin=zmin,
zmax=zmax,
colormodel=colormodel,
x0=x0,
y0=y0,
dx=dx,
dy=dy,
)
for index_tup in itertools.product(*iterables)
]
layout = {}
if origin == "lower" or (dy is not None and dy < 0):
layout["yaxis"] = dict(autorange=True)
if dx is not None and dx < 0:
layout["xaxis"] = dict(autorange="reversed")
else:
raise ValueError(
"px.imshow only accepts 2D single-channel, RGB or RGBA images. "
"An image of shape %s was provided. "
"Alternatively, 3- or 4-D single or multichannel datasets can be "
"visualized using the `facet_col` or/and `animation_frame` arguments."
% str(img.shape)
)
# Now build figure
col_labels = []
if facet_col is not None:
slice_label = (
"facet_col" if labels.get("facet_col") is None else labels["facet_col"]
)
col_labels = ["%s=%d" % (slice_label, i) for i in facet_slices]
fig = init_figure(args, "xy", [], nrows, ncols, col_labels, [])
for attr_name in ["height", "width"]:
if args[attr_name]:
layout[attr_name] = args[attr_name]
if args["title"]:
layout["title_text"] = args["title"]
elif args["template"].layout.margin.t is None:
layout["margin"] = {"t": 60}
frame_list = []
for index, trace in enumerate(traces):
if (facet_col and index < nrows * ncols) or index == 0:
fig.add_trace(trace, row=nrows - index // ncols, col=index % ncols + 1)
if animation_frame is not None:
for i, index in zip(range(nslices_animation), animation_slices):
frame_list.append(
dict(
data=traces[nslices_facet * i : nslices_facet * (i + 1)],
layout=layout,
name=str(index),
)
)
if animation_frame:
fig.frames = frame_list
fig.update_layout(layout)
# Hover name, z or color
if binary_string and rescale_image and not np.all(img == img_rescaled):
# we rescaled the image, hence z is not displayed in hover since it does
# not correspond to img values
hovertemplate = "%s: %%{x}<br>%s: %%{y}<extra></extra>" % (
labels["x"] or "x",
labels["y"] or "y",
)
else:
if trace["type"] == "heatmap":
hover_name = "%{z}"
elif img.ndim == 2:
hover_name = "%{z[0]}"
elif img.ndim == 3 and img.shape[-1] == 3:
hover_name = "[%{z[0]}, %{z[1]}, %{z[2]}]"
else:
hover_name = "%{z}"
hovertemplate = "%s: %%{x}<br>%s: %%{y}<br>%s: %s<extra></extra>" % (
labels["x"] or "x",
labels["y"] or "y",
labels["color"] or "color",
hover_name,
)
fig.update_traces(hovertemplate=hovertemplate)
if labels["x"]:
fig.update_xaxes(title_text=labels["x"], row=1)
if labels["y"]:
fig.update_yaxes(title_text=labels["y"], col=1)
configure_animation_controls(args, go.Image, fig)
fig.update_layout(template=args["template"], overwrite=True)
return fig
|
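For reference, a short hedged sketch exercising a few of the ``px.imshow`` parameters documented above (input values are arbitrary):

```python
# Illustrative px.imshow calls; a sketch only, not derived from the dataset row above.
import numpy as np
import plotly.express as px

img = np.random.rand(20, 30)  # single-channel (M, N) image
fig = px.imshow(
    img,
    origin="lower",                      # place the [0, 0] pixel in the lower-left corner
    color_continuous_scale="Viridis",
    labels={"x": "column", "y": "row", "color": "value"},
    zmin=0.0,
    zmax=1.0,
    title="Random single-channel image",
)
# fig.show()

# RGB input: the continuous colorscale is not used for (M, N, 3) data.
rgb = (np.random.rand(20, 30, 3) * 255).astype("uint8")
fig_rgb = px.imshow(rgb, binary_string=True)
```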
59,793 |
def __getattr__(name):
if name in deprecated_names:
warnings.filterwarnings("default", category=DeprecationWarning)
warning_msg = f"{name} is DEPRECATED and will be removed in a future release of the OpenFF Toolkit."
warnings.warn(warning_msg, DeprecationWarning)
if name == "ParseError":
from openff.toolkit.utils.exceptions import _DeprecatedParseError
return _DeprecatedParseError
raise AttributeError(f"module {__name__} has no attribute {name}")
|
def __getattr__(name):
if name in deprecated_names:
warnings.filterwarnings("default", category=DeprecationWarning)
warning_msg = f"{name} is DEPRECATED and will be removed in a future release of the OpenFF Toolkit."
warnings.warn(warning_msg, DeprecationWarning)
if name == "ParseError":
from openff.toolkit.utils.exceptions import _DeprecatedParseError
return _DeprecatedParseError
raise AttributeError(f"module {__name__} has no attribute {name}")
|
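The pair above relies on PEP 562 module-level ``__getattr__`` to emit deprecation warnings lazily; a self-contained sketch of the same pattern with hypothetical names:

```python
# Hypothetical module (say, mymodule.py); names are illustrative only.
import warnings

deprecated_names = {"OldParser"}

class _NewParser:
    pass

def __getattr__(name):
    if name in deprecated_names:
        warnings.warn(
            f"{name} is deprecated and will be removed in a future release; "
            "use _NewParser instead.",
            DeprecationWarning,
            stacklevel=2,
        )
        return _NewParser
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
```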
26,263 |
def format_rows_number(rows_number):
for unit in ['', 'k', 'm', 'b', 't']:
if abs(rows_number) < 1000.0:
return f"{rows_number:3.1f} {unit}".strip()
rows_number /= 1000.0
rows_number *= 1000.0
return f"{rows_number:3.1f} {unit}".strip()
|
def format_rows_number(rows_number):
for unit in ['', 'k', 'm', 'b', 't']:
if abs(rows_number) < 1000.0:
return f"{rows_number:3.1f} {unit}".strip()
rows_number /= 1000.0
rows_number *= 1000.0
return f"{rows_number:3.1f}{unit}".strip()
|
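Expected behaviour of the modified helper above, restated as a small runnable check (the function body is copied for self-containment; the expected strings follow from the code as written):

```python
# Restatement of the modified format_rows_number above, for illustration only.
def format_rows_number(rows_number):
    for unit in ['', 'k', 'm', 'b', 't']:
        if abs(rows_number) < 1000.0:
            return f"{rows_number:3.1f} {unit}".strip()
        rows_number /= 1000.0
    rows_number *= 1000.0
    return f"{rows_number:3.1f}{unit}".strip()

assert format_rows_number(950) == "950.0"         # no unit suffix below 1000
assert format_rows_number(59_793) == "59.8 k"     # the in-loop return keeps a space
assert format_rows_number(26_263_000) == "26.3 m"
```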
58,586 |
def prepare_dataset_shard(tf_dataset_shard: tf.data.Dataset):
""" A utility function that disables Tensorflow autosharding
since the dataset has already been sharded.
Args:
tf_dataset_shard (tf.data.Dataset): A TensorFlow Dataset.
Returns:
A TensorFlow Dataset with autosharding turned off.
"""
options = tf.data.Options()
options.experimental_distribute.auto_shard_policy = \
tf.data.experimental.AutoShardPolicy.OFF
return tf_dataset_shard.with_options(options)
|
def prepare_dataset_shard(tf_dataset_shard: tf.data.Dataset):
""" A utility function that disables Tensorflow autosharding.
This should be used on a TensorFlow ``Dataset`` created by calling ``to_tf()``
on a ``ray.data.Dataset`` returned by ``ray.train.get_dataset_shard()`` since
the dataset has already been sharded across the workers.
Args:
tf_dataset_shard (tf.data.Dataset): A TensorFlow Dataset.
Returns:
A TensorFlow Dataset with autosharding turned off.
"""
options = tf.data.Options()
options.experimental_distribute.auto_shard_policy = \
tf.data.experimental.AutoShardPolicy.OFF
return tf_dataset_shard.with_options(options)
|
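A minimal sketch of what the helper does to a ``tf.data.Dataset`` (the input dataset here is a stand-in, not a real Ray Train shard):

```python
import tensorflow as tf

# Stand-in for the dataset a Ray Train worker would build from its shard.
tf_dataset_shard = tf.data.Dataset.from_tensor_slices(list(range(8))).batch(2)

# Same effect as prepare_dataset_shard(tf_dataset_shard): turn autosharding off
# because the data has already been sharded across workers.
options = tf.data.Options()
options.experimental_distribute.auto_shard_policy = (
    tf.data.experimental.AutoShardPolicy.OFF
)
tf_dataset_shard = tf_dataset_shard.with_options(options)
```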
54,473 |
def check_distribution_compatibility(
dist_old: BaseDistribution, dist_new: BaseDistribution
) -> None:
"""A function to check compatibility of two distributions.
Note that this method is not supposed to be called by library users.
Args:
dist_old: A distribution previously recorded in storage.
dist_new: A distribution newly added to storage.
Returns:
True denotes given distributions are compatible. Otherwise, they are not.
Raises:
ValueError:
If different distribution kind is set to ``dist_old`` and ``dist_new``,
or ``dist_old.choices`` doesn't match ``dist_new.choices``.
"""
if dist_old.__class__ != dist_new.__class__:
raise ValueError("Cannot set different distribution kind to the same parameter name.")
if not isinstance(dist_old, CategoricalDistribution):
return
if not isinstance(dist_new, CategoricalDistribution):
return
if dist_old.choices != dist_new.choices:
raise ValueError(
CategoricalDistribution.__name__ + " does not support dynamic value space."
)
|
def check_distribution_compatibility(
dist_old: BaseDistribution, dist_new: BaseDistribution
) -> None:
"""A function to check compatibility of two distributions.
Note that this method is not supposed to be called by library users.
Args:
dist_old: A distribution previously recorded in storage.
dist_new: A distribution newly added to storage.
Returns:
True denotes given distributions are compatible. Otherwise, they are not.
Raises:
ValueError:
If different distribution kind is set to ``dist_old`` and ``dist_new``,
or ``dist_old.choices`` doesn't match ``dist_new.choices``
for :class:`~optuna.distributions.CategoricalDistribution`.
"""
if dist_old.__class__ != dist_new.__class__:
raise ValueError("Cannot set different distribution kind to the same parameter name.")
if not isinstance(dist_old, CategoricalDistribution):
return
if not isinstance(dist_new, CategoricalDistribution):
return
if dist_old.choices != dist_new.choices:
raise ValueError(
CategoricalDistribution.__name__ + " does not support dynamic value space."
)
|
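A hedged usage sketch of the compatibility check, assuming the public ``optuna.distributions`` classes (``FloatDistribution`` stands in for "a different distribution kind"):

```python
from optuna.distributions import (
    CategoricalDistribution,
    FloatDistribution,
    check_distribution_compatibility,
)

# Same kind with identical choices: compatible, the check returns None.
check_distribution_compatibility(
    CategoricalDistribution(choices=("a", "b")),
    CategoricalDistribution(choices=("a", "b")),
)

# A different kind (or changed categorical choices) raises ValueError.
try:
    check_distribution_compatibility(
        CategoricalDistribution(choices=("a", "b")),
        FloatDistribution(low=0.0, high=1.0),
    )
except ValueError as err:
    print(err)
```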
32,588 |
def getexternalservices_command(client: Client, args: Dict[str, Any]) -> CommandResults:
"""
asm-getexternalservices command: Returns list of external services.
Args:
client (Client): CortexAttackSurfaceManagment client to use.
args (dict): all command arguments, usually passed from ``demisto.args()``.
``args['ip_address']`` IP Address to search on.
``args['domain']`` Domain to search on.
``args['is_active']`` Whether the service is active or not.
``args['discovery_type']`` How the service was discovered.
Returns:
CommandResults: A ``CommandResults`` object that is then passed to ``return_results``, that contains external services.
"""
ip_address = args.get('ip_address')
domain = args.get('domain')
is_active = args.get('is_active')
discovery_type = args.get('discovery_type')
# create list of search parameters or pass empty list.
search_params = []
if ip_address:
search_params.append({"field": "ip_address", "operator": "eq", "value": ip_address})
if domain:
search_params.append({"field": "domain", "operator": "contains", "value": domain})
if is_active:
search_params.append({"field": "is_active", "operator": "in", "value": [is_active]})
if discovery_type:
search_params.append({"field": "discovery_type", "operator": "in", "value": [discovery_type]})
response = client.getexternalservices_request(search_params)
parsed = response['reply']['external_services']
markdown = tableToMarkdown('External Services', parsed, removeNull=True)
command_results = CommandResults(
outputs_prefix='ASM.GetExternalServices',
outputs_key_field='service_id',
outputs=parsed,
raw_response=parsed,
readable_output=markdown
)
return command_results
|
def getexternalservices_command(client: Client, args: Dict[str, Any]) -> CommandResults:
"""
asm-getexternalservices command: Returns list of external services.
Args:
client (Client): CortexAttackSurfaceManagment client to use.
args (dict): all command arguments, usually passed from ``demisto.args()``.
``args['ip_address']`` IP Address to search on.
``args['domain']`` Domain to search on.
``args['is_active']`` Whether the service is active or not.
``args['discovery_type']`` How the service was discovered.
Returns:
CommandResults: A ``CommandResults`` object that is then passed to ``return_results``, that contains external services.
"""
ip_address = args.get('ip_address')
domain = args.get('domain')
is_active = args.get('is_active')
discovery_type = args.get('discovery_type')
# create list of search parameters or pass empty list.
search_params = []
if ip_address:
search_params.append({"field": "ip_address", "operator": "eq", "value": ip_address})
if domain:
search_params.append({"field": "domain", "operator": "contains", "value": domain})
if is_active:
search_params.append({"field": "is_active", "operator": "in", "value": [is_active]})
if discovery_type:
search_params.append({"field": "discovery_type", "operator": "in", "value": [discovery_type]})
response = client.getexternalservices_request(search_params)
parsed = response.get('reply', {}).get('external_services')
markdown = tableToMarkdown('External Services', parsed, removeNull=True)
command_results = CommandResults(
outputs_prefix='ASM.GetExternalServices',
outputs_key_field='service_id',
outputs=parsed,
raw_response=parsed,
readable_output=markdown
)
return command_results
|
53,866 |
def _validate_trusted_launch(namespace):
if not namespace.security_type or namespace.security_type.lower() != 'trustedlaunch':
return
if not namespace.enable_vtpm or not namespace.enable_secure_boot:
logger.warning('It is recommended to specify "--enable-secure-boot True" and "--enable-vtpm True" to receive'
' the full suite of security features that comes with Trusted Launch. Breaking change:'
' --enable-secure-boot will be enabled by default in Microsoft Build and Ignite')
if not namespace.enable_vtpm:
namespace.enable_vtpm = True
|
def _validate_trusted_launch(namespace):
if not namespace.security_type or namespace.security_type.lower() != 'trustedlaunch':
return
if not namespace.enable_vtpm or not namespace.enable_secure_boot:
logger.warning('It is recommended to specify "--enable-secure-boot True" and "--enable-vtpm True" to receive'
' the full suite of security features that comes with Trusted Launch. Please note that the "--enable-secure-boot" will be enabled by default in Microsoft Ignite Event (around November)')
if not namespace.enable_vtpm:
namespace.enable_vtpm = True
|
27,987 |
def main(args):
"""
List the checkers available in the specified (or all supported) analyzers
alongside with their description or enabled status in various formats.
"""
# If the given output format is not 'table', redirect logger's output to
# the stderr.
logger.setup_logger(args.verbose if 'verbose' in args else None,
None if args.output_format == 'table' else 'stderr')
context = analyzer_context.get_context()
working_analyzers, errored = analyzer_types.check_supported_analyzers(
args.analyzers,
context)
analyzer_environment = env.extend(context.path_env_extra,
context.ld_lib_path_extra)
analyzer_config_map = analyzer_types.build_config_handlers(
args, context, working_analyzers)
def uglify(text):
"""
csv and json format output contain this non human readable header
string: no CamelCase and no space.
"""
return text.lower().replace(' ', '_')
# List available checker profiles.
if 'profile' in args and args.profile == 'list':
if 'details' in args:
header = ['Profile name', 'Description']
rows = context.available_profiles.items()
else:
header = ['Profile name']
rows=[]
for key in context.available_profiles.keys():
rows.append((key,""))
if args.output_format in ['csv', 'json']:
header = list(map(uglify, header))
print(output_formatters.twodim_to_str(args.output_format,
header, rows))
return
# List checker config options.
if 'checker_config' in args:
if 'details' in args:
header = ['Option', 'Description']
else:
header = ['Option']
if args.output_format in ['csv', 'json']:
header = list(map(uglify, header))
rows = []
for analyzer in working_analyzers:
config_handler = analyzer_config_map.get(analyzer)
analyzer_class = analyzer_types.supported_analyzers[analyzer]
configs = analyzer_class.get_checker_config(config_handler,
analyzer_environment)
rows.extend((':'.join((analyzer, c[0])), c[1]) if 'details' in args
else (':'.join((analyzer, c[0])),) for c in configs)
print(output_formatters.twodim_to_str(args.output_format,
header, rows))
return
# List available checkers.
if 'details' in args:
header = ['Enabled', 'Name', 'Analyzer', 'Severity', 'Description']
else:
header = ['Name']
if args.output_format in ['csv', 'json']:
header = list(map(uglify, header))
rows = []
for analyzer in working_analyzers:
config_handler = analyzer_config_map.get(analyzer)
analyzer_class = analyzer_types.supported_analyzers[analyzer]
checkers = analyzer_class.get_analyzer_checkers(config_handler,
analyzer_environment)
default_checker_cfg = context.checker_config.get(
analyzer + '_checkers')
profile_checkers = None
if 'profile' in args:
if args.profile not in context.available_profiles:
LOG.error("Checker profile '%s' does not exist!",
args.profile)
LOG.error("To list available profiles, use '--profile list'.")
sys.exit(1)
profile_checkers = [(args.profile, True)]
config_handler.initialize_checkers(context.available_profiles,
context.package_root,
checkers,
default_checker_cfg,
profile_checkers)
for checker_name, value in config_handler.checks().items():
state, description = value
if state != CheckerState.enabled and 'profile' in args:
continue
if state == CheckerState.enabled and 'only_disabled' in args:
continue
elif state != CheckerState.enabled and 'only_enabled' in args:
continue
if args.output_format == 'json':
state = state == CheckerState.enabled
else:
state = '+' if state == CheckerState.enabled else '-'
if 'details' in args:
severity = context.severity_map.get(checker_name)
rows.append([state, checker_name, analyzer,
severity, description])
else:
rows.append([checker_name])
if 'show_warnings' in args:
severity = context.severity_map.get('clang-diagnostic-')
for warning in get_warnings(analyzer_environment):
if 'details' in args:
rows.append(['', warning, '-', severity, '-'])
else:
rows.append([warning])
if rows:
print(output_formatters.twodim_to_str(args.output_format,
header, rows))
for analyzer_binary, reason in errored:
LOG.error("Failed to get checkers for '%s'!"
"The error reason was: '%s'", analyzer_binary, reason)
LOG.error("Please check your installation and the "
"'config/package_layout.json' file!")
|
def main(args):
"""
List the checkers available in the specified (or all supported) analyzers
alongside with their description or enabled status in various formats.
"""
# If the given output format is not 'table', redirect logger's output to
# the stderr.
logger.setup_logger(args.verbose if 'verbose' in args else None,
None if args.output_format == 'table' else 'stderr')
context = analyzer_context.get_context()
working_analyzers, errored = analyzer_types.check_supported_analyzers(
args.analyzers,
context)
analyzer_environment = env.extend(context.path_env_extra,
context.ld_lib_path_extra)
analyzer_config_map = analyzer_types.build_config_handlers(
args, context, working_analyzers)
def uglify(text):
"""
csv and json format output contain this non human readable header
string: no CamelCase and no space.
"""
return text.lower().replace(' ', '_')
# List available checker profiles.
if 'profile' in args and args.profile == 'list':
if 'details' in args:
header = ['Profile name', 'Description']
rows = context.available_profiles.items()
else:
header = ['Profile name']
rows = [(key, "") for key in context.available_profiles.keys()]
for key in context.available_profiles.keys():
rows.append((key,""))
if args.output_format in ['csv', 'json']:
header = list(map(uglify, header))
print(output_formatters.twodim_to_str(args.output_format,
header, rows))
return
# List checker config options.
if 'checker_config' in args:
if 'details' in args:
header = ['Option', 'Description']
else:
header = ['Option']
if args.output_format in ['csv', 'json']:
header = list(map(uglify, header))
rows = []
for analyzer in working_analyzers:
config_handler = analyzer_config_map.get(analyzer)
analyzer_class = analyzer_types.supported_analyzers[analyzer]
configs = analyzer_class.get_checker_config(config_handler,
analyzer_environment)
rows.extend((':'.join((analyzer, c[0])), c[1]) if 'details' in args
else (':'.join((analyzer, c[0])),) for c in configs)
print(output_formatters.twodim_to_str(args.output_format,
header, rows))
return
# List available checkers.
if 'details' in args:
header = ['Enabled', 'Name', 'Analyzer', 'Severity', 'Description']
else:
header = ['Name']
if args.output_format in ['csv', 'json']:
header = list(map(uglify, header))
rows = []
for analyzer in working_analyzers:
config_handler = analyzer_config_map.get(analyzer)
analyzer_class = analyzer_types.supported_analyzers[analyzer]
checkers = analyzer_class.get_analyzer_checkers(config_handler,
analyzer_environment)
default_checker_cfg = context.checker_config.get(
analyzer + '_checkers')
profile_checkers = None
if 'profile' in args:
if args.profile not in context.available_profiles:
LOG.error("Checker profile '%s' does not exist!",
args.profile)
LOG.error("To list available profiles, use '--profile list'.")
sys.exit(1)
profile_checkers = [(args.profile, True)]
config_handler.initialize_checkers(context.available_profiles,
context.package_root,
checkers,
default_checker_cfg,
profile_checkers)
for checker_name, value in config_handler.checks().items():
state, description = value
if state != CheckerState.enabled and 'profile' in args:
continue
if state == CheckerState.enabled and 'only_disabled' in args:
continue
elif state != CheckerState.enabled and 'only_enabled' in args:
continue
if args.output_format == 'json':
state = state == CheckerState.enabled
else:
state = '+' if state == CheckerState.enabled else '-'
if 'details' in args:
severity = context.severity_map.get(checker_name)
rows.append([state, checker_name, analyzer,
severity, description])
else:
rows.append([checker_name])
if 'show_warnings' in args:
severity = context.severity_map.get('clang-diagnostic-')
for warning in get_warnings(analyzer_environment):
if 'details' in args:
rows.append(['', warning, '-', severity, '-'])
else:
rows.append([warning])
if rows:
print(output_formatters.twodim_to_str(args.output_format,
header, rows))
for analyzer_binary, reason in errored:
LOG.error("Failed to get checkers for '%s'!"
"The error reason was: '%s'", analyzer_binary, reason)
LOG.error("Please check your installation and the "
"'config/package_layout.json' file!")
|
6,183 |
def updateInstance(version, hosts, retry):
"""
Update each server of an instance and restart them
:param version: version vArBpC you want to update to
:param hosts: list of hosts to be updated
:param retry: number of retry attempts on hosts that have failed to update
"""
result = updateHosts(version, hosts)
if not result['OK']:
return result
updateSuccess = result['Value'][0]
updateFail = result['Value'][1]
restartSuccess = []
restartFail = []
for host in updateSuccess:
result = restartHost(host)
if result['OK']:
restartSuccess.append(host)
else:
restartFail.append(host)
if not restartFail and not updateFail:
return S_OK("Successfully updated and restarted all hosts")
gLogger.notice("XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX")
gLogger.notice("XXXXX There were problems in the update process XXXXX")
gLogger.notice("Succeeded to update:")
for host in updateSuccess:
gLogger.notice(" + %s" % host)
gLogger.notice("Succeeded to restart:")
for host in restartSuccess:
gLogger.notice(" + %s" % host)
gLogger.notice("Failed to update:")
for host in updateFail:
gLogger.notice(" - %s" % host)
gLogger.notice("Failed to restart:")
for host in restartFail:
gLogger.notice(" - %s" % host)
gLogger.notice("XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX")
if retry > 0:
retryHosts = list(set(updateFail + restartFail))
gLogger.notice("Retrying update on (%s atempts remaining):" % retry)
for host in retryHosts:
gLogger.notice(" - %s" % host)
gLogger.notice("XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX")
return updateInstance(version, retryHosts, retry - 1)
return S_ERROR("Update failed!")
|
def updateInstance(version, hosts, retry):
"""
Update each server of an instance and restart them
:param version: version vArBpC you want to update to
:param hosts: list of hosts to be updated
:param int retry: number of retry attempts on hosts that have failed to update
"""
result = updateHosts(version, hosts)
if not result['OK']:
return result
updateSuccess = result['Value'][0]
updateFail = result['Value'][1]
restartSuccess = []
restartFail = []
for host in updateSuccess:
result = restartHost(host)
if result['OK']:
restartSuccess.append(host)
else:
restartFail.append(host)
if not restartFail and not updateFail:
return S_OK("Successfully updated and restarted all hosts")
gLogger.notice("XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX")
gLogger.notice("XXXXX There were problems in the update process XXXXX")
gLogger.notice("Succeeded to update:")
for host in updateSuccess:
gLogger.notice(" + %s" % host)
gLogger.notice("Succeeded to restart:")
for host in restartSuccess:
gLogger.notice(" + %s" % host)
gLogger.notice("Failed to update:")
for host in updateFail:
gLogger.notice(" - %s" % host)
gLogger.notice("Failed to restart:")
for host in restartFail:
gLogger.notice(" - %s" % host)
gLogger.notice("XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX")
if retry > 0:
retryHosts = list(set(updateFail + restartFail))
gLogger.notice("Retrying update on (%s atempts remaining):" % retry)
for host in retryHosts:
gLogger.notice(" - %s" % host)
gLogger.notice("XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX")
return updateInstance(version, retryHosts, retry - 1)
return S_ERROR("Update failed!")
|
5,858 |
def test_gaussian_kde_subclassing():
x1 = np.array([-7, -5, 1, 4, 5], dtype=float)
xs = np.linspace(-10, 10, num=50)
# gaussian_kde itself
kde = stats.gaussian_kde(x1)
ys = kde(xs)
# subclass 1
kde1 = _kde_subclass1(x1)
y1 = kde1(xs)
assert_array_almost_equal_nulp(ys, y1, nulp=10)
# subclass 2
kde2 = _kde_subclass2(x1)
y2 = kde2(xs)
assert_array_almost_equal_nulp(ys, y2, nulp=10)
# subclass 3 was removed because we have no obligation to maintain support
# for manual invocation of private methods
# subclass 4
kde4 = _kde_subclass4(x1)
y4 = kde4(x1)
y_expected = [0.06292987, 0.06346938, 0.05860291, 0.08657652, 0.07904017]
assert_array_almost_equal(y_expected, y4, decimal=6)
# Not a subclass, but check for use of _compute_covariance()
kde5 = kde
kde5.covariance_factor = lambda: kde.factor
kde5._compute_covariance()
y5 = kde5(xs)
assert_array_almost_equal_nulp(ys, y5, nulp=10)
|
def test_gaussian_kde_subclassing():
x1 = np.array([-7, -5, 1, 4, 5], dtype=float)
xs = np.linspace(-10, 10, num=50)
# gaussian_kde itself
kde = stats.gaussian_kde(x1)
ys = kde(xs)
# subclass 1
kde1 = _kde_subclass1(x1)
y1 = kde1(xs)
assert_array_almost_equal_nulp(ys, y1, nulp=10)
# subclass 2
kde2 = _kde_subclass2(x1)
y2 = kde2(xs)
assert_array_almost_equal_nulp(ys, y2, nulp=10)
# subclass 3 was removed because we have no obligation to maintain support
# for user invocation of private methods
# subclass 4
kde4 = _kde_subclass4(x1)
y4 = kde4(x1)
y_expected = [0.06292987, 0.06346938, 0.05860291, 0.08657652, 0.07904017]
assert_array_almost_equal(y_expected, y4, decimal=6)
# Not a subclass, but check for use of _compute_covariance()
kde5 = kde
kde5.covariance_factor = lambda: kde.factor
kde5._compute_covariance()
y5 = kde5(xs)
assert_array_almost_equal_nulp(ys, y5, nulp=10)
|
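For context, a short sketch of the subclassing pattern this test exercises, following the usual scipy recipe of overriding covariance_factor to change the bandwidth rule (a sketch only, not the test's actual _kde_subclass helpers):
import numpy as np
from scipy import stats

class SilvermanKDE(stats.gaussian_kde):
    # use Silverman's rule instead of the default Scott's rule for bandwidth
    def covariance_factor(self):
        return self.silverman_factor()

x1 = np.array([-7.0, -5.0, 1.0, 4.0, 5.0])
kde = SilvermanKDE(x1)
print(kde(np.linspace(-10, 10, 5)))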
17,438 |
def test_mean_dtype():
ds = Dataset()
ds["pos"] = [1, 2, 3]
ds["data"] = ("pos", "time"), [[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]]
ds["var"] = "pos", [2, 3, 4]
ds2 = ds.mean(dim="time")
assert all(ds2["var"] == ds["var"])
assert ds2["var"].dtype == ds["var"].dtype
|
def test_mean_dtype():
ds = Dataset()
ds["pos"] = [1, 2, 3]
ds["data"] = ("pos", "time"), [[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]]
ds["var"] = "pos", [2, 3, 4]
result = ds.mean(dim="time")
assert all(ds2["var"] == ds["var"])
assert ds2["var"].dtype == ds["var"].dtype
|
2,217 |
def test_checksubparams_n_subsamples_if_less_samples_than_features():
random_state = np.random.RandomState(0)
n_samples, n_features = 10, 20
X = random_state.normal(size=(n_samples, n_features))
y = random_state.normal(size=n_samples)
theil_sen = TheilSenRegressor(n_subsamples=9, random_state=0)
with pytest.error(ValueError):
theil_sen.fit(X, y)
|
def test_checksubparams_n_subsamples_if_less_samples_than_features():
random_state = np.random.RandomState(0)
n_samples, n_features = 10, 20
X = random_state.normal(size=(n_samples, n_features))
y = random_state.normal(size=n_samples)
theil_sen = TheilSenRegressor(n_subsamples=9, random_state=0)
with pytest.raises(ValueError):
theil_sen.fit(X, y)
|
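A reminder of the API difference this entry corrects: pytest has no pytest.error, and pytest.raises is the context manager that asserts an exception is raised. A minimal self-contained example:
import pytest

def divide(a, b):
    return a / b

def test_divide_by_zero():
    with pytest.raises(ZeroDivisionError):
        divide(1, 0)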
30,549 |
def filter_obj(obj: dict, is_att: bool = True) -> dict:
""" Filter Event, Attribute
Args:
obj(dict): dictionary MISP event or attribute.
is_att: True if dict represent Attribute otherwise Event (False)
Returns:
dict: Filtered dict by configuration.
"""
metadata_state = demisto.params().get('metadata')
related_events_state = demisto.params().get('related_events')
selected_keys: List[str] = [item.lower() for item in demisto.params().get('context_select')]
ignored_keys_metadata: List[str] = ["Galaxy", "Tag"]
ignored_keys_related_events: List[str] = ["RelatedEvent"]
for dict_key in list(obj.keys()):
if dict_key in ignored_keys_metadata:
if not metadata_state:
obj[dict_key] = []
else:
continue
elif dict_key in ignored_keys_related_events:
if not related_events_state:
obj[dict_key] = []
else:
continue
elif is_att and selected_keys and dict_key not in selected_keys:
obj.pop(dict_key)
return obj
|
def filter_obj(obj: dict, is_att: bool = True) -> dict:
""" Filter Event, Attribute
Args:
obj(dict): dictionary MISP event or attribute.
is_att: True if dict represent Attribute otherwise Event (False)
Returns:
dict: Filtered dict by configuration.
"""
metadata_state = demisto.params().get('metadata')
related_events_state = demisto.params().get('related_events')
selected_keys: List[str] = [item.lower() for item in demisto.params().get('context_select') or []]
ignored_keys_metadata: List[str] = ["Galaxy", "Tag"]
ignored_keys_related_events: List[str] = ["RelatedEvent"]
for dict_key in list(obj.keys()):
if dict_key in ignored_keys_metadata:
if not metadata_state:
obj[dict_key] = []
else:
continue
elif dict_key in ignored_keys_related_events:
if not related_events_state:
obj[dict_key] = []
else:
continue
elif is_att and selected_keys and dict_key not in selected_keys:
obj.pop(dict_key)
return obj
|
46,447 |
def concatenate(series: Sequence['TimeSeries'],
axis: Union[str, int] = 0,
ignore_time_axes: bool = False):
"""Concatenates multiple ``TimeSeries`` along a given axis.
``axis`` can be an integer in (0, 1, 2) to denote (time, component, sample) or, alternatively,
a string denoting the corresponding dimension of the underlying ``DataArray``.
Parameters
----------
series : Sequence[TimeSeries]
sequence of ``TimeSeries`` to concatenate
axis : Union[str, int]
axis along which the series will be concatenated.
ignore_time_axes : bool
Allow concatenation even when some series do not have matching time axes.
When done along component or sample dimensions, concatenation will work as long as the series
have the same lengths (in this case the resulting series will have the time axis of the first
provided series). When done along time dimension, concatenation will work even if the time axes
are not contiguous (in this case, the resulting series will have a start time matching the start time
of the first provided series). Default: False.
Returns
-------
TimeSeries
concatenated series
"""
time_dims = [ts.time_dim for ts in series]
if isinstance(axis, str):
if axis == DIMS[1]:
axis = 1
elif axis == DIMS[2]:
axis = 2
else:
raise_if_not(len(set(time_dims)) == 1 and axis == time_dims[0],
'Unrecognised `axis` name. If `axis` denotes the time axis, all provided '
'series must have the same time axis name (if that is not the case, try providing '
'`axis=0` to concatenate along time dimension).')
axis = 0
time_dim_name = time_dims[0] # At this point all series are supposed to have same time dim name
da_sequence = [ts.data_array(copy=False) for ts in series]
component_axis_equal = len(set([ts.width for ts in series])) == 1
sample_axis_equal = len(set([ts.n_samples for ts in series])) == 1
if axis == 0:
# time
raise_if((axis == 0 and not (component_axis_equal and sample_axis_equal)),
'when concatenating along time dimension, the component and sample dimensions of all '
'provided series must match.')
da_concat = xr.concat(da_sequence, dim=time_dim_name)
# check, if timeseries are consecutive
consecutive_time_axes = True
for i in range(1, len(series)):
if series[i - 1].end_time() + series[0].freq != \
series[i].start_time():
consecutive_time_axes = False
break
if not consecutive_time_axes:
raise_if_not(ignore_time_axes, "When concatenating over time axis, all series need to be contiguous"
"in the time dimension. Use `ignore_time_axis=True` to override "
"this behavior and concatenate the series by extending the time axis "
"of the first series.")
if series[0].has_datetime_index:
tindex = pd.date_range(series[0].start_time(),
freq=series[0].freq,
periods=da_concat.shape[0])
else:
tindex = pd.RangeIndex(start=series[0].start_time(),
stop=series[0].start_time() + da_concat.shape[0],
step=1)
da_concat = da_concat.assign_coords({time_dim_name: tindex})
else:
time_axes_equal = all(list(map(lambda t: t[0].has_same_time_as(t[1]), zip(series[0:-1], series[1:]))))
time_axes_ok = (time_axes_equal if not ignore_time_axes else len(set([len(ts) for ts in series])) == 1)
raise_if_not((time_axes_ok and (axis == 1 and sample_axis_equal) or (axis == 2 and component_axis_equal)),
'When concatenating along component or sample dimensions, all the series must have the same time '
'axes (unless `ignore_time_axes` is True), or time axes of same lengths (if `ignore_time_axes` is '
'True), and all series must have the same number of samples (if concatenating along component '
'dimension), or the same number of components (if concatenating along sample dimension).')
# we concatenate raw values using Numpy because not all series might have the same time axes
# and joining using xarray.concatenate() won't work in some cases
concat_vals = np.concatenate([da.values for da in da_sequence], axis=axis)
if axis == 1:
# when concatenating along component dimension, we have to re-create a component index
component_coords = []
existing_components = set()
for i, ts in enumerate(series):
for comp in ts.components:
if comp not in existing_components:
component_coords.append(comp)
existing_components.add(comp)
else:
new_comp_name = '{}_{}'.format(i, comp)
component_coords.append(new_comp_name)
existing_components.add(new_comp_name)
component_index = pd.Index(component_coords)
else:
component_index = da_sequence[0].get_index(DIMS[1])
da_concat = xr.DataArray(concat_vals,
dims=(time_dim_name,) + DIMS[-2:],
coords={time_dim_name: series[0].time_index, DIMS[1]: component_index})
return TimeSeries(da_concat)
|
def concatenate(series: Sequence['TimeSeries'],
axis: Union[str, int] = 0,
ignore_time_axes: bool = False):
"""Concatenates multiple ``TimeSeries`` along a given axis.
``axis`` can be an integer in (0, 1, 2) to denote (time, component, sample) or, alternatively,
a string denoting the corresponding dimension of the underlying ``DataArray``.
Parameters
----------
series : Sequence[TimeSeries]
sequence of ``TimeSeries`` to concatenate
axis : Union[str, int]
axis along which the series will be concatenated.
ignore_time_axes : bool
Allow concatenation even when some series do not have matching time axes.
When done along component or sample dimensions, concatenation will work as long as the series
have the same lengths (in this case the resulting series will have the time axis of the first
provided series). When done along time dimension, concatenation will work even if the time axes
are not contiguous (in this case, the resulting series will have a start time matching the start time
of the first provided series). Default: False.
Returns
-------
TimeSeries
concatenated series
"""
time_dims = [ts.time_dim for ts in series]
if isinstance(axis, str):
if axis == DIMS[1]:
axis = 1
elif axis == DIMS[2]:
axis = 2
else:
raise_if_not(len(set(time_dims)) == 1 and axis == time_dims[0],
'Unrecognised `axis` name. If `axis` denotes the time axis, all provided '
'series must have the same time axis name (if that is not the case, try providing '
'`axis=0` to concatenate along time dimension).')
axis = 0
time_dim_name = time_dims[0] # At this point all series are supposed to have same time dim name
da_sequence = [ts.data_array(copy=False) for ts in series]
component_axis_equal = len(set([ts.width for ts in series])) == 1
sample_axis_equal = len(set([ts.n_samples for ts in series])) == 1
if axis == 0:
# time
raise_if((axis == 0 and not (component_axis_equal and sample_axis_equal)),
'when concatenating along time dimension, the component and sample dimensions of all '
'provided series must match.')
da_concat = xr.concat(da_sequence, dim=time_dim_name)
# check, if timeseries are consecutive
consecutive_time_axes = True
for i in range(1, len(series)):
if series[i - 1].end_time() + series[0].freq != \
series[i].start_time():
consecutive_time_axes = False
break
if not consecutive_time_axes:
raise_if_not(ignore_time_axes, "When concatenating over time axis, all series need to be contiguous"
"in the time dimension. Use `ignore_time_axes=True` to override "
"this behavior and concatenate the series by extending the time axis "
"of the first series.")
if series[0].has_datetime_index:
tindex = pd.date_range(series[0].start_time(),
freq=series[0].freq,
periods=da_concat.shape[0])
else:
tindex = pd.RangeIndex(start=series[0].start_time(),
stop=series[0].start_time() + da_concat.shape[0],
step=1)
da_concat = da_concat.assign_coords({time_dim_name: tindex})
else:
time_axes_equal = all(list(map(lambda t: t[0].has_same_time_as(t[1]), zip(series[0:-1], series[1:]))))
time_axes_ok = (time_axes_equal if not ignore_time_axes else len(set([len(ts) for ts in series])) == 1)
raise_if_not((time_axes_ok and (axis == 1 and sample_axis_equal) or (axis == 2 and component_axis_equal)),
'When concatenating along component or sample dimensions, all the series must have the same time '
'axes (unless `ignore_time_axes` is True), or time axes of same lengths (if `ignore_time_axes` is '
'True), and all series must have the same number of samples (if concatenating along component '
'dimension), or the same number of components (if concatenating along sample dimension).')
# we concatenate raw values using Numpy because not all series might have the same time axes
# and joining using xarray.concatenate() won't work in some cases
concat_vals = np.concatenate([da.values for da in da_sequence], axis=axis)
if axis == 1:
# when concatenating along component dimension, we have to re-create a component index
component_coords = []
existing_components = set()
for i, ts in enumerate(series):
for comp in ts.components:
if comp not in existing_components:
component_coords.append(comp)
existing_components.add(comp)
else:
new_comp_name = '{}_{}'.format(i, comp)
component_coords.append(new_comp_name)
existing_components.add(new_comp_name)
component_index = pd.Index(component_coords)
else:
component_index = da_sequence[0].get_index(DIMS[1])
da_concat = xr.DataArray(concat_vals,
dims=(time_dim_name,) + DIMS[-2:],
coords={time_dim_name: series[0].time_index, DIMS[1]: component_index})
return TimeSeries(da_concat)
|
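A minimal sketch (plain Python plus pandas, with made-up component names) of the de-duplication loop run when concatenating along the component axis: a repeated component name gets prefixed with the position of the series that contributes it.
import pandas as pd

def dedup_components(series_components):
    coords, seen = [], set()
    for i, components in enumerate(series_components):
        for comp in components:
            name = comp if comp not in seen else "{}_{}".format(i, comp)
            coords.append(name)
            seen.add(name)
    return pd.Index(coords)

print(dedup_components([["temp", "load"], ["temp"]]))
# Index(['temp', 'load', '1_temp'], dtype='object')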
3,267 |
def make_trend(store_event, project_id, event, name, first_duration, second_duration, number_transactions=2, period_mins=60):
for i in range(number_transactions):
time_between = period_mins / number_transactions
minutes = period_mins - ((i + 1) * time_between) + (time_between / 2)
if (i < (number_transactions / 2)):
event_start = before_now(minutes=minutes, seconds=first_duration)
else:
event_start = before_now(minutes=minutes, seconds=second_duration)
event_end = before_now(minutes=minutes)
transaction = make_nth_transaction(event, name, i, event_start, event_end)
store_event(data=transaction, project_id=project_id)
|
def make_trend(store_event, project_id, event, name, first_duration, second_duration, number_transactions=2, period_mins=60):
for i in range(number_transactions):
time_between = period_mins / number_transactions
minutes = period_mins - ((i + 1) * time_between) + (time_between / 2)
if i < (number_transactions / 2):
event_start = before_now(minutes=minutes, seconds=first_duration)
else:
event_start = before_now(minutes=minutes, seconds=second_duration)
event_end = before_now(minutes=minutes)
transaction = make_nth_transaction(event, name, i, event_start, event_end)
store_event(data=transaction, project_id=project_id)
|
47,551 |
def tf_shard_checkpoint(weights, max_shard_size="10GB"):
"""
Splits a model state dictionary in sub-checkpoints so that the final size of each sub-checkpoint does not exceed a
given size.
The sub-checkpoints are determined by iterating through the `state_dict` in the order of its keys, so there is no
optimization made to make each sub-checkpoint as close as possible to the maximum size passed. For example, if the
limit is 10GB and we have weights of sizes [6GB, 6GB, 2GB, 6GB, 2GB, 2GB] they will get sharded as [6GB], [6+2GB],
[6+2+2GB] and not [6+2+2GB], [6+2GB], [6GB].
<Tip warning={true}>
If one of the model's weight is bigger that `max_sahrd_size`, it will end up in its own sub-checkpoint which will
have a size greater than `max_shard_size`.
</Tip>
Args:
state_dict (`Dict[str, torch.Tensor]`): The state dictionary of a model to save.
max_shard_size (`int` or `str`, *optional*, defaults to `"10GB"`):
The maximum size of each sub-checkpoint. If expressed as a string, needs to be digits followed by a unit
(like `"5MB"`).
"""
max_shard_size = convert_file_size_to_int(max_shard_size)
sharded_state_dicts = []
current_block = []
current_block_size = 0
total_size = 0
for item in weights:
weight_size = item.numpy().size * dtype_byte_size(item.dtype)
# If this weight is going to tip up over the maximal size, we split.
if current_block_size + weight_size > max_shard_size:
sharded_state_dicts.append(current_block)
current_block = []
current_block_size = 0
current_block.append(item)
current_block_size += weight_size
total_size += weight_size
# Add the last block
sharded_state_dicts.append(current_block)
# If we only have one shard, we return it
if len(sharded_state_dicts) == 1:
return {TF2_WEIGHTS_NAME: sharded_state_dicts[0]}, None
# Otherwise, let's build the index
weight_map = {}
shards = {}
for idx, shard in enumerate(sharded_state_dicts):
shard_file = TF2_WEIGHTS_NAME.replace(".h5", f"-{idx+1:05d}-of-{len(sharded_state_dicts):05d}.h5")
shards[shard_file] = shard
for weight in shard:
# remove the class name from the layer name for smooth loading
# this could be removed only if the loading is purely based on indexes
# and not layer names
# weight_name = "/".join(weight.name.split("/")[1:])
weight_name = weight.name
weight_map[weight_name] = shard_file
# Add the metadata
metadata = {"total_size": total_size}
index = {"metadata": metadata, "weight_map": weight_map}
return shards, index
|
def tf_shard_checkpoint(weights, max_shard_size="10GB"):
"""
Splits a model state dictionary in sub-checkpoints so that the final size of each sub-checkpoint does not exceed a
given size.
The sub-checkpoints are determined by iterating through the `state_dict` in the order of its keys, so there is no
optimization made to make each sub-checkpoint as close as possible to the maximum size passed. For example, if the
limit is 10GB and we have weights of sizes [6GB, 6GB, 2GB, 6GB, 2GB, 2GB] they will get sharded as [6GB], [6+2GB],
[6+2+2GB] and not [6+2+2GB], [6+2GB], [6GB].
<Tip warning={true}>
If one of the model's weights is bigger than `max_shard_size`, it will end up in its own sub-checkpoint which will
have a size greater than `max_shard_size`.
</Tip>
Args:
state_dict (`Dict[str, torch.Tensor]`): The state dictionary of a model to save.
max_shard_size (`int` or `str`, *optional*, defaults to `"10GB"`):
The maximum size of each sub-checkpoint. If expressed as a string, needs to be digits followed by a unit
(like `"5MB"`).
"""
max_shard_size = convert_file_size_to_int(max_shard_size)
sharded_state_dicts = []
current_block = []
current_block_size = 0
total_size = 0
for item in weights:
weight_size = item.numpy().size * dtype_byte_size(item.dtype)
# If this weight is going to tip up over the maximal size, we split.
if current_block_size + weight_size > max_shard_size:
sharded_state_dicts.append(current_block)
current_block = []
current_block_size = 0
current_block.append(item)
current_block_size += weight_size
total_size += weight_size
# Add the last block
sharded_state_dicts.append(current_block)
# If we only have one shard, we return it
if len(sharded_state_dicts) == 1:
return {TF2_WEIGHTS_NAME: sharded_state_dicts[0]}, None
# Otherwise, let's build the index
weight_map = {}
shards = {}
for idx, shard in enumerate(sharded_state_dicts):
shard_file = TF2_WEIGHTS_NAME.replace(".h5", f"-{idx+1:05d}-of-{len(sharded_state_dicts):05d}.h5")
shards[shard_file] = shard
for weight in shard:
# remove the class name from the layer name for smooth loading
# this could be removed only if the loading is purely based on indexes
# and not layer names
# weight_name = "/".join(weight.name.split("/")[1:])
weight_name = weight.name
weight_map[weight_name] = shard_file
# Add the metadata
metadata = {"total_size": total_size}
index = {"metadata": metadata, "weight_map": weight_map}
return shards, index
|
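A minimal sketch of the greedy splitting rule described in the docstring above, using plain integer sizes (think GB) instead of TensorFlow weights:
def greedy_shards(sizes, max_shard_size):
    shards, block, block_size = [], [], 0
    for size in sizes:
        # start a new block once the next item would tip over the limit
        if block_size + size > max_shard_size:
            shards.append(block)
            block, block_size = [], 0
        block.append(size)
        block_size += size
    shards.append(block)
    return shards

print(greedy_shards([6, 6, 2, 6, 2, 2], 10))  # [[6], [6, 2], [6, 2, 2]]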
30,105 |
def get_manifest(idx, *, require=True, rebuild=False):
"""
Retrieve a manifest for this idx, loaded with `load_file_as_index`.
If a manifest exists and `rebuild` is False, return the manifest..
If a manifest does not exist or `rebuild` is True, try to build one.
If a manifest cannot be built and `require` is True, error exit.
In the case where `require=False` and a manifest cannot be built,
may return None. Otherwise always returns a manifest.
"""
from sourmash.index import CollectionManifest
m = idx.manifest
# has one, and don't want to rebuild? easy! return!
if m is not None and not rebuild:
debug_literal("get_manifest: found manifest")
return m
debug_literal(f"get_manifest: no manifest found / rebuild={rebuild}")
# CTB: CollectionManifest.create_manifest wants (ss, iloc).
# so this is an adaptor function! Might want to just change
# what `create_manifest` takes.
def manifest_iloc_iter(idx):
for (ss, loc, iloc) in idx._signatures_with_internal():
yield ss, iloc
# need to build one...
try:
m = CollectionManifest.create_manifest(manifest_iloc_iter(idx),
include_signature=False)
debug_literal("get_manifest: rebuilt manifest.")
except NotImplementedError:
if require:
error(f"ERROR: manifests cannot be generated for {idx.location}")
sys.exit(-1)
else:
debug_literal("get_manifest: cannot build manifest, not req'd")
return None
return m
|
def get_manifest(idx, *, require=True, rebuild=False):
"""
Retrieve a manifest for this idx, loaded with `load_file_as_index`.
If a manifest exists and `rebuild` is False, return the manifest.
If a manifest does not exist or `rebuild` is True, try to build one.
If a manifest cannot be built and `require` is True, error exit.
In the case where `require=False` and a manifest cannot be built,
may return None. Otherwise always returns a manifest.
"""
from sourmash.index import CollectionManifest
m = idx.manifest
# has one, and don't want to rebuild? easy! return!
if m is not None and not rebuild:
debug_literal("get_manifest: found manifest")
return m
debug_literal(f"get_manifest: no manifest found / rebuild={rebuild}")
# CTB: CollectionManifest.create_manifest wants (ss, iloc).
# so this is an adaptor function! Might want to just change
# what `create_manifest` takes.
def manifest_iloc_iter(idx):
for (ss, loc, iloc) in idx._signatures_with_internal():
yield ss, iloc
# need to build one...
try:
m = CollectionManifest.create_manifest(manifest_iloc_iter(idx),
include_signature=False)
debug_literal("get_manifest: rebuilt manifest.")
except NotImplementedError:
if require:
error(f"ERROR: manifests cannot be generated for {idx.location}")
sys.exit(-1)
else:
debug_literal("get_manifest: cannot build manifest, not req'd")
return None
return m
|
10,463 |
def safe_eval(expr, locals=None, include_exceptions=False):
'''
This is intended for allowing things like:
with_items: a_list_variable
Where Jinja2 would return a string but we do not want to allow it to
call functions (outside of Jinja2, where the env is constrained).
Based on:
http://stackoverflow.com/questions/12523516/using-ast-and-whitelists-to-make-pythons-eval-safe
'''
locals = {} if locals is None else locals
# define certain JSON types
# eg. JSON booleans are unknown to python eval()
OUR_GLOBALS = {
'__builtins__': {}, # avoid global builtins as per eval docs
'false': False,
'null': None,
'true': True,
# also add back some builtins we do need
'True': True,
'False': False,
'None': None
}
# this is the whitelist of AST nodes we are going to
# allow in the evaluation. Any node type other than
# those listed here will raise an exception in our custom
# visitor class defined below.
SAFE_NODES = set(
(
ast.Add,
ast.BinOp,
# ast.Call,
ast.Compare,
ast.Dict,
ast.Div,
ast.Expression,
ast.List,
ast.Load,
ast.Mult,
ast.Num,
ast.Name,
ast.Str,
ast.Sub,
ast.USub,
ast.Tuple,
ast.UnaryOp,
)
)
# AST node types were expanded after 2.6
if sys.version_info[:2] >= (2, 7):
SAFE_NODES.update(
set(
(ast.Set,)
)
)
# And in Python 3.4 too
if sys.version_info[:2] >= (3, 4):
SAFE_NODES.update(
set(
(ast.NameConstant,)
)
)
# And in Python 3.6 too, although not encountered until Python 3.8, see https://bugs.python.org/issue32892
if sys.version_info[:2] >= (3, 6):
SAFE_NODES.update(
set(
(ast.Constant,)
)
)
filter_list = []
for filter_ in filter_loader.all():
filter_list.extend(filter_.filters().keys())
test_list = []
for test in test_loader.all():
test_list.extend(test.tests().keys())
CALL_WHITELIST = C.DEFAULT_CALLABLE_WHITELIST + filter_list + test_list
class CleansingNodeVisitor(ast.NodeVisitor):
def generic_visit(self, node, inside_call=False):
if type(node) not in SAFE_NODES:
raise Exception("invalid expression (%s)" % expr)
elif isinstance(node, ast.Call):
inside_call = True
elif isinstance(node, ast.Name) and inside_call:
# Disallow calls to builtin functions that we have not vetted
# as safe. Other functions are excluded by setting locals in
# the call to eval() later on
if hasattr(builtins, node.id) and node.id not in CALL_WHITELIST:
raise Exception("invalid function: %s" % node.id)
# iterate over all child nodes
for child_node in ast.iter_child_nodes(node):
self.generic_visit(child_node, inside_call)
if not isinstance(expr, string_types):
# already templated to a datastructure, perhaps?
if include_exceptions:
return (expr, None)
return expr
cnv = CleansingNodeVisitor()
try:
parsed_tree = ast.parse(expr, mode='eval')
cnv.visit(parsed_tree)
compiled = compile(parsed_tree, to_native(expr), 'eval')
# Note: passing our own globals and locals here constrains what
# callables (and other identifiers) are recognized. this is in
# addition to the filtering of builtins done in CleansingNodeVisitor
result = eval(compiled, OUR_GLOBALS, dict(locals))
if PY2:
# On Python 2 u"{'key': 'value'}" is evaluated to {'key': 'value'},
# ensure it is converted to {u'key': u'value'}.
result = container_to_text(result)
if include_exceptions:
return (result, None)
else:
return result
except SyntaxError as e:
# special handling for syntax errors, we just return
# the expression string back as-is to support late evaluation
if include_exceptions:
return (expr, None)
return expr
except Exception as e:
if include_exceptions:
return (expr, e)
return expr
|
def safe_eval(expr, locals=None, include_exceptions=False):
'''
This is intended for allowing things like:
with_items: a_list_variable
Where Jinja2 would return a string but we do not want to allow it to
call functions (outside of Jinja2, where the env is constrained).
Based on:
http://stackoverflow.com/questions/12523516/using-ast-and-whitelists-to-make-pythons-eval-safe
'''
locals = {} if locals is None else locals
# define certain JSON types
# eg. JSON booleans are unknown to python eval()
OUR_GLOBALS = {
'__builtins__': {}, # avoid global builtins as per eval docs
'false': False,
'null': None,
'true': True,
# also add back some builtins we do need
'True': True,
'False': False,
'None': None
}
# this is the whitelist of AST nodes we are going to
# allow in the evaluation. Any node type other than
# those listed here will raise an exception in our custom
# visitor class defined below.
SAFE_NODES = set(
(
ast.Add,
ast.BinOp,
# ast.Call,
ast.Compare,
ast.Dict,
ast.Div,
ast.Expression,
ast.List,
ast.Load,
ast.Mult,
ast.Num,
ast.Name,
ast.Str,
ast.Sub,
ast.USub,
ast.Tuple,
ast.UnaryOp,
)
)
# AST node types were expanded after 2.6
if sys.version_info[:2] >= (2, 7):
SAFE_NODES.update(
set(
(ast.Set,)
)
)
# And in Python 3.4 too
if sys.version_info[:2] >= (3, 4):
SAFE_NODES.update(
set(
(ast.NameConstant,)
)
)
# And in Python 3.6 too, although not encountered until Python 3.8, see https://bugs.python.org/issue32892
if sys.version_info[:2] >= (3, 6):
SAFE_NODES.update(
set(
(ast.Constant,)
)
)
filter_list = []
for filter_ in filter_loader.all():
filter_list.extend(filter_.filters().keys())
test_list = []
for test in test_loader.all():
test_list.extend(test.tests().keys())
CALL_WHITELIST = C.DEFAULT_CALLABLE_WHITELIST + filter_list + test_list
class CleansingNodeVisitor(ast.NodeVisitor):
def generic_visit(self, node, inside_call=False):
if type(node) not in SAFE_NODES:
raise Exception("invalid expression (%s)" % expr)
elif isinstance(node, ast.Call):
inside_call = True
elif isinstance(node, ast.Name) and inside_call:
# Disallow calls to builtin functions that we have not vetted
# as safe. Other functions are excluded by setting locals in
# the call to eval() later on
if hasattr(builtins, node.id) and node.id not in CALL_WHITELIST:
raise Exception("invalid function: %s" % node.id)
# iterate over all child nodes
for child_node in ast.iter_child_nodes(node):
self.generic_visit(child_node, inside_call)
if not isinstance(expr, string_types):
# already templated to a datastructure, perhaps?
if include_exceptions:
return (expr, None)
return expr
cnv = CleansingNodeVisitor()
try:
parsed_tree = ast.parse(expr, mode='eval')
cnv.visit(parsed_tree)
compiled = compile(parsed_tree, to_native(expr, errors='surrogate_or_strict'), 'eval')
# Note: passing our own globals and locals here constrains what
# callables (and other identifiers) are recognized. this is in
# addition to the filtering of builtins done in CleansingNodeVisitor
result = eval(compiled, OUR_GLOBALS, dict(locals))
if PY2:
# On Python 2 u"{'key': 'value'}" is evaluated to {'key': 'value'},
# ensure it is converted to {u'key': u'value'}.
result = container_to_text(result)
if include_exceptions:
return (result, None)
else:
return result
except SyntaxError as e:
# special handling for syntax errors, we just return
# the expression string back as-is to support late evaluation
if include_exceptions:
return (expr, None)
return expr
except Exception as e:
if include_exceptions:
return (expr, e)
return expr
|
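A compact, standard-library-only sketch of the same whitelisting idea (hypothetical helper name, far smaller node set than the real function): parse the expression, reject any AST node type outside the allowed set, then evaluate with empty builtins.
import ast

ALLOWED = (ast.Expression, ast.Constant, ast.List, ast.Tuple, ast.Dict,
           ast.BinOp, ast.UnaryOp, ast.Add, ast.Sub, ast.Mult, ast.Div,
           ast.USub, ast.Load)

def tiny_safe_eval(expr):
    tree = ast.parse(expr, mode="eval")
    for node in ast.walk(tree):
        if not isinstance(node, ALLOWED):
            raise ValueError("disallowed node: %s" % type(node).__name__)
    return eval(compile(tree, "<expr>", "eval"), {"__builtins__": {}}, {})

print(tiny_safe_eval("[1, 2] + [3 * 4]"))  # [1, 2, 12]
# tiny_safe_eval("__import__('os')") would raise ValueError (ast.Call is not allowed)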
3,039 |
def _align_method_FRAME(left, right, axis, flex=False, level=None):
"""
Convert rhs to meet lhs dims if input is list, tuple or np.ndarray.
Parameters
----------
left : DataFrame
right : Any
axis: int, str, or None
flex: bool or None, default False
Whether this is a flex op, in which case we reindex.
None indices not to check for alignment.
level : int or level name, default None
Returns
-------
left : DataFrame
right : Any
"""
def to_series(right):
msg = "Unable to coerce to Series, length must be {req_len}: given {given_len}"
if axis is not None and left._get_axis_name(axis) == "index":
if len(left.index) != len(right):
raise ValueError(
msg.format(req_len=len(left.index), given_len=len(right))
)
right = left._constructor_sliced(right, index=left.index)
else:
if len(left.columns) != len(right):
raise ValueError(
msg.format(req_len=len(left.columns), given_len=len(right))
)
right = left._constructor_sliced(right, index=left.columns)
return right
if isinstance(right, np.ndarray):
if right.ndim == 1:
right = to_series(right)
elif right.ndim == 2:
if right.shape == left.shape:
right = left._constructor(right, index=left.index, columns=left.columns)
elif right.shape[0] == left.shape[0] and right.shape[1] == 1:
# Broadcast across columns
right = np.broadcast_to(right, left.shape)
right = left._constructor(right, index=left.index, columns=left.columns)
elif right.shape[1] == left.shape[1] and right.shape[0] == 1:
# Broadcast along rows
right = to_series(right[0, :])
else:
raise ValueError(
"Unable to coerce to DataFrame, shape "
f"must be {left.shape}: given {right.shape}"
)
elif right.ndim > 2:
raise ValueError(
f"Unable to coerce to Series/DataFrame, dim must be <= 2: {right.shape}"
)
elif is_list_like(right) and not isinstance(right, (ABCSeries, ABCDataFrame)):
# GH17901
right = to_series(right)
if flex is not None and isinstance(right, ABCDataFrame):
if not left._indexed_same(right):
if flex:
left, right = left.align(right, join="outer", level=level, copy=False)
else:
raise ValueError(
"Can only compare identically-labeled DataFrame objects"
)
elif isinstance(right, ABCSeries):
# axis=1 is default for DataFrame-with-Series op
axis = left._get_axis_number(axis) if axis is not None else 1
left, right = left.align(
right, join="outer", axis=axis, level=level, copy=False
)
return left, right
|
def _align_method_FRAME(left, right, axis, flex=False, level=None):
"""
Convert rhs to meet lhs dims if input is list, tuple or np.ndarray.
Parameters
----------
left : DataFrame
right : Any
axis: int, str, or None
flex: bool or None, default False
Whether this is a flex op, in which case we reindex.
None indicates not to check for alignment.
level : int or level name, default None
Returns
-------
left : DataFrame
right : Any
"""
def to_series(right):
msg = "Unable to coerce to Series, length must be {req_len}: given {given_len}"
if axis is not None and left._get_axis_name(axis) == "index":
if len(left.index) != len(right):
raise ValueError(
msg.format(req_len=len(left.index), given_len=len(right))
)
right = left._constructor_sliced(right, index=left.index)
else:
if len(left.columns) != len(right):
raise ValueError(
msg.format(req_len=len(left.columns), given_len=len(right))
)
right = left._constructor_sliced(right, index=left.columns)
return right
if isinstance(right, np.ndarray):
if right.ndim == 1:
right = to_series(right)
elif right.ndim == 2:
if right.shape == left.shape:
right = left._constructor(right, index=left.index, columns=left.columns)
elif right.shape[0] == left.shape[0] and right.shape[1] == 1:
# Broadcast across columns
right = np.broadcast_to(right, left.shape)
right = left._constructor(right, index=left.index, columns=left.columns)
elif right.shape[1] == left.shape[1] and right.shape[0] == 1:
# Broadcast along rows
right = to_series(right[0, :])
else:
raise ValueError(
"Unable to coerce to DataFrame, shape "
f"must be {left.shape}: given {right.shape}"
)
elif right.ndim > 2:
raise ValueError(
f"Unable to coerce to Series/DataFrame, dim must be <= 2: {right.shape}"
)
elif is_list_like(right) and not isinstance(right, (ABCSeries, ABCDataFrame)):
# GH17901
right = to_series(right)
if flex is not None and isinstance(right, ABCDataFrame):
if not left._indexed_same(right):
if flex:
left, right = left.align(right, join="outer", level=level, copy=False)
else:
raise ValueError(
"Can only compare identically-labeled DataFrame objects"
)
elif isinstance(right, ABCSeries):
# axis=1 is default for DataFrame-with-Series op
axis = left._get_axis_number(axis) if axis is not None else 1
left, right = left.align(
right, join="outer", axis=axis, level=level, copy=False
)
return left, right
|
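A short illustration of the (n, 1) branch above: a single-column ndarray is expanded with np.broadcast_to to the DataFrame's shape before the arithmetic op (toy data):
import numpy as np
import pandas as pd

df = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})
col = np.array([[10], [20], [30]])          # shape (3, 1)
full = np.broadcast_to(col, df.shape)       # shape (3, 2), repeated across columns
print(df + full)                            # adds 10/20/30 to each row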
31,067 |
def main():
install_logging('Destroy_instances.log')
circle_aritfact = sys.argv[1]
env_file = sys.argv[2]
instance_role = sys.argv[3]
time_to_live = sys.argv[4]
with open(env_file, 'r') as json_file:
env_results = json.load(json_file)
filtered_results = [env_result for env_result in env_results if env_result["Role"] == instance_role]
for env in filtered_results:
logging.info(f'Downloading server log from {env.get("Role", "Unknown role")}')
ssh_string = 'ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null {}@{} ' \
'"sudo chmod -R 755 /var/log/demisto"'
scp_string = 'scp -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null ' \
'{}@{}:/var/log/demisto/server.log {} || echo "WARN: Failed downloading server.log"'
try:
logging.debug(f'Trying to run {ssh_string}')
subprocess.check_output(
ssh_string.format(env["SSHuser"], env["InstanceDNS"]), shell=True)
except subprocess.CalledProcessError as exc:
logging.exception(exc.output)
try:
server_ip = env["InstanceDNS"].split('.')[0]
subprocess.check_output(
scp_string.format(
env["SSHuser"],
env["InstanceDNS"],
"{}/server_{}_{}.log".format(circle_aritfact, env["Role"].replace(' ', ''), server_ip)),
shell=True)
except subprocess.CalledProcessError as exc:
logging.exception(exc.output)
if time_to_live:
logging.info(f'Skipping - Time to live was set to {time_to_live} minutes')
continue
if os.path.isfile("./Tests/is_build_passed_{}.txt".format(env["Role"].replace(' ', ''))):
logging.info(f'Destroying instance {env.get("Role", "Unknown role")}')
rminstance = aws_functions.destroy_instance(env["Region"], env["InstanceID"])
if aws_functions.isError(rminstance):
logging.error(rminstance)
else:
logging.warning(f'Tests failed on {env.get("Role", "Unknown role")}, keeping instance alive')
|
def main():
install_logging('Destroy_instances.log')
circle_aritfact = sys.argv[1]
env_file = sys.argv[2]
instance_role = sys.argv[3]
time_to_live = sys.argv[4]
with open(env_file, 'r') as json_file:
env_results = json.load(json_file)
filtered_results = [env_result for env_result in env_results if env_result["Role"] == instance_role]
for env in filtered_results:
logging.info(f'Downloading server log from {env.get("Role", "Unknown role")}')
ssh_string = 'ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null {}@{} ' \
'"sudo chmod -R 755 /var/log/demisto"'
scp_string = 'scp -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null ' \
'{}@{}:/var/log/demisto/server.log {} || echo "WARN: Failed downloading server.log"'
try:
logging.debug(f'Trying to run {ssh_string}')
subprocess.check_output(
ssh_string.format(env["SSHuser"], env["InstanceDNS"]), shell=True)
except subprocess.CalledProcessError as exc:
logging.exception(exc.output)
try:
server_ip = env["InstanceDNS"].split('.')[0]
subprocess.check_output(
scp_string.format(
env["SSHuser"],
env["InstanceDNS"],
"{}/server_{}_{}.log".format(circle_aritfact, env["Role"].replace(' ', ''), server_ip)),
shell=True)
except subprocess.CalledProcessError as exc:
logging.exception(f'Failed downloading server logs from server {env["InstanceDNS"]}')
if time_to_live:
logging.info(f'Skipping - Time to live was set to {time_to_live} minutes')
continue
if os.path.isfile("./Tests/is_build_passed_{}.txt".format(env["Role"].replace(' ', ''))):
logging.info(f'Destroying instance {env.get("Role", "Unknown role")}')
rminstance = aws_functions.destroy_instance(env["Region"], env["InstanceID"])
if aws_functions.isError(rminstance):
logging.error(rminstance)
else:
logging.warning(f'Tests failed on {env.get("Role", "Unknown role")}, keeping instance alive')
|
32,591 |
def get_threats(client: Client, after):
before = get_timestamp_format('now')
threats_ids = get_list_threats(client, after, before)
messages = []
if threats_ids:
for threat in reversed(threats_ids):
messages += get_messages_by_datetime(client, threat.get('threatId'), after, before)
ordered_messages = sorted(messages, key=lambda d: d['receivedTime'])
return ordered_messages, before
return [], before
|
def get_events(client: Client, after):
before = get_timestamp_format('now')
threats_ids = get_list_threats(client, after, before)
messages = []
if threats_ids:
for threat in reversed(threats_ids):
messages += get_messages_by_datetime(client, threat.get('threatId'), after, before)
ordered_messages = sorted(messages, key=lambda d: d['receivedTime'])
return ordered_messages, before
return [], before
|
44,071 |
def fragment_graph(graph: MultiDiGraph) -> Tuple[Tuple[MultiDiGraph], MultiDiGraph]:
"""
Fragments a cut graph into a collection of subgraphs as well as returning
the communication/quotient graph.
Args:
graph (MultiDiGraph): directed multigraph containing measure and prepare
nodes at cut locations
Returns:
subgraphs, communication_graph (Tuple[Tuple[MultiDiGraph], MultiDiGraph]):
the subgraphs of the cut graph and the communication graph where each
node represents a fragment and edges denote the flow of qubits between
fragments
**Example**
Consider the following circuit with the manually-placed wire cuts:
.. code-block:: python
from pennylane.transforms import qcut
wire_cut_0 = qml.WireCut(wires=0)
wire_cut_1 = qml.WireCut(wires=1)
multi_wire_cut = qml.WireCut(wires=[0, 1])
with qml.tape.QuantumTape() as tape:
qml.RX(0.4, wires=0)
qml.apply(wire_cut_0)
qml.RY(0.5, wires=0)
qml.apply(wire_cut_1)
qml.CNOT(wires=[0, 1])
qml.apply(multi_wire_cut)
qml.RZ(0.6, wires=1)
qml.expval(qml.PauliZ(0))
We can find the corresponding graph, remove all the wire cut nodes, and
find the subgraphs and communication graph by using:
>>> graph = qcut.tape_to_graph(tape)
>>> qcut.replace_wire_cut_nodes(graph)
>>> qcut.fragment_graph(graph)
((<networkx.classes.multidigraph.MultiDiGraph object at 0x7fb3b2311940>,
<networkx.classes.multidigraph.MultiDiGraph object at 0x7fb3b2311c10>,
<networkx.classes.multidigraph.MultiDiGraph object at 0x7fb3b23e2820>,
<networkx.classes.multidigraph.MultiDiGraph object at 0x7fb3b23e27f0>),
<networkx.classes.multidigraph.MultiDiGraph object at 0x7fb3b23e26a0>)
"""
edges = list(graph.edges)
cut_edges = []
for node1, node2, _ in edges:
if isinstance(node1, MeasureNode):
assert isinstance(node2, PrepareNode)
cut_edges.append((node1, node2))
graph.remove_edge(node1, node2)
subgraph_nodes = weakly_connected_components(graph)
subgraphs = tuple(graph.subgraph(n) for n in subgraph_nodes)
communication_graph = MultiDiGraph()
communication_graph.add_nodes_from(range(len(subgraphs)))
for node1, node2 in cut_edges:
for i, subgraph in enumerate(subgraphs):
if subgraph.has_node(node1):
start_fragment = i
if subgraph.has_node(node2):
end_fragment = i
communication_graph.add_edge(start_fragment, end_fragment, pair=(node1, node2))
return subgraphs, communication_graph
|
def fragment_graph(graph: MultiDiGraph) -> Tuple[Tuple[MultiDiGraph], MultiDiGraph]:
"""
Fragments a cut graph into a collection of subgraphs as well as returning
the communication/quotient graph.
Args:
graph (MultiDiGraph): directed multigraph containing measure and prepare
nodes at cut locations
Returns:
subgraphs, communication_graph (Tuple[Tuple[MultiDiGraph], MultiDiGraph]):
the subgraphs of the cut graph and the communication graph where each
node represents a fragment and edges denote the flow of qubits between
fragments
**Example**
Consider the following circuit with manually-placed wire cuts:
.. code-block:: python
from pennylane.transforms import qcut
wire_cut_0 = qml.WireCut(wires=0)
wire_cut_1 = qml.WireCut(wires=1)
multi_wire_cut = qml.WireCut(wires=[0, 1])
with qml.tape.QuantumTape() as tape:
qml.RX(0.4, wires=0)
qml.apply(wire_cut_0)
qml.RY(0.5, wires=0)
qml.apply(wire_cut_1)
qml.CNOT(wires=[0, 1])
qml.apply(multi_wire_cut)
qml.RZ(0.6, wires=1)
qml.expval(qml.PauliZ(0))
We can find the corresponding graph, remove all the wire cut nodes, and
find the subgraphs and communication graph by using:
>>> graph = qcut.tape_to_graph(tape)
>>> qcut.replace_wire_cut_nodes(graph)
>>> qcut.fragment_graph(graph)
((<networkx.classes.multidigraph.MultiDiGraph object at 0x7fb3b2311940>,
<networkx.classes.multidigraph.MultiDiGraph object at 0x7fb3b2311c10>,
<networkx.classes.multidigraph.MultiDiGraph object at 0x7fb3b23e2820>,
<networkx.classes.multidigraph.MultiDiGraph object at 0x7fb3b23e27f0>),
<networkx.classes.multidigraph.MultiDiGraph object at 0x7fb3b23e26a0>)
"""
edges = list(graph.edges)
cut_edges = []
for node1, node2, _ in edges:
if isinstance(node1, MeasureNode):
assert isinstance(node2, PrepareNode)
cut_edges.append((node1, node2))
graph.remove_edge(node1, node2)
subgraph_nodes = weakly_connected_components(graph)
subgraphs = tuple(graph.subgraph(n) for n in subgraph_nodes)
communication_graph = MultiDiGraph()
communication_graph.add_nodes_from(range(len(subgraphs)))
for node1, node2 in cut_edges:
for i, subgraph in enumerate(subgraphs):
if subgraph.has_node(node1):
start_fragment = i
if subgraph.has_node(node2):
end_fragment = i
communication_graph.add_edge(start_fragment, end_fragment, pair=(node1, node2))
return subgraphs, communication_graph
|
3,557 |
def migrate_data(apps, schema_editor):
RemoteRepository = apps.get_model('oauth', 'RemoteRepository')
queryset = RemoteRepository.objects.filter(project__isnull=False).select_related('project')
for rr in queryset.iterator():
rr.project.remote_repository_id = rr.pk
rr.project.save()
|
def migrate_data(apps, schema_editor):
RemoteRepository = apps.get_model('oauth', 'RemoteRepository')
queryset = RemoteRepository.objects.filter(project__isnull=False).select_related('project').only('pk', 'project')
for rr in queryset.iterator():
rr.project.remote_repository_id = rr.pk
rr.project.save()
|
735 |
def open_in_browser(
response: Union["scrapy.http.response.html.HtmlResponse", "scrapy.http.response.text.TextResponse"],
_openfunc: Callable[[str], Any] = webbrowser.open,
) -> Any:
"""Open the given response in a local web browser, populating the <base>
tag for external links to work
"""
from scrapy.http import HtmlResponse, TextResponse
# XXX: this implementation is a bit dirty and could be improved
body = response.body
if isinstance(response, HtmlResponse):
if b'<base' not in body:
repl = f'\\1<base href="{response.url}">'
body = re.sub(b"(<head.*?>)", to_bytes(repl), body)
ext = '.html'
elif isinstance(response, TextResponse):
ext = '.txt'
else:
raise TypeError("Unsupported response type: "
f"{response.__class__.__name__}")
fd, fname = tempfile.mkstemp(ext)
os.write(fd, body)
os.close(fd)
return _openfunc(f"file://{fname}")
|
def open_in_browser(
response: Union["scrapy.http.response.html.HtmlResponse", "scrapy.http.response.text.TextResponse"],
_openfunc: Callable[[str], Any] = webbrowser.open,
) -> Any:
"""Open the given response in a local web browser, populating the <base>
tag for external links to work
"""
from scrapy.http import HtmlResponse, TextResponse
# XXX: this implementation is a bit dirty and could be improved
body = response.body
if isinstance(response, HtmlResponse):
if b'<base' not in body:
repl = fr'\1<base href="{response.url}">'
body = re.sub(b"(<head.*?>)", to_bytes(repl), body)
ext = '.html'
elif isinstance(response, TextResponse):
ext = '.txt'
else:
raise TypeError("Unsupported response type: "
f"{response.__class__.__name__}")
fd, fname = tempfile.mkstemp(ext)
os.write(fd, body)
os.close(fd)
return _openfunc(f"file://{fname}")
|
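On the replacement-string change in this entry: f'\\1...' and fr'\1...' both produce the two-character backreference \1 that re.sub expands to the captured <head ...> tag. A small sketch with an illustrative URL:
import re

body = b"<head profile='x'><title>t</title>"
url = "http://example.com/page"
repl = fr'\1<base href="{url}">'
# the captured <head ...> tag is kept and the <base> element is appended to it
print(re.sub(b"(<head.*?>)", repl.encode(), body))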
42,422 |
def main(address1: str,address2: str, external: int,pvmodul: str,pvwattin: int):
# external is 0 or 1
log.debug("Beginning update")
address = ['none']*2
address[0]=address1
address[1]=address2
if pvmodul == "none":
pvother = 0
else:
pvother = 1
update_e3dc_battery(address,external,pvother,pvwattin)
log.debug("Update completed successfully")
|
def main(address1: str,address2: str, external: int,pvmodul: str,pvwattin: int):
# external is 0 or 1
log.debug("Beginning update")
address = ['none']*2
address[0]=address1
address[1]=address2
pv_other = pvmodul != "none"
update_e3dc_battery(address, external, pv_other, pvwattin)
log.debug("Update completed successfully")
|
4,988 |
def figure(num=None, # autoincrement if None, else integer from 1-N
figsize=None, # defaults to rc figure.figsize
dpi=None, # defaults to rc figure.dpi
facecolor=None, # defaults to rc figure.facecolor
edgecolor=None, # defaults to rc figure.edgecolor
frameon=True,
FigureClass=Figure,
clear=False,
**kwargs
):
"""
Create a new figure, or activate an existing figure.
Parameters
----------
num : int or str or `.Figure`, optional
A unique identifier for the figure.
If a figure with that identifier already exists, this figure is made
active and returned. An integer refers to the ``Figure.number``
attribute, a string refers to the figure label.
If there is no figure with the identifier or *num* is not given, a new
figure is created, made active and returned. If *num* is an int, it
will be used for the ``Figure.number`` attribute, otherwise, an
auto-generated integer value is used (starting at 1 and incremented
for each new figure). If *num* is a string, the figure label and the
window title is set to this value.
figsize : (float, float), default: :rc:`figure.figsize`
Width, height in inches.
dpi : float, default: :rc:`figure.dpi`
The resolution of the figure in dots-per-inch.
facecolor : color, default: :rc:`figure.facecolor`
The background color.
edgecolor : color, default: :rc:`figure.edgecolor`
The border color.
frameon : bool, default: True
If False, suppress drawing the figure frame.
FigureClass : subclass of `~matplotlib.figure.Figure`
Optionally use a custom `.Figure` instance.
clear : bool, default: False
If True and the figure already exists, then it is cleared.
tight_layout : bool or dict, default: :rc:`figure.autolayout`
If ``False`` use *subplotpars*. If ``True`` adjust subplot
parameters using `.tight_layout` with default padding.
When providing a dict containing the keys ``pad``, ``w_pad``,
``h_pad``, and ``rect``, the default `.tight_layout` paddings
will be overridden.
constrained_layout : bool, default: :rc:`figure.constrained_layout.use`
If ``True`` use constrained layout to adjust positioning of plot
elements. Like ``tight_layout``, but designed to be more
flexible. See
:doc:`/tutorials/intermediate/constrainedlayout_guide`
for examples. (Note: does not work with `add_subplot` or
`~.pyplot.subplot2grid`.)
**kwargs : optional
See `~.matplotlib.figure.Figure` for other possible arguments.
Returns
-------
`~matplotlib.figure.Figure`
The `.Figure` instance returned will also be passed to
new_figure_manager in the backends, which allows to hook custom
`.Figure` classes into the pyplot interface. Additional kwargs will be
passed to the `.Figure` init function.
Notes
-----
If you are creating many figures, make sure you explicitly call
`.pyplot.close` on the figures you are not using, because this will
enable pyplot to properly clean up the memory.
`~matplotlib.rcParams` defines the default values, which can be modified
in the matplotlibrc file.
"""
if isinstance(num, Figure):
if num.canvas.manager is None:
raise ValueError("Figure is not managed by pyplot")
_pylab_helpers.Gcf.set_active(num.canvas.manager)
return num
allnums = get_fignums()
next_num = max(allnums) + 1 if allnums else 1
fig_label = ''
if num is None:
num = next_num
elif isinstance(num, str):
fig_label = num
all_labels = get_figlabels()
if fig_label not in all_labels:
if fig_label == 'all':
cbook._warn_external(
"close('all') closes all existing figures.")
num = next_num
else:
inum = all_labels.index(fig_label)
num = allnums[inum]
else:
num = int(num) # crude validation of num argument
manager = _pylab_helpers.Gcf.get_fig_manager(num)
if manager is None:
max_open_warning = rcParams['figure.max_open_warning']
if len(allnums) == max_open_warning >= 1:
cbook._warn_external(
f"More than {max_open_warning} figures have been opened. "
f"Figures created through the pyplot interface "
f"(`matplotlib.pyplot.figure`) are retained until explicitly "
f"closed and may consume too much memory. (To control this "
f"warning, see the rcParam `figure.max_open_warning`).",
RuntimeWarning)
if get_backend().lower() == 'ps':
dpi = 72
manager = new_figure_manager(
num, figsize=figsize, dpi=dpi,
facecolor=facecolor, edgecolor=edgecolor, frameon=frameon,
FigureClass=FigureClass, **kwargs)
fig = manager.canvas.figure
if fig_label:
fig.set_label(fig_label)
_pylab_helpers.Gcf._set_new_active_manager(manager)
# make sure backends (inline) that we don't ship that expect this
# to be called in plotting commands to make the figure call show
# still work. There is probably a better way to do this in the
# FigureManager base class.
draw_if_interactive()
if _INSTALL_FIG_OBSERVER:
fig.stale_callback = _auto_draw_if_interactive
if clear:
manager.canvas.figure.clear()
return manager.canvas.figure
|
def figure(num=None, # autoincrement if None, else integer from 1-N
figsize=None, # defaults to rc figure.figsize
dpi=None, # defaults to rc figure.dpi
facecolor=None, # defaults to rc figure.facecolor
edgecolor=None, # defaults to rc figure.edgecolor
frameon=True,
FigureClass=Figure,
clear=False,
**kwargs
):
"""
Create a new figure, or activate an existing figure.
Parameters
----------
num : int or str or `.Figure`, optional
A unique identifier for the figure.
If a figure with that identifier already exists, this figure is made
active and returned. An integer refers to the ``Figure.number``
attribute, a string refers to the figure label.
If there is no figure with the identifier or *num* is not given, a new
figure is created, made active and returned. If *num* is an int, it
will be used for the ``Figure.number`` attribute, otherwise, an
auto-generated integer value is used (starting at 1 and incremented
for each new figure). If *num* is a string, the figure label and the
window title is set to this value.
figsize : (float, float), default: :rc:`figure.figsize`
Width, height in inches.
dpi : float, default: :rc:`figure.dpi`
The resolution of the figure in dots-per-inch.
facecolor : color, default: :rc:`figure.facecolor`
The background color.
edgecolor : color, default: :rc:`figure.edgecolor`
The border color.
frameon : bool, default: True
If False, suppress drawing the figure frame.
FigureClass : subclass of `~matplotlib.figure.Figure`
Optionally use a custom `.Figure` instance.
clear : bool, default: False
If True and the figure already exists, then it is cleared.
tight_layout : bool or dict, default: :rc:`figure.autolayout`
If ``False`` use *subplotpars*. If ``True`` adjust subplot
parameters using `.tight_layout` with default padding.
When providing a dict containing the keys ``pad``, ``w_pad``,
``h_pad``, and ``rect``, the default `.tight_layout` paddings
will be overridden.
constrained_layout : bool, default: :rc:`figure.constrained_layout.use`
If ``True`` use constrained layout to adjust positioning of plot
elements. Like ``tight_layout``, but designed to be more
flexible. See
:doc:`/tutorials/intermediate/constrainedlayout_guide`
for examples. (Note: does not work with `add_subplot` or
`~.pyplot.subplot2grid`.)
**kwargs : optional
See `~.matplotlib.figure.Figure` for other possible arguments.
Returns
-------
`~matplotlib.figure.Figure`
The `.Figure` instance returned will also be passed to
new_figure_manager in the backends, which allows to hook custom
`.Figure` classes into the pyplot interface. Additional kwargs will be
passed to the `.Figure` init function.
Notes
-----
If you are creating many figures, make sure you explicitly call
`.pyplot.close` on the figures you are not using, because this will
enable pyplot to properly clean up the memory.
`~matplotlib.rcParams` defines the default values, which can be modified
in the matplotlibrc file.
"""
if isinstance(num, Figure):
if num.canvas.manager is None:
raise ValueError("The passed figure is not managed by pyplot")
_pylab_helpers.Gcf.set_active(num.canvas.manager)
return num
allnums = get_fignums()
next_num = max(allnums) + 1 if allnums else 1
fig_label = ''
if num is None:
num = next_num
elif isinstance(num, str):
fig_label = num
all_labels = get_figlabels()
if fig_label not in all_labels:
if fig_label == 'all':
cbook._warn_external(
"close('all') closes all existing figures.")
num = next_num
else:
inum = all_labels.index(fig_label)
num = allnums[inum]
else:
num = int(num) # crude validation of num argument
manager = _pylab_helpers.Gcf.get_fig_manager(num)
if manager is None:
max_open_warning = rcParams['figure.max_open_warning']
if len(allnums) == max_open_warning >= 1:
cbook._warn_external(
f"More than {max_open_warning} figures have been opened. "
f"Figures created through the pyplot interface "
f"(`matplotlib.pyplot.figure`) are retained until explicitly "
f"closed and may consume too much memory. (To control this "
f"warning, see the rcParam `figure.max_open_warning`).",
RuntimeWarning)
if get_backend().lower() == 'ps':
dpi = 72
manager = new_figure_manager(
num, figsize=figsize, dpi=dpi,
facecolor=facecolor, edgecolor=edgecolor, frameon=frameon,
FigureClass=FigureClass, **kwargs)
fig = manager.canvas.figure
if fig_label:
fig.set_label(fig_label)
_pylab_helpers.Gcf._set_new_active_manager(manager)
# make sure backends (inline) that we don't ship that expect this
# to be called in plotting commands to make the figure call show
# still work. There is probably a better way to do this in the
# FigureManager base class.
draw_if_interactive()
if _INSTALL_FIG_OBSERVER:
fig.stale_callback = _auto_draw_if_interactive
if clear:
manager.canvas.figure.clear()
return manager.canvas.figure
|
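A brief usage sketch of the label-based lookup described in the docstring (the figure label is illustrative):
import matplotlib.pyplot as plt

fig1 = plt.figure("loss-curve")        # creates a figure labelled "loss-curve"
fig2 = plt.figure("loss-curve")        # same label, so the same Figure is returned
assert fig1 is fig2
plt.figure("loss-curve", clear=True)   # reuse the figure, clearing its contents
plt.close("all")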
21,822 |
def _no_reported_version(
requirement: Requirement, extra: Optional[str] = None
):
if extra:
return (
f"Synapse {VERSION} needs {requirement} for {extra}, "
f"but can't determine {requirement.name}'s version."
)
else:
return (
f"Synapse {VERSION} needs {requirement},"
f"but can't determine {requirement.name}'s version"
)
|
def _no_reported_version(
requirement: Requirement, extra: Optional[str] = None
):
if extra:
return (
f"Synapse {VERSION} needs {requirement} for {extra}, "
f"but can't determine {requirement.name}'s version."
)
else:
return (
f"Synapse {VERSION} needs {requirement}, "
f"but can't determine {requirement.name}'s version"
)
|
10,524 |
def check_removal_version(v, version_name, collection_name_name, error_code='invalid-removal-version'):
version = v.get(version_name)
collection_name = v.get(collection_name_name)
if not isinstance(version, string_types) or not isinstance(collection_name, string_types):
# If they are not strings, schema validation will have already complained.
return v
if collection_name == 'ansible.builtin':
try:
parsed_version = StrictVersion()
parsed_version.parse(version)
except ValueError as exc:
raise _add_ansible_error_code(
Invalid('%s (%r) is not a valid ansible-base version: %s' % (version_name, version, exc)),
error_code=error_code)
return v
try:
parsed_version = SemanticVersion()
parsed_version.parse(version)
if parsed_version.major != 0 and (parsed_version.minor != 0 or parsed_version.patch != 0):
raise _add_ansible_error_code(
Invalid('%s (%r) must be a major release, not a minor or patch release (see specification at '
'https://semver.org/)' % (version_name, version)),
error_code='removal-version-must-be-major')
except ValueError as exc:
raise _add_ansible_error_code(
Invalid('%s (%r) is not a valid collection version (see specification at https://semver.org/): '
'%s' % (version_name, version, exc)),
error_code=error_code)
return v
|
def check_removal_version(v, version_name, collection_name_name, error_code='invalid-removal-version'):
version = v.get(version_name)
collection_name = v.get(collection_name_name)
if not isinstance(version, string_types) or not isinstance(collection_name, string_types):
# If they are not strings, schema validation will have already complained.
return v
if collection_name == 'ansible.builtin':
try:
parsed_version = StrictVersion()
parsed_version.parse(version)
except ValueError as exc:
raise _add_ansible_error_code(
Invalid('%s (%r) is not a valid ansible-base version: %s' % (version_name, version, exc)),
error_code=error_code)
return v
try:
parsed_version = SemanticVersion()
parsed_version.parse(version)
if parsed_version.major == 0 or (parsed_version.major != 0 and (parsed_version.minor != 0 or parsed_version.patch != 0)):
raise _add_ansible_error_code(
Invalid('%s (%r) must be a major release, not a minor or patch release (see specification at '
'https://semver.org/)' % (version_name, version)),
error_code='removal-version-must-be-major')
except ValueError as exc:
raise _add_ansible_error_code(
Invalid('%s (%r) is not a valid collection version (see specification at https://semver.org/): '
'%s' % (version_name, version, exc)),
error_code=error_code)
return v
|
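The two versions above differ only in the condition that triggers the "must be a major release" error. A small self-contained sketch of which versions each condition rejects, using a plain tuple parse instead of ansible's SemanticVersion (an assumption made purely for illustration):

def parse(version):
    major, minor, patch = (int(p) for p in version.split("."))
    return major, minor, patch

def rejected_by_first(version):
    # condition used in the first version above
    major, minor, patch = parse(version)
    return major != 0 and (minor != 0 or patch != 0)

def rejected_by_second(version):
    # condition used in the second version above: 0.x.y releases are also rejected
    major, minor, patch = parse(version)
    return major == 0 or (major != 0 and (minor != 0 or patch != 0))

for v in ("2.0.0", "2.1.0", "0.5.0"):
    print(v, rejected_by_first(v), rejected_by_second(v))
# 2.0.0 is accepted by both, 2.1.0 is rejected by both, 0.5.0 is rejected only by the second.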
42,986 |
def apply_twomode_gate(mat, state, pure, modes, n, trunc, gate="BSgate"):
"""Applies a two-mode gate to a state
Applies the two-mode gate to the state using custom tensor contractions and
the numba compiler for faster application.
Args:
mat (ndarray): The BS operator to be applied to the state
state (ndarray): The state that the BS is applied to
pure (bool): If the state is pure or mixed
modes (list[int]): A list of modes to which the BS is applied
n (int): The total number of modes
trunc (int): The Hilbert space truncation/cutoff
gate (str): the gate which should be called (BSgate, S2gate)
Returns:
ndarray: State where the two-mode operation has been applied
"""
if pure:
t1 = modes[0]
t2 = modes[1]
# put the ket-values in front to be operated on in the apply function
switch_list_1 = np.arange(n)
switch_list_2 = np.arange(n)
switch_list_1[[0, t1]] = switch_list_1[[t1, 0]]
switch_list_2[[1, t2]] = switch_list_2[[t2, 1]]
state = state.transpose(switch_list_1)
state = state.transpose(switch_list_2)
if gate == "BSgate":
state = _apply_BS(mat, state, trunc)
elif gate == "S2gate":
state = _apply_S2(mat, state, trunc)
else:
raise NotImplementedError
state = state.transpose(switch_list_2)
ret = state.transpose(switch_list_1)
else:
t1 = 2 * modes[0]
t2 = 2 * modes[1]
# put the ket-values in front to be operated on in the apply function
switch_list_1 = np.arange(2 * n)
switch_list_2 = np.arange(2 * n)
switch_list_1[[0, 1, t1, t1+1]] = switch_list_1[[t1, t1+1, 0, 1]]
switch_list_2[[0, 1, t2, t2+1]] = switch_list_2[[t2, t2+1, 0, 1]]
# put bra-values to the left, and ket-values to the right (ignoring values not operated on)
transpose_list = np.arange(2 * n)
transpose_list[[t1+1, t2]] = transpose_list[[t2, t1+1]]
state = state.transpose(transpose_list)
state = state.transpose(switch_list_1)
if gate == "BSgate":
state = _apply_BS(mat, state, trunc)
state = state.transpose(switch_list_1)
state = state.transpose(switch_list_2)
state = _apply_BS(mat.conj(), state, trunc)
elif gate == "S2gate":
state = _apply_S2(mat, state, trunc)
state = state.transpose(switch_list_1)
state = state.transpose(switch_list_2)
state = _apply_S2(mat.conj(), state, trunc)
else:
raise NotImplementedError
state = state.transpose(switch_list_2)
ret = state.transpose(transpose_list)
return ret
|
def apply_twomode_gate(mat, state, pure, modes, n, trunc, gate="BSgate"):
"""Applies a two-mode gate to a state
Applies the two-mode gate to the state using custom tensor contractions and
the Numba compiler for faster application.
Args:
mat (ndarray): The BS operator to be applied to the state
state (ndarray): The state that the BS is applied to
pure (bool): If the state is pure or mixed
modes (list[int]): A list of modes to which the BS is applied
n (int): The total number of modes
trunc (int): The Hilbert space truncation/cutoff
gate (str): the gate which should be called (BSgate, S2gate)
Returns:
ndarray: State where the two-mode operation has been applied
"""
if pure:
t1 = modes[0]
t2 = modes[1]
# put the ket-values in front to be operated on in the apply function
switch_list_1 = np.arange(n)
switch_list_2 = np.arange(n)
switch_list_1[[0, t1]] = switch_list_1[[t1, 0]]
switch_list_2[[1, t2]] = switch_list_2[[t2, 1]]
state = state.transpose(switch_list_1)
state = state.transpose(switch_list_2)
if gate == "BSgate":
state = _apply_BS(mat, state, trunc)
elif gate == "S2gate":
state = _apply_S2(mat, state, trunc)
else:
raise NotImplementedError
state = state.transpose(switch_list_2)
ret = state.transpose(switch_list_1)
else:
t1 = 2 * modes[0]
t2 = 2 * modes[1]
# put the ket-values in front to be operated on in the apply function
switch_list_1 = np.arange(2 * n)
switch_list_2 = np.arange(2 * n)
switch_list_1[[0, 1, t1, t1+1]] = switch_list_1[[t1, t1+1, 0, 1]]
switch_list_2[[0, 1, t2, t2+1]] = switch_list_2[[t2, t2+1, 0, 1]]
# put bra-values to the left, and ket-values to the right (ignoring values not operated on)
transpose_list = np.arange(2 * n)
transpose_list[[t1+1, t2]] = transpose_list[[t2, t1+1]]
state = state.transpose(transpose_list)
state = state.transpose(switch_list_1)
if gate == "BSgate":
state = _apply_BS(mat, state, trunc)
state = state.transpose(switch_list_1)
state = state.transpose(switch_list_2)
state = _apply_BS(mat.conj(), state, trunc)
elif gate == "S2gate":
state = _apply_S2(mat, state, trunc)
state = state.transpose(switch_list_1)
state = state.transpose(switch_list_2)
state = _apply_S2(mat.conj(), state, trunc)
else:
raise NotImplementedError
state = state.transpose(switch_list_2)
ret = state.transpose(transpose_list)
return ret
|
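The index-swap trick used in apply_twomode_gate (building switch_list_1/switch_list_2 and transposing with them to move the target modes to the front) can be seen in isolation with NumPy; a minimal sketch:

import numpy as np

n = 4            # number of modes (tensor axes)
t1 = 2           # mode we want moved to slot 0
state = np.arange(2 * 3 * 4 * 5).reshape(2, 3, 4, 5)

switch_list = np.arange(n)
switch_list[[0, t1]] = switch_list[[t1, 0]]   # [2, 1, 0, 3]

moved = state.transpose(switch_list)          # axis 2 is now first
assert moved.shape == (4, 3, 2, 5)

restored = moved.transpose(switch_list)       # a single swap is its own inverse
assert np.array_equal(restored, state)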
1,649 |
def test_one_hot_encoder_drop_equals_if_binary():
X = [['Male', 1], ['Female', 3], ['Female', 2]]
expected = np.array([[1., 1., 0., 0.], [0., 0., 0., 1.], [0., 0., 1., 0.]])
ohe = OneHotEncoder(drop='if_binary')
ohe.fit(X)
result = ohe.transform(X).toarray()
assert_array_equal(expected, result)
|
def test_one_hot_encoder_drop_equals_if_binary():
X = [['Male', 1], ['Female', 3], ['Female', 2]]
expected = np.array([[1., 1., 0., 0.], [0., 0., 0., 1.], [0., 0., 1., 0.]])
ohe = OneHotEncoder(drop='if_binary')
ohe.fit(X)
X_trans = ohe.fit_transform(X)
    assert_array_equal(expected, X_trans.toarray())
|
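For reference, a hedged sketch of the usage exercised by this test, assuming scikit-learn is installed: with drop='if_binary' the two-category first column keeps a single indicator, while the three-category second column stays fully one-hot encoded.

from sklearn.preprocessing import OneHotEncoder

X = [['Male', 1], ['Female', 3], ['Female', 2]]
ohe = OneHotEncoder(drop='if_binary')
X_trans = ohe.fit_transform(X).toarray()
print(ohe.categories_)   # [array(['Female', 'Male'], ...), array([1, 2, 3], ...)]
print(X_trans)
# [[1. 1. 0. 0.]
#  [0. 0. 0. 1.]
#  [0. 0. 1. 0.]]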
24,073 |
def upgradeConfigFrom_2_to_3(profile):
# The winConsoleSpeakPasswords option has been moved to the terminals section of the config.
try:
speakPasswords = profile["UIA"]["winConsoleSpeakPasswords"]
except KeyError:
# Setting does not exist, no need for upgrade of this setting
log.debug("winConsoleSpeakPasswords not present, no action taken.")
pass
else:
del profile["UIA"]["winConsoleSpeakPasswords"]
if "terminals" not in profile:
profile["terminals"] = {}
profile["terminals"]["speakPasswords"] = speakPasswords
|
def upgradeConfigFrom_1_to_2(profile):
# Schema has been modified to split cursor shape into focus and review shapes
# Previously, the same cursor shape was used for focus and review
try:
cursorShape = int(profile["braille"]["cursorShape"])
except KeyError as e:
# Setting does not exist, no need for upgrade of this setting
log.debug("No cursorShape, no action taken.")
pass
else:
del profile["braille"]["cursorShape"]
profile["braille"]["cursorShapeFocus"] = cursorShape
profile['UIA']['allowInMSWord'] = AllowUiaInMSWord.ALWAYS
# The winConsoleSpeakPasswords option has been moved to the terminals section of the config.
try:
speakPasswords = profile["UIA"]["winConsoleSpeakPasswords"]
except KeyError:
# Setting does not exist, no need for upgrade of this setting
log.debug("winConsoleSpeakPasswords not present, no action taken.")
pass
else:
del profile["UIA"]["winConsoleSpeakPasswords"]
if "terminals" not in profile:
profile["terminals"] = {}
profile["terminals"]["speakPasswords"] = speakPasswords
|
29,964 |
def pause(menu_dict):
'''
Pause the program execution and invok the user to make one or more valid options.
Called by: fx_karoo_gp
Arguments required: menu_dict
'''
options = ['', '?', 'help', 'i', 'm', 'g', 's', 'db', 'ts', 'min', 'bal',
'l', 'pop', 'e', 'p', 'id', 'dir', 'load', 'w', 'add', 'q']
while True:
try:
menu = input('\n\t\033[36m (pause) \033[0;0m')
if menu in options:
break
else:
raise ValueError()
except ValueError:
print('\n\t\033[32m Enter \033[1m?\033[0;0m\033[32m to review '
'your options. Try again ...\033[0;0m')
except KeyboardInterrupt:
print('\n\n\t\033[32m Enter \033[1mq\033[0;0m\033[32m to quit\033[0;0m')
if menu == '':
menu_dict['input_a'] = 'esc' # exit (pause) with ENTER
elif menu == '?' or menu == 'help':
print('\n\t\033[32m Select from one of the following options:\033[0;0m')
print('\t\033[36m\033[1m i \t\033[0;0m engage Interactive display mode')
print('\t\033[36m\033[1m m \t\033[0;0m engage Minimal display mode')
print('\t\033[36m\033[1m g \t\033[0;0m engage Generation display mode')
print('\t\033[36m\033[1m s \t\033[0;0m engage Silent display mode')
print('\t\033[36m\033[1m db \t\033[0;0m engage De-Bug display mode')
print('')
print('\t\033[36m\033[1m ts \t\033[0;0m adjust tournament size')
print('\t\033[36m\033[1m min \t\033[0;0m adjust minimum number of nodes')
# print('\t\033[36m\033[1m max \t\033[0;0m adjust maximum Tree depth') # NEED TO ADD
print('\t\033[36m\033[1m bal \t\033[0;0m adjust balance of genetic operators')
print('')
print('\t\033[36m\033[1m l \t\033[0;0m list Trees with leading fitness scores')
print('\t\033[36m\033[1m pop \t\033[0;0m list Trees in current population')
print('\t\033[36m\033[1m e \t\033[0;0m evaluate a single Tree against the test data')
print('\t\033[36m\033[1m p \t\033[0;0m print a single Tree to screen')
print('')
print('\t\033[36m\033[1m id \t\033[0;0m display current generation ID')
print('\t\033[36m\033[1m dir \t\033[0;0m display current working directory')
# print('\t\033[36m\033[1m load \t\033[0;0m load population_s (seed) '
# 'to replace population_a (current)') # NEED TO FIX
print('\t\033[36m\033[1m w \t\033[0;0m write the evolving next_gen_trees to disk')
print('')
print('\t\033[36m\033[1m add \t\033[0;0m add generations and continue your run')
print('\t\033[36m\033[1m q \t\033[0;0m quit Karoo GP')
elif menu == 'i':
menu_dict['display'] = 'i'
print('\n\t Interactive display mode engaged (for control freaks)')
elif menu == 'g':
menu_dict['display'] = 'g'
print('\n\t Generation display mode engaged (for recovering control freaks)')
elif menu == 'm':
menu_dict['display'] = 'm'
print('\n\t Minimal display mode engaged (for GP gurus)')
elif menu == 's':
menu_dict['display'] = 's'
print('\n\t Silent display mode engaged (for zen masters)')
elif menu == 'db':
menu_dict['display'] = 'db'
print('\n\t De-Bug display mode engaged (for evolutionary biologists)')
elif menu == 'ts': # adjust the tournament size
while True:
try:
print('\n\t The current tournament size is:', menu_dict['tourn_size'])
query = input('\t Adjust the tournament size (suggest 7 for each 100): ')
if query == '':
break
elif int(query) in list(range(2, menu_dict['tree_pop_max']+1)):
menu_dict['tourn_size'] = int(query)
break # rebuilt 20190603
else:
raise ValueError()
except ValueError:
print('\n\t\033[32m Enter a number from 2 including %s. '
'Try again ...\033[0;0m' % str(menu_dict['tree_pop_max']))
except KeyboardInterrupt:
print('\n\n\t\033[32m Enter \033[1mq\033[0;0m\033[32m to quit\033[0;0m')
elif menu == 'min': # adjust the minimum number of nodes per Tree
# max_nodes = 2**(tree_depth_base +1) - 1 # NEED TO calc to replace
# upper limit in range but tree_depth_base is not global - 2018 04/22
while True:
try:
print('\n\t The current minimum number of nodes is:',
menu_dict['tree_depth_min'])
query = input('\t Adjust the minimum number of nodes for all Trees (min 3): ')
if query == '':
break
elif int(query) in list(range(3, 1000)):
menu_dict['tree_depth_min'] = int(query)
break # rebuilt 20190603
else:
raise ValueError()
except ValueError:
print('\n\t\033[32m Enter a number from 3 including 1000. '
'Try again ...\033[0;0m')
except KeyboardInterrupt:
print('\n\n\t\033[32m Enter \033[1mq\033[0;0m\033[32m to quit\033[0;0m')
# NEED TO ADD
#elif menu == 'max': # adjust the maximum Tree depth
# while True:
# try:
# print('\n\t The current \033[3madjusted\033[0;0m maximum Tree depth is:',
# gp.tree_depth_max)
# query = input('\n\t Adjust the global maximum Tree depth to (1 ... 10): ')
# if int(query) not in list(range(1, 11)): raise ValueError()
# if query < gp.tree_depth_max:
# print('\n\t\033[32m This value is less than the current value.\033[0;0m')
# conf = input('\n\t Are you ok with this? (y/n) ')
# if conf == 'n': break
# except ValueError:
# print('\n\t\033[32m Enter a number from 1 including 10. '
# 'Try again ...\033[0;0m')
# except KeyboardInterrupt:
# print('\n\n\t\033[32m Enter \033[1mq\033[0;0m\033[32m to quit\033[0;0m')
elif menu == 'bal': # adjust the balance of genetic operators'
print('\n\t The current balance of genetic operators is:')
print('\t\t Reproduction:', menu_dict['evolve_repro'])
tmp_repro = menu_dict['evolve_repro']
print('\t\t Point Mutation:', menu_dict['evolve_point'])
tmp_point = menu_dict['evolve_point']
print('\t\t Branch Mutation:', menu_dict['evolve_branch'])
tmp_branch = menu_dict['evolve_branch']
print('\t\t Crossover:', menu_dict['evolve_cross'], '\n')
tmp_cross = menu_dict['evolve_cross']
while True:
try:
query = input('\t Enter quantity of Trees to be generated by Reproduction: ')
if query == '':
break
elif int(query) in list(range(0, 1000)):
tmp_repro = int(query)
break
else:
raise ValueError()
except ValueError:
print('\n\t\033[32m Enter a number from 0 including %s. '
'Try again ...\033[0;0m' % str(menu_dict['tree_pop_max']))
except KeyboardInterrupt:
print('\n\n\t\033[32m Enter \033[1mq\033[0;0m\033[32m to quit\033[0;0m')
while True:
try:
query = input('\t Enter quantity of Trees to be generated by Point Mutation: ')
if query == '':
break
elif int(query) in list(range(0, 1000)):
tmp_point = int(query)
break
else:
raise ValueError()
except ValueError:
print('\n\t\033[32m Enter a number from 0 including %s. '
'Try again ...\033[0;0m' % str(menu_dict['tree_pop_max']))
except KeyboardInterrupt:
print('\n\n\t\033[32m Enter \033[1mq\033[0;0m\033[32m to quit\033[0;0m')
while True:
try:
query = input('\t Enter quantity of Trees to be generated by Branch Mutation: ')
if query == '':
break
elif int(query) in list(range(0, 1000)):
tmp_branch = int(query)
break
else:
raise ValueError()
except ValueError:
print('\n\t\033[32m Enter a number from 0 including %s. '
'Try again ...\033[0;0m' % str(menu_dict['tree_pop_max']))
except KeyboardInterrupt:
print('\n\n\t\033[32m Enter \033[1mq\033[0;0m\033[32m to quit\033[0;0m')
while True:
try:
query = input('\t Enter quantity of Trees to be generated by Crossover: ')
if query == '':
break
elif int(query) in list(range(0, 1000)):
tmp_cross = int(query)
break
else:
raise ValueError()
except ValueError:
print('\n\t\033[32m Enter a number from 0 including %s. '
'Try again ...\033[0;0m' % str(menu_dict['tree_pop_max']))
except KeyboardInterrupt:
print('\n\n\t\033[32m Enter \033[1mq\033[0;0m\033[32m to quit\033[0;0m')
if tmp_repro + tmp_point + tmp_branch + tmp_cross != menu_dict['tree_pop_max']:
print('\n\t The sum of the above does not equal %s. '
'Try again ...' % str(menu_dict['tree_pop_max']))
else:
print('\n\t The revised balance of genetic operators is:')
print('\t\t Reproduction:', tmp_repro)
menu_dict['evolve_repro'] = tmp_repro
print('\t\t Point Mutation:', tmp_point)
menu_dict['evolve_point'] = tmp_point
print('\t\t Branch Mutation:', tmp_branch)
menu_dict['evolve_branch'] = tmp_branch
print('\t\t Crossover:', tmp_cross)
menu_dict['evolve_cross'] = tmp_cross
elif menu == 'l': # display dictionary of Trees with the best fitness score
print('\n\t The leading Trees and their associated expressions are:')
for n in sorted(menu_dict['fittest_dict']):
print('\t ', n, ':', menu_dict['fittest_dict'][n])
elif menu == 'pop': # list Trees in the current population
if menu_dict['next_gen_len'] == 0:
menu_dict['input_a'] = 'population'
else:
menu_dict['input_a'] = 'next_gen'
elif menu == 'e': # evaluate a Tree against the TEST data
has_next_gen = menu_dict['next_gen_len'] > 0
pop_len_key = 'next_gen_len' if has_next_gen else 'population_len'
while True:
try:
query = input('\n\t Select a Tree to evaluate: ')
if query == '':
break
elif int(query) in range(1, menu_dict[pop_len_key] + 1):
menu_dict['input_a'] = 'eval'
menu_dict['input_b'] = int(query)
break
else:
raise ValueError()
except ValueError:
print('\n\t\033[32m Enter a number from 1 including %s. '
'Try again ...\033[0;0m' % str(menu_dict[pop_len_key]))
except KeyboardInterrupt:
print('\n\n\t\033[32m Enter \033[1mq\033[0;0m\033[32m to quit\033[0;0m')
elif menu == 'p': # print a Tree to screen -- NEED TO ADD: SymPy graphical print option
has_next_gen = menu_dict['next_gen_len'] > 0
menu_dict['input_a'] = 'print_b' if has_next_gen else 'print_a'
pop_len_key = 'next_gen_len' if has_next_gen else 'population_len'
while True:
try:
query = input('\n\t Select a Tree to print: ')
if query == '':
break
elif (int(query) in list(range(1, menu_dict[pop_len_key] + 1))):
menu_dict['input_b'] = int(query)
break
else:
raise ValueError()
except ValueError:
print('\n\t\033[32m Enter a number from 1 including %s. '
'Try again ...\033[0;0m' % str(menu_dict[pop_len_key] + 1))
except KeyboardInterrupt:
print('\n\n\t\033[32m Enter \033[1mq\033[0;0m\033[32m to quit\033[0;0m')
elif menu == 'id':
print('\n\t Current generation:', menu_dict['gen_id'])
elif menu == 'dir':
print('\n\t Current working directory:', menu_dict['path'])
# NEED TO REBUILD
#elif menu == 'load': # load population_s to replace population_a
# while True:
# try:
# query = input('\n\t Overwrite the current population with population_s? '
# '(\033[1my\033[0;0m\033[32m/\033[1mn\033[0;0m\033[32m)\033[0;0m ')
# if query == 'y': menu_dict['input_a'] = 'load'; break
# elif query == 'n': break
# else: raise ValueError()
# except ValueError:
# print('\n\t\033[32m Enter (\033[1my\033[0;0m)es or (\033[1mn\033[0;0m)o. '
# 'Try again ...\033[0;0m')
# except KeyboardInterrupt:
# print('\n\n\t\033[32m Enter \033[1mq\033[0;0m\033[32m to quit\033[0;0m')
elif menu == 'w': # write the evolving next_gen_trees to disk
if menu_dict['gen_id'] > 1:
menu_dict['input_a'] = 'write'
else:
print('\n\t\033[36m The evolving next_gen_trees does not yet exist\033[0;0m')
elif menu == 'add': # add generations and continue a GP run
if menu_dict['gen_id'] == menu_dict['gen_max']:
while True:
try:
query = input('\n\t\033[3m You are at the end of your run.'
'\033[0;0m\n\t Add more generations to continue '
'(1-100 or ENTER to escape): ')
if query == '':
break
elif int(query) in list(range(1, 101)):
menu_dict['input_a'] = 'add'
menu_dict['input_b'] = int(query)
break
else:
raise ValueError()
except ValueError:
print('\n\t\033[32m Enter a number from 1 including 100. '
'Try again ...\033[0;0m')
except KeyboardInterrupt:
print('\n\n\t\033[32m Enter \033[1mq\033[0;0m\033[32m '
'to quit\033[0;0m')
else:
menu_dict['input_a'] = 'add'
elif menu == 'q': # quit (in case you didn't figure that one out :)
while True:
try:
query = input('\n\t\033[32m Quit Karoo GP? '
'(\033[1my\033[0;0m\033[32m/\033[1mn\033[0;0m'
'\033[32m)\033[0;0m ')
if query == 'y':
menu_dict['input_a'] = 'quit'
break
else:
break
except ValueError:
print('\n\t\033[32m Enter \033[1my\033[0;0m\033[32mes or '
'\033[1mn\033[0;0m\033[32mo\033[0;0m')
except KeyboardInterrupt:
print('\n\n\t\033[32m Enter \033[1mq\033[0;0m\033[32m '
'to quit\033[0;0m')
return menu_dict
|
def pause(menu_dict):
'''
    Pause program execution and prompt the user to choose one or more valid options.
Called by: fx_karoo_gp
Arguments required: menu_dict
'''
options = ['', '?', 'help', 'i', 'm', 'g', 's', 'db', 'ts', 'min', 'bal',
'l', 'pop', 'e', 'p', 'id', 'dir', 'load', 'w', 'add', 'q']
while True:
try:
menu = input('\n\t\033[36m (pause) \033[0;0m')
if menu in options:
break
else:
raise ValueError()
except ValueError:
print('\n\t\033[32m Enter \033[1m?\033[0;0m\033[32m to review '
'your options. Try again ...\033[0;0m')
except KeyboardInterrupt:
print('\n\n\t\033[32m Enter \033[1mq\033[0;0m\033[32m to quit\033[0;0m')
if menu == '':
menu_dict['input_a'] = 'esc' # exit (pause) with ENTER
elif menu == '?' or menu == 'help':
print('\n\t\033[32m Select from one of the following options:\033[0;0m')
print('\t\033[36m\033[1m i \t\033[0;0m engage Interactive display mode')
print('\t\033[36m\033[1m m \t\033[0;0m engage Minimal display mode')
print('\t\033[36m\033[1m g \t\033[0;0m engage Generation display mode')
print('\t\033[36m\033[1m s \t\033[0;0m engage Silent display mode')
print('\t\033[36m\033[1m db \t\033[0;0m engage De-Bug display mode')
print('')
print('\t\033[36m\033[1m ts \t\033[0;0m adjust tournament size')
print('\t\033[36m\033[1m min \t\033[0;0m adjust minimum number of nodes')
# print('\t\033[36m\033[1m max \t\033[0;0m adjust maximum Tree depth') # NEED TO ADD
print('\t\033[36m\033[1m bal \t\033[0;0m adjust balance of genetic operators')
print('')
print('\t\033[36m\033[1m l \t\033[0;0m list Trees with leading fitness scores')
print('\t\033[36m\033[1m pop \t\033[0;0m list Trees in current population')
print('\t\033[36m\033[1m e \t\033[0;0m evaluate a single Tree against the test data')
print('\t\033[36m\033[1m p \t\033[0;0m print a single Tree to screen')
print('')
print('\t\033[36m\033[1m id \t\033[0;0m display current generation ID')
print('\t\033[36m\033[1m dir \t\033[0;0m display current working directory')
# print('\t\033[36m\033[1m load \t\033[0;0m load population_s (seed) '
# 'to replace population_a (current)') # NEED TO FIX
print('\t\033[36m\033[1m w \t\033[0;0m write the evolving next_gen_trees to disk')
print('')
print('\t\033[36m\033[1m add \t\033[0;0m add generations and continue your run')
print('\t\033[36m\033[1m q \t\033[0;0m quit Karoo GP')
elif menu == 'i':
menu_dict['display'] = 'i'
print('\n\t Interactive display mode engaged (for control freaks)')
elif menu == 'g':
menu_dict['display'] = 'g'
print('\n\t Generation display mode engaged (for recovering control freaks)')
elif menu == 'm':
menu_dict['display'] = 'm'
print('\n\t Minimal display mode engaged (for GP gurus)')
elif menu == 's':
menu_dict['display'] = 's'
print('\n\t Silent display mode engaged (for zen masters)')
elif menu == 'db':
menu_dict['display'] = 'db'
print('\n\t De-Bug display mode engaged (for evolutionary biologists)')
elif menu == 'ts': # adjust the tournament size
while True:
try:
print('\n\t The current tournament size is:', menu_dict['tourn_size'])
query = input('\t Adjust the tournament size (suggest 7 for each 100): ')
if query == '':
break
elif int(query) in list(range(2, menu_dict['tree_pop_max']+1)):
menu_dict['tourn_size'] = int(query)
break # rebuilt 20190603
else:
raise ValueError()
except ValueError:
print('\n\t\033[32m Enter a number from 2 including %s. '
'Try again ...\033[0;0m' % str(menu_dict['tree_pop_max']))
except KeyboardInterrupt:
print('\n\n\t\033[32m Enter \033[1mq\033[0;0m\033[32m to quit\033[0;0m')
elif menu == 'min': # adjust the minimum number of nodes per Tree
# max_nodes = 2**(tree_depth_base +1) - 1 # NEED TO calc to replace
# upper limit in range but tree_depth_base is not global - 2018 04/22
while True:
try:
print('\n\t The current minimum number of nodes is:',
menu_dict['tree_depth_min'])
query = input('\t Adjust the minimum number of nodes for all Trees (min 3): ')
if query == '':
break
elif int(query) in list(range(3, 1000)):
menu_dict['tree_depth_min'] = int(query)
break # rebuilt 20190603
else:
raise ValueError()
except ValueError:
print('\n\t\033[32m Enter a number from 3 including 1000. '
'Try again ...\033[0;0m')
except KeyboardInterrupt:
print('\n\n\t\033[32m Enter \033[1mq\033[0;0m\033[32m to quit\033[0;0m')
# NEED TO ADD
#elif menu == 'max': # adjust the maximum Tree depth
# while True:
# try:
# print('\n\t The current \033[3madjusted\033[0;0m maximum Tree depth is:',
# gp.tree_depth_max)
# query = input('\n\t Adjust the global maximum Tree depth to (1 ... 10): ')
# if int(query) not in list(range(1, 11)): raise ValueError()
# if query < gp.tree_depth_max:
# print('\n\t\033[32m This value is less than the current value.\033[0;0m')
# conf = input('\n\t Are you ok with this? (y/n) ')
# if conf == 'n': break
# except ValueError:
# print('\n\t\033[32m Enter a number from 1 including 10. '
# 'Try again ...\033[0;0m')
# except KeyboardInterrupt:
# print('\n\n\t\033[32m Enter \033[1mq\033[0;0m\033[32m to quit\033[0;0m')
elif menu == 'bal': # adjust the balance of genetic operators'
print('\n\t The current balance of genetic operators is:')
print('\t\t Reproduction:', menu_dict['evolve_repro'])
tmp_repro = menu_dict['evolve_repro']
print('\t\t Point Mutation:', menu_dict['evolve_point'])
tmp_point = menu_dict['evolve_point']
print('\t\t Branch Mutation:', menu_dict['evolve_branch'])
tmp_branch = menu_dict['evolve_branch']
print('\t\t Crossover:', menu_dict['evolve_cross'], '\n')
tmp_cross = menu_dict['evolve_cross']
while True:
try:
query = input('\t Enter quantity of Trees to be generated by Reproduction: ')
if query == '':
break
elif int(query) in list(range(0, 1000)):
tmp_repro = int(query)
break
else:
raise ValueError()
except ValueError:
print('\n\t\033[32m Enter a number from 0 including %s. '
'Try again ...\033[0;0m' % str(menu_dict['tree_pop_max']))
except KeyboardInterrupt:
print('\n\n\t\033[32m Enter \033[1mq\033[0;0m\033[32m to quit\033[0;0m')
while True:
try:
query = input('\t Enter quantity of Trees to be generated by Point Mutation: ')
if query == '':
break
elif int(query) in list(range(0, 1000)):
tmp_point = int(query)
break
else:
raise ValueError()
except ValueError:
print('\n\t\033[32m Enter a number from 0 including %s. '
'Try again ...\033[0;0m' % str(menu_dict['tree_pop_max']))
except KeyboardInterrupt:
print('\n\n\t\033[32m Enter \033[1mq\033[0;0m\033[32m to quit\033[0;0m')
while True:
try:
query = input('\t Enter quantity of Trees to be generated by Branch Mutation: ')
if query == '':
break
elif int(query) in list(range(0, 1000)):
tmp_branch = int(query)
break
else:
raise ValueError()
except ValueError:
print('\n\t\033[32m Enter a number from 0 including %s. '
'Try again ...\033[0;0m' % str(menu_dict['tree_pop_max']))
except KeyboardInterrupt:
print('\n\n\t\033[32m Enter \033[1mq\033[0;0m\033[32m to quit\033[0;0m')
while True:
try:
query = input('\t Enter quantity of Trees to be generated by Crossover: ')
if query == '':
break
elif int(query) in list(range(0, 1000)):
tmp_cross = int(query)
break
else:
raise ValueError()
except ValueError:
print('\n\t\033[32m Enter a number from 0 including %s. '
'Try again ...\033[0;0m' % str(menu_dict['tree_pop_max']))
except KeyboardInterrupt:
print('\n\n\t\033[32m Enter \033[1mq\033[0;0m\033[32m to quit\033[0;0m')
if tmp_repro + tmp_point + tmp_branch + tmp_cross != menu_dict['tree_pop_max']:
print('\n\t The sum of the above does not equal %s. '
'Try again ...' % str(menu_dict['tree_pop_max']))
else:
print('\n\t The revised balance of genetic operators is:')
print('\t\t Reproduction:', tmp_repro)
menu_dict['evolve_repro'] = tmp_repro
print('\t\t Point Mutation:', tmp_point)
menu_dict['evolve_point'] = tmp_point
print('\t\t Branch Mutation:', tmp_branch)
menu_dict['evolve_branch'] = tmp_branch
print('\t\t Crossover:', tmp_cross)
menu_dict['evolve_cross'] = tmp_cross
elif menu == 'l': # display dictionary of Trees with the best fitness score
print('\n\t The leading Trees and their associated expressions are:')
for n in sorted(menu_dict['fittest_dict']):
print('\t ', n, ':', menu_dict['fittest_dict'][n])
elif menu == 'pop': # list Trees in the current population
if menu_dict['next_gen_len'] == 0:
menu_dict['input_a'] = 'population'
else:
menu_dict['input_a'] = 'next_gen'
elif menu == 'e': # evaluate a Tree against the TEST data
has_next_gen = menu_dict['next_gen_len'] > 0
pop_len_key = 'next_gen_len' if has_next_gen else 'population_len'
while True:
try:
query = input('\n\t Select a Tree to evaluate: ')
if query == '':
break
elif int(query) in range(1, menu_dict[pop_len_key] + 1):
menu_dict['input_a'] = 'eval'
menu_dict['input_b'] = int(query)
break
else:
raise ValueError()
except ValueError:
print('\n\t\033[32m Enter a number from 1 including %s. '
'Try again ...\033[0;0m' % str(menu_dict[pop_len_key] + 1))
except KeyboardInterrupt:
print('\n\n\t\033[32m Enter \033[1mq\033[0;0m\033[32m to quit\033[0;0m')
elif menu == 'p': # print a Tree to screen -- NEED TO ADD: SymPy graphical print option
has_next_gen = menu_dict['next_gen_len'] > 0
menu_dict['input_a'] = 'print_b' if has_next_gen else 'print_a'
pop_len_key = 'next_gen_len' if has_next_gen else 'population_len'
while True:
try:
query = input('\n\t Select a Tree to print: ')
if query == '':
break
elif (int(query) in list(range(1, menu_dict[pop_len_key] + 1))):
menu_dict['input_b'] = int(query)
break
else:
raise ValueError()
except ValueError:
print('\n\t\033[32m Enter a number from 1 including %s. '
'Try again ...\033[0;0m' % str(menu_dict[pop_len_key] + 1))
except KeyboardInterrupt:
print('\n\n\t\033[32m Enter \033[1mq\033[0;0m\033[32m to quit\033[0;0m')
elif menu == 'id':
print('\n\t Current generation:', menu_dict['gen_id'])
elif menu == 'dir':
print('\n\t Current working directory:', menu_dict['path'])
# NEED TO REBUILD
#elif menu == 'load': # load population_s to replace population_a
# while True:
# try:
# query = input('\n\t Overwrite the current population with population_s? '
# '(\033[1my\033[0;0m\033[32m/\033[1mn\033[0;0m\033[32m)\033[0;0m ')
# if query == 'y': menu_dict['input_a'] = 'load'; break
# elif query == 'n': break
# else: raise ValueError()
# except ValueError:
# print('\n\t\033[32m Enter (\033[1my\033[0;0m)es or (\033[1mn\033[0;0m)o. '
# 'Try again ...\033[0;0m')
# except KeyboardInterrupt:
# print('\n\n\t\033[32m Enter \033[1mq\033[0;0m\033[32m to quit\033[0;0m')
elif menu == 'w': # write the evolving next_gen_trees to disk
if menu_dict['gen_id'] > 1:
menu_dict['input_a'] = 'write'
else:
print('\n\t\033[36m The evolving next_gen_trees does not yet exist\033[0;0m')
elif menu == 'add': # add generations and continue a GP run
if menu_dict['gen_id'] == menu_dict['gen_max']:
while True:
try:
query = input('\n\t\033[3m You are at the end of your run.'
'\033[0;0m\n\t Add more generations to continue '
'(1-100 or ENTER to escape): ')
if query == '':
break
elif int(query) in list(range(1, 101)):
menu_dict['input_a'] = 'add'
menu_dict['input_b'] = int(query)
break
else:
raise ValueError()
except ValueError:
print('\n\t\033[32m Enter a number from 1 including 100. '
'Try again ...\033[0;0m')
except KeyboardInterrupt:
print('\n\n\t\033[32m Enter \033[1mq\033[0;0m\033[32m '
'to quit\033[0;0m')
else:
menu_dict['input_a'] = 'add'
elif menu == 'q': # quit (in case you didn't figure that one out :)
while True:
try:
query = input('\n\t\033[32m Quit Karoo GP? '
'(\033[1my\033[0;0m\033[32m/\033[1mn\033[0;0m'
'\033[32m)\033[0;0m ')
if query == 'y':
menu_dict['input_a'] = 'quit'
break
else:
break
except ValueError:
print('\n\t\033[32m Enter \033[1my\033[0;0m\033[32mes or '
'\033[1mn\033[0;0m\033[32mo\033[0;0m')
except KeyboardInterrupt:
print('\n\n\t\033[32m Enter \033[1mq\033[0;0m\033[32m '
'to quit\033[0;0m')
return menu_dict
|
42,345 |
def get_role_argspec(role, collection=None, playbook_dir=None, **kwargs):
'''
Run an ``ansible-doc`` command to get a role argument specification.
.. note:: Version added: 2.2
:param str role: Simple role name, or fully qualified collection role name, to query.
:param str collection: If specified, will be combined with the role name to form a fully qualified collection role name.
If this is supplied, the ``role`` param should not be fully qualified.
    :param str playbook_dir: This parameter is used to set the relative path to handle playbook-adjacent installed roles.
:param str runner_mode: The applicable values are ``pexpect`` and ``subprocess``. Default is set to ``subprocess``.
:param str host_cwd: The host current working directory to be mounted within the container (if enabled) and will be
the work directory within container.
:param dict envvars: Environment variables to be used when running Ansible. Environment variables will also be
read from ``env/envvars`` in ``private_data_dir``
:param dict passwords: A dictionary containing password prompt patterns and response values used when processing output from
Ansible. Passwords will also be read from ``env/passwords`` in ``private_data_dir``.
:param dict settings: A dictionary containing settings values for the ``ansible-runner`` runtime environment. These will also
be read from ``env/settings`` in ``private_data_dir``.
:param str ssh_key: The ssh private key passed to ``ssh-agent`` as part of the ansible-playbook run.
:param bool quiet: Disable all output
:param bool json_mode: Store event data in place of stdout on the console and in the stdout file
:param str artifact_dir: The path to the directory where artifacts should live, this defaults to 'artifacts' under the private data dir
:param str project_dir: The path to the playbook content, this defaults to 'project' within the private data dir
:param int rotate_artifacts: Keep at most n artifact directories, disable with a value of 0 which is the default
    :param int timeout: The timeout value in seconds that will be passed to either the ``pexpect`` or ``subprocess`` invocation
        (based on ``runner_mode`` selected) while executing the command. If the timeout is triggered, it will force cancel the execution.
:param bool process_isolation: Enable process isolation, using a container engine (e.g. podman).
:param str process_isolation_executable: Process isolation executable or container engine used to isolate execution. (default: podman)
:param str container_image: Container image to use when running an ansible task (default: quay.io/ansible/ansible-runner:devel)
    :param list container_volume_mounts: List of bind mounts in the form 'host_dir:/container_dir:labels'. (default: None)
:param list container_options: List of container options to pass to execution engine.
:param str container_workdir: The working directory within the container.
:param str fact_cache: A string that will be used as the name for the subdirectory of the fact cache in artifacts directory.
This is only used for 'jsonfile' type fact caches.
:param str fact_cache_type: A string of the type of fact cache to use. Defaults to 'jsonfile'.
:param str private_data_dir: The directory containing all runner metadata needed to invoke the runner
module. Output artifacts will also be stored here for later consumption.
:param str ident: The run identifier for this invocation of Runner. Will be used to create and name
the artifact directory holding the results of the invocation.
:param function event_handler: An optional callback that will be invoked any time an event is received by Runner itself, return True to keep the event
:param function cancel_callback: An optional callback that can inform runner to cancel (returning True) or not (returning False)
:param function finished_callback: An optional callback that will be invoked at shutdown after process cleanup.
:param function status_handler: An optional callback that will be invoked any time the status changes (e.g...started, running, failed, successful, timeout)
:param function artifacts_handler: An optional callback that will be invoked at the end of the run to deal with the artifacts from the run.
    :param bool check_job_event_data: Check if job events data is completely generated. If event data is not completely generated and the
        value is set to 'True', it will raise an 'AnsibleRunnerException' exception; if set to 'False', it logs a debug message and continues execution.
        Default value is 'False'.
:returns: A tuple of response and error string. The response is a python dictionary object
(as returned by ansible-doc JSON output) containing each role found, or an empty dict
if none are found.
'''
event_callback_handler = kwargs.pop('event_handler', None)
status_callback_handler = kwargs.pop('status_handler', None)
artifacts_handler = kwargs.pop('artifacts_handler', None)
cancel_callback = kwargs.pop('cancel_callback', None)
finished_callback = kwargs.pop('finished_callback', None)
rd = DocConfig(**kwargs)
rd.prepare_role_argspec_command(role, collection, playbook_dir)
r = Runner(rd,
event_handler=event_callback_handler,
status_handler=status_callback_handler,
artifacts_handler=artifacts_handler,
cancel_callback=cancel_callback,
finished_callback=finished_callback)
r.run()
response = r.stdout.read()
error = r.stderr.read()
if response:
response = json.loads(sanitize_json_response(response))
return response, error
|
def get_role_argspec(role, collection=None, playbook_dir=None, **kwargs):
'''
Run an ``ansible-doc`` command to get a role argument specification.
.. note:: Version added: 2.2
:param str role: Simple role name, or fully qualified collection role name, to query.
:param str collection: If specified, will be combined with the role name to form a fully qualified collection role name.
If this is supplied, the ``role`` param should not be fully qualified.
    :param str playbook_dir: This parameter is used to set the relative path to handle playbook-adjacent installed roles.
:param str runner_mode: The applicable values are ``pexpect`` and ``subprocess``. Default is set to ``subprocess``.
:param str host_cwd: The host current working directory to be mounted within the container (if enabled) and will be
the work directory within container.
:param dict envvars: Environment variables to be used when running Ansible. Environment variables will also be
read from ``env/envvars`` in ``private_data_dir``
:param dict passwords: A dictionary containing password prompt patterns and response values used when processing output from
Ansible. Passwords will also be read from ``env/passwords`` in ``private_data_dir``.
:param dict settings: A dictionary containing settings values for the ``ansible-runner`` runtime environment. These will also
be read from ``env/settings`` in ``private_data_dir``.
:param str ssh_key: The ssh private key passed to ``ssh-agent`` as part of the ansible-playbook run.
:param bool quiet: Disable all output
:param bool json_mode: Store event data in place of stdout on the console and in the stdout file
:param str artifact_dir: The path to the directory where artifacts should live, this defaults to 'artifacts' under the private data dir
:param str project_dir: The path to the playbook content, this defaults to 'project' within the private data dir
:param int rotate_artifacts: Keep at most n artifact directories, disable with a value of 0 which is the default
    :param int timeout: The timeout value in seconds that will be passed to either the ``pexpect`` or ``subprocess`` invocation
        (based on ``runner_mode`` selected) while executing the command. If the timeout is triggered, it will force cancel the execution.
:param bool process_isolation: Enable process isolation, using a container engine such as podman.
:param str process_isolation_executable: Process isolation executable or container engine used to isolate execution. (default: podman)
:param str container_image: Container image to use when running an ansible task (default: quay.io/ansible/ansible-runner:devel)
    :param list container_volume_mounts: List of bind mounts in the form 'host_dir:/container_dir:labels'. (default: None)
:param list container_options: List of container options to pass to execution engine.
:param str container_workdir: The working directory within the container.
:param str fact_cache: A string that will be used as the name for the subdirectory of the fact cache in artifacts directory.
This is only used for 'jsonfile' type fact caches.
:param str fact_cache_type: A string of the type of fact cache to use. Defaults to 'jsonfile'.
:param str private_data_dir: The directory containing all runner metadata needed to invoke the runner
module. Output artifacts will also be stored here for later consumption.
:param str ident: The run identifier for this invocation of Runner. Will be used to create and name
the artifact directory holding the results of the invocation.
:param function event_handler: An optional callback that will be invoked any time an event is received by Runner itself, return True to keep the event
:param function cancel_callback: An optional callback that can inform runner to cancel (returning True) or not (returning False)
:param function finished_callback: An optional callback that will be invoked at shutdown after process cleanup.
:param function status_handler: An optional callback that will be invoked any time the status changes (e.g...started, running, failed, successful, timeout)
:param function artifacts_handler: An optional callback that will be invoked at the end of the run to deal with the artifacts from the run.
    :param bool check_job_event_data: Check if job events data is completely generated. If event data is not completely generated and the
        value is set to 'True', it will raise an 'AnsibleRunnerException' exception; if set to 'False', it logs a debug message and continues execution.
        Default value is 'False'.
:returns: A tuple of response and error string. The response is a python dictionary object
(as returned by ansible-doc JSON output) containing each role found, or an empty dict
if none are found.
'''
event_callback_handler = kwargs.pop('event_handler', None)
status_callback_handler = kwargs.pop('status_handler', None)
artifacts_handler = kwargs.pop('artifacts_handler', None)
cancel_callback = kwargs.pop('cancel_callback', None)
finished_callback = kwargs.pop('finished_callback', None)
rd = DocConfig(**kwargs)
rd.prepare_role_argspec_command(role, collection, playbook_dir)
r = Runner(rd,
event_handler=event_callback_handler,
status_handler=status_callback_handler,
artifacts_handler=artifacts_handler,
cancel_callback=cancel_callback,
finished_callback=finished_callback)
r.run()
response = r.stdout.read()
error = r.stderr.read()
if response:
response = json.loads(sanitize_json_response(response))
return response, error
|
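A hedged usage sketch of the helper documented above. The role name and private_data_dir are hypothetical placeholders, and the import path assumes the function lives in ansible_runner.interface as shown; adjust both to your environment.

from ansible_runner.interface import get_role_argspec

# Hypothetical role name and private data dir, for illustration only.
response, error = get_role_argspec(
    'my_namespace.my_collection.my_role',
    private_data_dir='/tmp/runner_demo',
    quiet=True,
)
if response:
    print('roles found:', list(response.keys()))
else:
    print('ansible-doc reported:', error)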
31,979 |
def get_indicators_to_format(indicator_searcher: IndicatorsSearcher, request_args: RequestArguments) ->\
Union[IO, IO[str]]:
"""
Finds indicators demisto.searchIndicators, and returns the indicators in file writen in requested format
Parameters:
indicator_searcher (IndicatorsSearcher): The indicator searcher used to look for indicators
request_args (RequestArguments)
Returns:
        (IO): indicators in a file written in the requested format
"""
f = tempfile.TemporaryFile(mode='w+t')
list_fields = replace_field_name_to_output_format(request_args.fields_to_present)
headers_was_writen = False
files_by_category = {} # type:Dict
try:
for ioc_res in indicator_searcher:
fetched_iocs = ioc_res.get('iocs') or []
for ioc in fetched_iocs:
if request_args.out_format == FORMAT_PROXYSG:
files_by_category = create_proxysg_out_format(ioc, files_by_category, request_args)
if request_args.out_format == FORMAT_MWG:
f.write(create_mwg_out_format(ioc, request_args, headers_was_writen))
headers_was_writen = True
if request_args.out_format == FORMAT_JSON:
f.write(create_json_out_format(list_fields, ioc, request_args, headers_was_writen))
headers_was_writen = True
if request_args.out_format == FORMAT_TEXT:
# save only the value and type of each indicator
f.write(str(json.dumps({"value": ioc.get("value"),
"indicator_type": ioc.get("indicator_type")})) + "\n")
if request_args.out_format == FORMAT_CSV:
f.write(create_csv_out_format(headers_was_writen, list_fields, ioc, request_args))
headers_was_writen = True
except Exception as e:
demisto.debug(e)
if request_args.out_format in [FORMAT_JSON, FORMAT_XSOAR_JSON, FORMAT_JSON_SEQ, FORMAT_XSOAR_JSON_SEQ]:
f.write(']')
if request_args.out_format == FORMAT_PROXYSG:
f = create_proxysg_all_category_out_format(f, files_by_category)
return f
|
def get_indicators_to_format(indicator_searcher: IndicatorsSearcher, request_args: RequestArguments) ->\
Union[IO, IO[str]]:
"""
Finds indicators using demisto.searchIndicators, and returns the indicators in file written in the requested format
Parameters:
indicator_searcher (IndicatorsSearcher): The indicator searcher used to look for indicators
request_args (RequestArguments)
Returns:
        (IO): indicators in a file written in the requested format
"""
f = tempfile.TemporaryFile(mode='w+t')
list_fields = replace_field_name_to_output_format(request_args.fields_to_present)
headers_was_writen = False
files_by_category = {} # type:Dict
try:
for ioc_res in indicator_searcher:
fetched_iocs = ioc_res.get('iocs') or []
for ioc in fetched_iocs:
if request_args.out_format == FORMAT_PROXYSG:
files_by_category = create_proxysg_out_format(ioc, files_by_category, request_args)
if request_args.out_format == FORMAT_MWG:
f.write(create_mwg_out_format(ioc, request_args, headers_was_writen))
headers_was_writen = True
if request_args.out_format == FORMAT_JSON:
f.write(create_json_out_format(list_fields, ioc, request_args, headers_was_writen))
headers_was_writen = True
if request_args.out_format == FORMAT_TEXT:
# save only the value and type of each indicator
f.write(str(json.dumps({"value": ioc.get("value"),
"indicator_type": ioc.get("indicator_type")})) + "\n")
if request_args.out_format == FORMAT_CSV:
f.write(create_csv_out_format(headers_was_writen, list_fields, ioc, request_args))
headers_was_writen = True
except Exception as e:
demisto.debug(e)
if request_args.out_format in [FORMAT_JSON, FORMAT_XSOAR_JSON, FORMAT_JSON_SEQ, FORMAT_XSOAR_JSON_SEQ]:
f.write(']')
if request_args.out_format == FORMAT_PROXYSG:
f = create_proxysg_all_category_out_format(f, files_by_category)
return f
|
56,264 |
def main():
args = parse()
core = Core()
log.info("Reading model {}".format(args.model))
ov_encoder = core.read_model(args.model)
inp_shapes = {name:obj.shape for obj in ov_encoder.inputs for name in obj.get_names()}
out_shapes = {name:obj.shape for obj in ov_encoder.outputs for name in obj.get_names()}
state_out_names = [n for n in out_shapes.keys() if "state" in n]
state_inp_names = [n for n in inp_shapes.keys() if "state" in n]
if len(state_inp_names) != len(state_out_names):
raise RuntimeError(
"Number of input states of the model ({}) is not equal to number of output states({})".format(len(state_inp_names),
len(state_out_names)))
state_param_num = sum(np.prod(inp_shapes[n]) for n in state_inp_names)
log.debug("State_param_num = {} ({:.1f}Mb)".format(state_param_num, state_param_num*4e-6))
# load model to the device
compiled_model = core.compile_model(ov_encoder, args.device)
infer_request = compiled_model.create_infer_request()
log.info('The model {} is loaded to {}'.format(args.model, args.device))
sample_inp, freq = wav_read(str(args.input))
sample_size = sample_inp.shape[0]
delay = 0
if "delay" in out_shapes:
infer_request.infer()
delay = infer_request.get_tensor("delay").data[0]
log.info("\tDelay: {} samples".format(delay))
sample_inp = np.pad(sample_inp,((0,delay),))
start_time = perf_counter()
input_size = inp_shapes["input"][1]
res = None
samples_out = []
while sample_inp is not None and sample_inp.shape[0] > 0:
if sample_inp.shape[0] > input_size:
input = sample_inp[:input_size]
sample_inp = sample_inp[input_size:]
else:
input = np.pad(sample_inp, ((0, input_size - sample_inp.shape[0]), ), mode='constant')
sample_inp = None
#forms input
inputs = {"input": input[None, :]}
#add states to input
for n in state_inp_names:
if res:
inputs[n] = infer_request.get_tensor(n.replace('inp', 'out')).data
else:
#on the first iteration fill states by zeros
inputs[n] = np.zeros(inp_shapes[n], dtype=np.float32)
infer_request.infer(inputs)
res = infer_request.get_tensor("output")
samples_out.append(copy.deepcopy(res.data).squeeze(0))
total_latency = perf_counter() - start_time
log.info("Metrics report:")
log.info("\tLatency: {:.1f} ms".format(total_latency * 1e3))
log.info("\tSample length: {:.1f} ms".format(len(samples_out)*input_size*1e3/freq))
log.info("\tSampling freq: {} Hz".format(freq))
#concat output patches and align with input
sample_out = np.concatenate(samples_out, 0)
sample_out = sample_out[delay:sample_size+delay]
wav_write(args.output, sample_out, freq)
|
def main():
args = parse()
core = Core()
log.info("Reading model {}".format(args.model))
ov_encoder = core.read_model(args.model)
inp_shapes = {name: obj.shape for obj in ov_encoder.inputs for name in obj.get_names()}
out_shapes = {name: obj.shape for obj in ov_encoder.outputs for name in obj.get_names()}
state_out_names = [n for n in out_shapes.keys() if "state" in n]
state_inp_names = [n for n in inp_shapes.keys() if "state" in n]
if len(state_inp_names) != len(state_out_names):
raise RuntimeError(
"Number of input states of the model ({}) is not equal to number of output states({})".format(len(state_inp_names),
len(state_out_names)))
state_param_num = sum(np.prod(inp_shapes[n]) for n in state_inp_names)
log.debug("State_param_num = {} ({:.1f}Mb)".format(state_param_num, state_param_num*4e-6))
# load model to the device
compiled_model = core.compile_model(ov_encoder, args.device)
infer_request = compiled_model.create_infer_request()
log.info('The model {} is loaded to {}'.format(args.model, args.device))
sample_inp, freq = wav_read(str(args.input))
sample_size = sample_inp.shape[0]
delay = 0
if "delay" in out_shapes:
infer_request.infer()
delay = infer_request.get_tensor("delay").data[0]
log.info("\tDelay: {} samples".format(delay))
sample_inp = np.pad(sample_inp,((0,delay),))
start_time = perf_counter()
input_size = inp_shapes["input"][1]
res = None
samples_out = []
while sample_inp is not None and sample_inp.shape[0] > 0:
if sample_inp.shape[0] > input_size:
input = sample_inp[:input_size]
sample_inp = sample_inp[input_size:]
else:
input = np.pad(sample_inp, ((0, input_size - sample_inp.shape[0]), ), mode='constant')
sample_inp = None
#forms input
inputs = {"input": input[None, :]}
#add states to input
for n in state_inp_names:
if res:
inputs[n] = infer_request.get_tensor(n.replace('inp', 'out')).data
else:
#on the first iteration fill states by zeros
inputs[n] = np.zeros(inp_shapes[n], dtype=np.float32)
infer_request.infer(inputs)
res = infer_request.get_tensor("output")
samples_out.append(copy.deepcopy(res.data).squeeze(0))
total_latency = perf_counter() - start_time
log.info("Metrics report:")
log.info("\tLatency: {:.1f} ms".format(total_latency * 1e3))
log.info("\tSample length: {:.1f} ms".format(len(samples_out)*input_size*1e3/freq))
log.info("\tSampling freq: {} Hz".format(freq))
#concat output patches and align with input
sample_out = np.concatenate(samples_out, 0)
sample_out = sample_out[delay:sample_size+delay]
wav_write(args.output, sample_out, freq)
|
59,420 |
def compile_oracle(func):
"""
Parses and type checks the callable ``func`` to compile it into a LogicNetwork that can be
synthesised into a``QuantumCircuit``.
Args:
func (callable): A callable (with type hints) to compile into a logic network.
Returns:
        LogicNetwork: An object that can be synthesised into a QuantumCircuit (via its ``synth()`` method).
"""
source = inspect.getsource(func).strip()
return LogicNetwork(source)
|
def compile_oracle(func):
"""
Parses and type checks the callable ``func`` to compile it into a ``LogicNetwork`` that can
be synthesised into a ``QuantumCircuit``.
Args:
func (callable): A callable (with type hints) to compile into a logic network.
Returns:
        LogicNetwork: An object that can be synthesised into a QuantumCircuit (via its ``synth()`` method).
"""
source = inspect.getsource(func).strip()
return LogicNetwork(source)
|
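Since compile_oracle only captures the function's source text before handing it to LogicNetwork, the inspect.getsource step can be shown on its own. A minimal sketch (run it from a file, as getsource needs source available on disk); the majority function is a made-up example:

import inspect

def majority(a: bool, b: bool, c: bool) -> bool:
    return (a and b) or (a and c) or (b and c)

source = inspect.getsource(majority).strip()
print(source.splitlines()[0])   # def majority(a: bool, b: bool, c: bool) -> bool: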
49,122 |
def test_orient_body_advanced():
q1, q2, q3 = dynamicsymbols('q1:4')
c1, c2, c3 = symbols('c1:4')
u1, u2, u3 = dynamicsymbols('q1:4', 1)
# Test with everything as dynamicsymbols
A, B = ReferenceFrame('A'), ReferenceFrame('B')
B.orient_body_fixed(A, (q1, q2, q3), 'zxy')
assert A.dcm(B) == Matrix([
[-sin(q1) * sin(q2) * sin(q3) + cos(q1) * cos(q3), -sin(q1) * cos(q2),
sin(q1) * sin(q2) * cos(q3) + sin(q3) * cos(q1)],
[sin(q1) * cos(q3) + sin(q2) * sin(q3) * cos(q1), cos(q1) * cos(q2),
sin(q1) * sin(q3) - sin(q2) * cos(q1) * cos(q3)],
[-sin(q3) * cos(q2), sin(q2), cos(q2) * cos(q3)]])
assert B.ang_vel_in(A).to_matrix(B) == Matrix([
[-sin(q3) * cos(q2) * u1 + cos(q3) * u2], [sin(q2) * u1 + u3],
[sin(q3) * u2 + cos(q2) * cos(q3) * u1]])
# Test with constant symbol
A, B = ReferenceFrame('A'), ReferenceFrame('B')
B.orient_body_fixed(A, (q1, c2, q3), 131)
assert A.dcm(B) == Matrix([
[cos(c2), -sin(c2) * cos(q3), sin(c2) * sin(q3)],
[sin(c2) * cos(q1), -sin(q1) * sin(q3) + cos(c2) * cos(q1) * cos(q3),
-sin(q1) * cos(q3) - sin(q3) * cos(c2) * cos(q1)],
[sin(c2) * sin(q1), sin(q1) * cos(c2) * cos(q3) + sin(q3) * cos(q1),
-sin(q1) * sin(q3) * cos(c2) + cos(q1) * cos(q3)]])
assert B.ang_vel_in(A).to_matrix(B) == Matrix([
[cos(c2) * u1 + u3], [-sin(c2) * cos(q3) * u1],
[sin(c2) * sin(q3) * u1]])
# Test all symbols not time dependent
A, B = ReferenceFrame('A'), ReferenceFrame('B')
B.orient_body_fixed(A, (c1, c2, c3), 123)
assert B.ang_vel_in(A) == Vector(0)
|
def test_orient_body_advanced():
q1, q2, q3 = dynamicsymbols('q1:4')
c1, c2, c3 = symbols('c1:4')
u1, u2, u3 = dynamicsymbols('q1:4', 1)
# Test with everything as dynamicsymbols
A, B = ReferenceFrame('A'), ReferenceFrame('B')
B.orient_body_fixed(A, (q1, q2, q3), 'zxy')
assert A.dcm(B) == Matrix([
[-sin(q1) * sin(q2) * sin(q3) + cos(q1) * cos(q3), -sin(q1) * cos(q2),
sin(q1) * sin(q2) * cos(q3) + sin(q3) * cos(q1)],
[sin(q1) * cos(q3) + sin(q2) * sin(q3) * cos(q1), cos(q1) * cos(q2),
sin(q1) * sin(q3) - sin(q2) * cos(q1) * cos(q3)],
[-sin(q3) * cos(q2), sin(q2), cos(q2) * cos(q3)]])
assert B.ang_vel_in(A).to_matrix(B) == Matrix([
[-sin(q3) * cos(q2) * u1 + cos(q3) * u2],
[sin(q2) * u1 + u3],
[sin(q3) * u2 + cos(q2) * cos(q3) * u1]])
# Test with constant symbol
A, B = ReferenceFrame('A'), ReferenceFrame('B')
B.orient_body_fixed(A, (q1, c2, q3), 131)
assert A.dcm(B) == Matrix([
[cos(c2), -sin(c2) * cos(q3), sin(c2) * sin(q3)],
[sin(c2) * cos(q1), -sin(q1) * sin(q3) + cos(c2) * cos(q1) * cos(q3),
-sin(q1) * cos(q3) - sin(q3) * cos(c2) * cos(q1)],
[sin(c2) * sin(q1), sin(q1) * cos(c2) * cos(q3) + sin(q3) * cos(q1),
-sin(q1) * sin(q3) * cos(c2) + cos(q1) * cos(q3)]])
assert B.ang_vel_in(A).to_matrix(B) == Matrix([
[cos(c2) * u1 + u3], [-sin(c2) * cos(q3) * u1],
[sin(c2) * sin(q3) * u1]])
# Test all symbols not time dependent
A, B = ReferenceFrame('A'), ReferenceFrame('B')
B.orient_body_fixed(A, (c1, c2, c3), 123)
assert B.ang_vel_in(A) == Vector(0)
|
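A hedged sketch of the API exercised by this test, assuming a recent SymPy, reduced to a single non-zero angle so the direction cosine matrix is easy to check by hand:

from sympy import symbols
from sympy.physics.vector import ReferenceFrame

theta = symbols('theta')
A, B = ReferenceFrame('A'), ReferenceFrame('B')
B.orient_body_fixed(A, (theta, 0, 0), 'zxy')   # only the first (z) rotation is non-zero

print(A.dcm(B))
# Matrix([[cos(theta), -sin(theta), 0], [sin(theta), cos(theta), 0], [0, 0, 1]])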
1,559 |
def test_recursion_decision_tree_vs_forest_and_gbdt():
# Make sure that the recursion method gives the same results on a
# DecisionTreeRegressor and a GradientBoostingRegressor or a
# RandomForestRegressor with 1 tree and equivalent parameters.
# Purely random dataset to avoid correlated features
n_samples = 100
n_features = 5
X = np.random.RandomState(0).randn(n_samples, n_features)
y = np.random.RandomState(0).randn(n_samples)
# The 'init' estimator for GBDT (here the average prediction) isn't taken
# into account with the recursion method, for technical reasons. We set
# the mean to 0 to that this 'bug' doesn't have any effect.
y = y - y.mean()
# set max_depth not too high to avoid splits with same gain but different
# features
max_depth = 5
forest = RandomForestRegressor(n_estimators=1, max_features=None,
bootstrap=False, max_depth=max_depth,
random_state=0)
# The forest will use ensemble.base._set_random_states to set the
# random_state of the tree sub-estimator. We simulate this here to have
# equivalent estimators.
equiv_random_state = check_random_state(0).randint(MAX_RAND_SEED)
gbdt = GradientBoostingRegressor(n_estimators=1, learning_rate=1,
criterion='mse', max_depth=max_depth,
random_state=equiv_random_state)
tree = DecisionTreeRegressor(max_depth=max_depth,
random_state=equiv_random_state)
forest.fit(X, y)
gbdt.fit(X, y)
tree.fit(X, y)
# sanity check
try:
assert_is_subtree(tree.tree_, gbdt[0, 0].tree_)
assert_is_subtree(tree.tree_, forest[0].tree_)
except AssertionError:
# For some reason the trees aren't exactly equal on 32bits, so the PDs
# cannot be equal either.
assert _IS_32BIT
return
grid = np.random.RandomState(0).randn(50).reshape(-1, 1)
for f in range(n_features):
features = np.array([f], dtype=np.int32)
pdp_forest = _partial_dependence_recursion(forest, grid, features)
pdp_gbdt = _partial_dependence_recursion(gbdt, grid, features)
pdp_tree = _partial_dependence_recursion(tree, grid, features)
np.testing.assert_allclose(pdp_gbdt, pdp_tree)
np.testing.assert_allclose(pdp_forest, pdp_tree)
|
def test_recursion_decision_tree_vs_forest_and_gbdt():
# Make sure that the recursion method gives the same results on a
# DecisionTreeRegressor and a GradientBoostingRegressor or a
# RandomForestRegressor with 1 tree and equivalent parameters.
# Purely random dataset to avoid correlated features
n_samples = 100
n_features = 5
X = np.random.RandomState(0).randn(n_samples, n_features)
y = np.random.RandomState(0).randn(n_samples)
# The 'init' estimator for GBDT (here the average prediction) isn't taken
# into account with the recursion method, for technical reasons. We set
    # the mean to 0 so that this 'bug' doesn't have any effect.
y = y - y.mean()
# set max_depth not too high to avoid splits with same gain but different
# features
max_depth = 5
forest = RandomForestRegressor(n_estimators=1, max_features=None,
bootstrap=False, max_depth=max_depth,
random_state=0)
# The forest will use ensemble.base._set_random_states to set the
# random_state of the tree sub-estimator. We simulate this here to have
# equivalent estimators.
equiv_random_state = check_random_state(0).randint(MAX_RAND_SEED)
gbdt = GradientBoostingRegressor(n_estimators=1, learning_rate=1,
criterion='mse', max_depth=max_depth,
random_state=equiv_random_state)
tree = DecisionTreeRegressor(max_depth=max_depth,
random_state=equiv_random_state)
forest.fit(X, y)
gbdt.fit(X, y)
tree.fit(X, y)
# sanity check
try:
assert_is_subtree(tree.tree_, gbdt[0, 0].tree_)
assert_is_subtree(tree.tree_, forest[0].tree_)
except AssertionError:
# For some reason the trees aren't exactly equal on 32bits, so the PDs
# cannot be equal either.
assert _IS_32BIT
return
grid = rng.randn(50).reshape(-1, 1)
for f in range(n_features):
features = np.array([f], dtype=np.int32)
pdp_forest = _partial_dependence_recursion(forest, grid, features)
pdp_gbdt = _partial_dependence_recursion(gbdt, grid, features)
pdp_tree = _partial_dependence_recursion(tree, grid, features)
np.testing.assert_allclose(pdp_gbdt, pdp_tree)
np.testing.assert_allclose(pdp_forest, pdp_tree)
|
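This pair compares the 'recursion' partial-dependence implementation across equivalent tree models. A minimal sketch of the same idea through the public API, assuming a recent scikit-learn where partial_dependence returns a Bunch with an "average" field; with a mean-centered target the recursion and brute-force methods should agree closely for a GBDT, since the ignored 'init' estimator then predicts (roughly) zero:

import numpy as np
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.inspection import partial_dependence

rng = np.random.RandomState(0)
X = rng.randn(100, 5)
y = rng.randn(100)
y -= y.mean()                      # recursion ignores the init estimator, so center y

gbdt = GradientBoostingRegressor(n_estimators=10, random_state=0).fit(X, y)

pd_recursion = partial_dependence(gbdt, X, [0], method="recursion")
pd_brute = partial_dependence(gbdt, X, [0], method="brute")

print(pd_recursion["average"][0][:5])
print(pd_brute["average"][0][:5])   # should nearly coincide with the centered target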
43,690 |
def edge_driver(graph, reward):
r"""Returns the edge-driver cost Hamiltonian component.
Given some graph, :math:`G`, this method will return a Hamiltonian that assigns
lower energies to two-bit bitstrings supplied in ``reward``. Each bitstring corresponds
to the state of some edge in :math:`G`, which is defined by the states of its vertex endpoints.
See usage details for more information.
Args:
graph (nx.Graph): The graph on which the Hamiltonian is defined
reward (list[str]): The list of two-bit bitstrings that are assigned a lower energy by the Hamiltonian
Returns:
.Hamiltonian
**Example**
>>> graph = nx.Graph([(0, 1), (1, 2)])
>>> hamiltonian = qaoa.edge_driver(graph, ["11", "10", "01"])
>>> print(hamiltonian)
(0.25) [Z0 Z1] + (0.25) [Z0] + (0.25) [Z1] + (0.25) [Z1 Z2] + (0.25) [Z2]
    .. UsageDetails::
The goal of many combinatorial problems that can be solved with QAOA is to
find a `Graph colouring <https://en.wikipedia.org/wiki/Graph_coloring>`__ of some supplied
graph :math:`G`, that minimizes some cost function. It is oftentimes natural to consider the class
of graph colouring problems that only admit two colours, as we can easily encode these two colours
using the :math:`|1\rangle` and :math:`|0\rangle` states of qubits. Therefore, given
some graph :math:`G`, each edge of the graph can be described by a pair of qubits, :math:`|00\rangle`,
    :math:`|01\rangle`, :math:`|10\rangle`, or :math:`|11\rangle`, corresponding to the colourings of its endpoints.
When constructing QAOA cost functions, one must "penalize" certain states of the graph, and "reward"
others, by assigning higher and lower energies to these respective configurations. Given a set of vertex-colour
pairs (which each describe a possible state of a graph edge), the `edge_driver`
method will output a Hamiltonian that rewards the edges in the set, and penalizes the others. For example,
    given the set: :math:`\{|00\rangle, \ |01\rangle, \ |10\rangle\}` and the graph :math:`G`,
the `edge_driver` method will output the following Hamiltonian:
..math:: H \ = \ \frac{1}{4} \displaystyle\sum_{(i, j) \in E(G)} \big( Z_{i} Z_{j} \ - \ Z_{i} \ - \ Z_{j} \big)
where :math:`E(G)` is the set of edges of :math:`G`, and :math:`Z_i` is the Pauli-Z operator acting on the
:math:`i`-th wire. As can be checked, this Hamiltonian assigns an energy of :math:`-1/4` to the states
:math:`|00\rangle`, :math:`|01\rangle` and :math:`|10\rangle`, and an energy of :math:`3/4` to the state
:math:`|11\rangle`.
.. Note::
    If either of the states :math:`|01\rangle` or :math:`|10\rangle` is contained in ``reward``, then so too
must :math:`|10\rangle` or :math:`|01\rangle`, respectively. Within a graph, there is no notion of "order"
of edge endpoints, so these two states are effectively the same.
"""
allowed = ["00", "01", "10", "11"]
if not all([e in allowed for e in reward]):
raise ValueError("Encountered invalid entry in 'reward', expected 2-bit bitstrings.")
if "01" in reward and "10" not in reward or "10" in reward and "01" not in reward:
raise ValueError(
"'reward' cannot contain either '10' or '01', must contain neither or both."
)
if not isinstance(graph, nx.Graph):
raise ValueError("Input graph must be a nx.Graph, got {}".format(type(graph).__name__))
coeffs = []
ops = []
if len(reward) == 0 or len(reward) == 4:
coeffs = [1 for _ in graph.nodes]
ops = [qml.Identity(v) for v in graph.nodes]
else:
reward = list(set(reward) - {"01"})
sign = -1
if len(reward) == 2:
reward = list({"00", "10", "11"} - set(reward))
sign = 1
reward = reward[0]
if reward == "00":
for e in graph.edges:
coeffs.extend([0.25 * sign, 0.25 * sign, 0.25 * sign])
ops.extend(
[qml.PauliZ(e[0]) @ qml.PauliZ(e[1]), qml.PauliZ(e[0]), qml.PauliZ(e[1])]
)
if reward == "10":
for e in graph.edges:
coeffs.append(-0.5 * sign)
ops.append(qml.PauliZ(e[0]) @ qml.PauliZ(e[1]))
if reward == "11":
for e in graph.edges:
coeffs.extend([0.25 * sign, -0.25 * sign, -0.25 * sign])
ops.extend(
[qml.PauliZ(e[0]) @ qml.PauliZ(e[1]), qml.PauliZ(e[0]), qml.PauliZ(e[1])]
)
return qml.Hamiltonian(coeffs, ops)
|
def edge_driver(graph, reward):
r"""Returns the edge-driver cost Hamiltonian component.
Given some graph, :math:`G`, this method will return a Hamiltonian that assigns
lower energies to two-bit bitstrings supplied in ``reward``. Each bitstring corresponds
to the state of some edge in :math:`G`, which is defined by the states of its vertex endpoints.
See usage details for more information.
Args:
graph (nx.Graph): The graph on which the Hamiltonian is defined
reward (list[str]): The list of two-bit bitstrings that are assigned a lower energy by the Hamiltonian
Returns:
.Hamiltonian
**Example**
>>> graph = nx.Graph([(0, 1), (1, 2)])
>>> hamiltonian = qaoa.edge_driver(graph, ["11", "10", "01"])
>>> print(hamiltonian)
(0.25) [Z0 Z1] + (0.25) [Z0] + (0.25) [Z1] + (0.25) [Z1 Z2] + (0.25) [Z2]
    .. UsageDetails::
The goal of many combinatorial problems that can be solved with QAOA is to
find a `Graph colouring <https://en.wikipedia.org/wiki/Graph_coloring>`__ of some supplied
graph :math:`G`, that minimizes some cost function. It is oftentimes natural to consider the class
of graph colouring problems that only admit two colours, as we can easily encode these two colours
using the :math:`|1\rangle` and :math:`|0\rangle` states of qubits. Therefore, given
some graph :math:`G`, each edge of the graph can be described by a pair of qubits, :math:`|00\rangle`,
    :math:`|01\rangle`, :math:`|10\rangle`, or :math:`|11\rangle`, corresponding to the colourings of its endpoints.
When constructing QAOA cost functions, one must "penalize" certain states of the graph, and "reward"
others, by assigning higher and lower energies to these respective configurations. Given a set of vertex-colour
pairs (which each describe a possible state of a graph edge), the `edge_driver`
method will output a Hamiltonian that rewards the edges in the set, and penalizes the others. For example,
    given the set: :math:`\{|00\rangle, \ |01\rangle, \ |10\rangle\}` and the graph :math:`G`,
the `edge_driver` method will output the following Hamiltonian:
.. math:: H \ = \ \frac{1}{4} \displaystyle\sum_{(i, j) \in E(G)} \big( Z_{i} Z_{j} \ - \ Z_{i} \ - \ Z_{j} \big)
where :math:`E(G)` is the set of edges of :math:`G`, and :math:`Z_i` is the Pauli-Z operator acting on the
:math:`i`-th wire. As can be checked, this Hamiltonian assigns an energy of :math:`-1/4` to the states
:math:`|00\rangle`, :math:`|01\rangle` and :math:`|10\rangle`, and an energy of :math:`3/4` to the state
:math:`|11\rangle`.
.. Note::
    If either of the states :math:`|01\rangle` or :math:`|10\rangle` is contained in ``reward``, then so too
must :math:`|10\rangle` or :math:`|01\rangle`, respectively. Within a graph, there is no notion of "order"
of edge endpoints, so these two states are effectively the same.
"""
allowed = ["00", "01", "10", "11"]
if not all([e in allowed for e in reward]):
raise ValueError("Encountered invalid entry in 'reward', expected 2-bit bitstrings.")
if "01" in reward and "10" not in reward or "10" in reward and "01" not in reward:
raise ValueError(
"'reward' cannot contain either '10' or '01', must contain neither or both."
)
if not isinstance(graph, nx.Graph):
raise ValueError("Input graph must be a nx.Graph, got {}".format(type(graph).__name__))
coeffs = []
ops = []
if len(reward) == 0 or len(reward) == 4:
coeffs = [1 for _ in graph.nodes]
ops = [qml.Identity(v) for v in graph.nodes]
else:
reward = list(set(reward) - {"01"})
sign = -1
if len(reward) == 2:
reward = list({"00", "10", "11"} - set(reward))
sign = 1
reward = reward[0]
if reward == "00":
for e in graph.edges:
coeffs.extend([0.25 * sign, 0.25 * sign, 0.25 * sign])
ops.extend(
[qml.PauliZ(e[0]) @ qml.PauliZ(e[1]), qml.PauliZ(e[0]), qml.PauliZ(e[1])]
)
if reward == "10":
for e in graph.edges:
coeffs.append(-0.5 * sign)
ops.append(qml.PauliZ(e[0]) @ qml.PauliZ(e[1]))
if reward == "11":
for e in graph.edges:
coeffs.extend([0.25 * sign, -0.25 * sign, -0.25 * sign])
ops.extend(
[qml.PauliZ(e[0]) @ qml.PauliZ(e[1]), qml.PauliZ(e[0]), qml.PauliZ(e[1])]
)
return qml.Hamiltonian(coeffs, ops)
|
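The docstring above states that rewarding the set {|00>, |01>, |10>} yields energies -1/4 for those states and 3/4 for |11>. A small NumPy check of that arithmetic, independent of PennyLane:

import numpy as np

Z = np.diag([1.0, -1.0])
I = np.eye(2)

# H = 1/4 (Z_i Z_j - Z_i - Z_j) for a single edge (i, j).
H = 0.25 * (np.kron(Z, Z) - np.kron(Z, I) - np.kron(I, Z))

# Diagonal entries are the energies of |00>, |01>, |10>, |11>.
print(np.diag(H))   # [-0.25 -0.25 -0.25  0.75]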
37,068 |
def assemble_schedules(schedules, qobj_id, qobj_header, run_config):
"""Assembles a list of schedules into a qobj which can be run on the backend.
Args:
schedules (list[Schedule]): schedules to assemble
qobj_id (int): identifier for the generated qobj
qobj_header (QobjHeader): header to pass to the results
run_config (RunConfig): configuration of the runtime environment
Returns:
PulseQobj: the Qobj to be run on the backends
Raises:
QiskitError: when invalid schedules or configs are provided
"""
if hasattr(run_config, 'instruction_converter'):
instruction_converter = run_config.instruction_converter
else:
instruction_converter = InstructionToQobjConverter
qobj_config = run_config.to_dict()
qubit_lo_range = qobj_config.pop('qubit_lo_range')
meas_lo_range = qobj_config.pop('meas_lo_range')
meas_map = qobj_config.pop('meas_map', None)
memory_slots = qobj_config.pop('memory_slots', None)
max_memory_slot = 0
instruction_converter = instruction_converter(PulseQobjInstruction, **qobj_config)
lo_converter = LoConfigConverter(PulseQobjExperimentConfig, qubit_lo_range=qubit_lo_range,
meas_lo_range=meas_lo_range, **qobj_config)
# Pack everything into the Qobj
qobj_schedules = []
user_pulselib = {}
for idx, schedule in enumerate(schedules):
# instructions
qobj_instructions = []
# Instructions are returned as tuple of shifted time and instruction
for shift, instruction in schedule.instructions:
# TODO: support conditional gate
if isinstance(instruction, PulseInstruction):
name = instruction.command.name
if name in user_pulselib and instruction.command != user_pulselib[name]:
name = "{0}-{1:x}".format(name, hash(instruction.command.samples.tostring()))
instruction = PulseInstruction(
command=SamplePulse(name=name, samples=instruction.command.samples),
name=instruction.name,
channel=instruction.timeslots.channels[0])
# add samples to pulse library
user_pulselib[name] = instruction.command
if isinstance(instruction, AcquireInstruction):
if meas_map:
# verify all acquires satisfy meas_map
_validate_meas_map(instruction, meas_map)
max_memory_slot = max(max_memory_slot,
*[slot.index for slot in instruction.mem_slots])
qobj_instructions.append(instruction_converter(shift, instruction))
# experiment header
qobj_experiment_header = QobjExperimentHeader(
name=schedule.name or 'Experiment-%d' % idx
)
qobj_schedules.append({
'header': qobj_experiment_header,
'instructions': qobj_instructions
})
# set number of memoryslots
qobj_config['memory_slots'] = memory_slots or max_memory_slot
# setup pulse_library
qobj_config['pulse_library'] = [PulseLibraryItem(name=pulse.name, samples=pulse.samples)
for pulse in user_pulselib.values()]
# create qobj experiment field
experiments = []
schedule_los = qobj_config.pop('schedule_los', [])
if len(schedule_los) == 1:
lo_dict = schedule_los[0]
# update global config
q_los = lo_converter.get_qubit_los(lo_dict)
if q_los:
qobj_config['qubit_lo_freq'] = q_los
m_los = lo_converter.get_meas_los(lo_dict)
if m_los:
qobj_config['meas_lo_freq'] = m_los
if schedule_los:
# multiple frequency setups
if len(qobj_schedules) == 1:
# frequency sweep
for lo_dict in schedule_los:
experiments.append(PulseQobjExperiment(
instructions=qobj_schedules[0]['instructions'],
header=qobj_schedules[0]['header'],
config=lo_converter(lo_dict)
))
elif len(qobj_schedules) == len(schedule_los):
# n:n setup
for lo_dict, schedule in zip(schedule_los, qobj_schedules):
experiments.append(PulseQobjExperiment(
instructions=schedule['instructions'],
header=schedule['header'],
config=lo_converter(lo_dict)
))
else:
raise QiskitError('Invalid LO setting is specified. '
'The LO should be configured for each schedule, or '
'single setup for all schedules (unique), or '
'multiple setups for a single schedule (frequency sweep),'
'or no LO configured at all.')
else:
# unique frequency setup
for schedule in qobj_schedules:
experiments.append(PulseQobjExperiment(
instructions=schedule['instructions'],
header=schedule['header'],
))
qobj_config = PulseQobjConfig(**qobj_config)
return PulseQobj(qobj_id=qobj_id,
config=qobj_config,
experiments=experiments,
header=qobj_header)
|
def assemble_schedules(schedules, qobj_id, qobj_header, run_config):
"""Assembles a list of schedules into a qobj which can be run on the backend.
Args:
schedules (list[Schedule]): schedules to assemble
qobj_id (int): identifier for the generated qobj
qobj_header (QobjHeader): header to pass to the results
run_config (RunConfig): configuration of the runtime environment
Returns:
PulseQobj: the Qobj to be run on the backends
Raises:
QiskitError: when invalid schedules or configs are provided
"""
if hasattr(run_config, 'instruction_converter'):
instruction_converter = run_config.instruction_converter
else:
instruction_converter = InstructionToQobjConverter
qobj_config = run_config.to_dict()
qubit_lo_range = qobj_config.pop('qubit_lo_range')
meas_lo_range = qobj_config.pop('meas_lo_range')
meas_map = qobj_config.pop('meas_map', None)
memory_slots = qobj_config.pop('memory_slots', None)
max_memory_slot = 0
instruction_converter = instruction_converter(PulseQobjInstruction, **qobj_config)
lo_converter = LoConfigConverter(PulseQobjExperimentConfig, qubit_lo_range=qubit_lo_range,
meas_lo_range=meas_lo_range, **qobj_config)
# Pack everything into the Qobj
qobj_schedules = []
user_pulselib = {}
for idx, schedule in enumerate(schedules):
# instructions
qobj_instructions = []
# Instructions are returned as tuple of shifted time and instruction
for shift, instruction in schedule.instructions:
# TODO: support conditional gate
if isinstance(instruction, PulseInstruction):
name = instruction.command.name
if name in user_pulselib and instruction.command != user_pulselib[name]:
name = "{0}-{1:x}".format(name, hash(instruction.command.samples.tostring()))
instruction = PulseInstruction(
command=SamplePulse(name=name, samples=instruction.command.samples),
name=instruction.name,
channel=instruction.timeslots.channels[0])
# add samples to pulse library
user_pulselib[name] = instruction.command
if isinstance(instruction, AcquireInstruction):
if meas_map:
# verify all acquires satisfy meas_map
_validate_meas_map(instruction, meas_map)
max_memory_slot = max(max_memory_slot,
*[slot.index for slot in instruction.mem_slots])
qobj_instructions.append(instruction_converter(shift, instruction))
# experiment header
qobj_experiment_header = QobjExperimentHeader(
name=schedule.name or 'Experiment-%d' % idx
)
qobj_schedules.append({
'header': qobj_experiment_header,
'instructions': qobj_instructions
})
# set number of memory slots
qobj_config['memory_slots'] = memory_slots or max_memory_slot
# setup pulse_library
qobj_config['pulse_library'] = [PulseLibraryItem(name=pulse.name, samples=pulse.samples)
for pulse in user_pulselib.values()]
# create qobj experiment field
experiments = []
schedule_los = qobj_config.pop('schedule_los', [])
if len(schedule_los) == 1:
lo_dict = schedule_los[0]
# update global config
q_los = lo_converter.get_qubit_los(lo_dict)
if q_los:
qobj_config['qubit_lo_freq'] = q_los
m_los = lo_converter.get_meas_los(lo_dict)
if m_los:
qobj_config['meas_lo_freq'] = m_los
if schedule_los:
# multiple frequency setups
if len(qobj_schedules) == 1:
# frequency sweep
for lo_dict in schedule_los:
experiments.append(PulseQobjExperiment(
instructions=qobj_schedules[0]['instructions'],
header=qobj_schedules[0]['header'],
config=lo_converter(lo_dict)
))
elif len(qobj_schedules) == len(schedule_los):
# n:n setup
for lo_dict, schedule in zip(schedule_los, qobj_schedules):
experiments.append(PulseQobjExperiment(
instructions=schedule['instructions'],
header=schedule['header'],
config=lo_converter(lo_dict)
))
else:
raise QiskitError('Invalid LO setting is specified. '
'The LO should be configured for each schedule, or '
'single setup for all schedules (unique), or '
'multiple setups for a single schedule (frequency sweep),'
'or no LO configured at all.')
else:
# unique frequency setup
for schedule in qobj_schedules:
experiments.append(PulseQobjExperiment(
instructions=schedule['instructions'],
header=schedule['header'],
))
qobj_config = PulseQobjConfig(**qobj_config)
return PulseQobj(qobj_id=qobj_id,
config=qobj_config,
experiments=experiments,
header=qobj_header)
|
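One detail shared by both versions is how pulses with a clashing name but different samples are renamed with a hash suffix before being stored in the pulse library. A standalone sketch of that de-duplication pattern (illustrative only, not Qiskit code):

import numpy as np

pulselib = {}

def register(name, samples):
    # If the name is taken by a different waveform, derive a unique name from the sample hash.
    if name in pulselib and not np.array_equal(pulselib[name], samples):
        name = "{0}-{1:x}".format(name, hash(samples.tobytes()))
    pulselib[name] = samples
    return name

print(register("gauss", np.array([0.1, 0.2, 0.1])))   # 'gauss'
print(register("gauss", np.array([0.1, 0.2, 0.1])))   # 'gauss' again (identical samples reuse the entry)
print(register("gauss", np.array([0.3, 0.4, 0.3])))   # 'gauss-<hash>' (same name, new samples)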
11,857 |
def pilinfo(out=None, supported_formats=True):
"""
Prints information about this installation of Pillow.
This function can be called with ``python -m PIL``.
:param out:
The output stream to print to. Defaults to ``sys.stdout`` if ``None``.
:param supported_formats:
If ``True``, a list of all supported image file formats will be printed.
"""
if out is None:
out = sys.stdout
Image.init()
print("-" * 68, file=out)
print("Pillow {}".format(PIL.__version__), file=out)
py_version = sys.version.splitlines()
print("Python {}".format(py_version[0].strip()), file=out)
for py_version in py_version[1:]:
print(" {}".format(py_version.strip()), file=out)
print("-" * 68, file=out)
print(
"Python modules loaded from {}".format(os.path.dirname(Image.__file__)),
file=out,
)
print(
"Binary modules loaded from {}".format(os.path.dirname(Image.core.__file__)),
file=out,
)
print("-" * 68, file=out)
for name, feature in [
("pil", "PIL CORE"),
("tkinter", "TKINTER"),
("freetype2", "FREETYPE2"),
("littlecms2", "LITTLECMS2"),
("webp", "WEBP"),
("transp_webp", "WEBP Transparency"),
("webp_mux", "WEBPMUX"),
("webp_anim", "WEBP Animation"),
("jpg", "JPEG"),
("jpg_2000", "OPENJPEG (JPEG2000)"),
("zlib", "ZLIB (PNG/ZIP)"),
("libtiff", "LIBTIFF"),
("raqm", "RAQM (Bidirectional Text)"),
("libimagequant", "LIBIMAGEQUANT (Quantization method)"),
("xcb", "XCB (X protocol)"),
]:
if check(name):
if name == "jpg" and check_feature("libjpeg_turbo"):
v = "libjpeg-turbo " + version_feature("libjpeg_turbo")
else:
v = version(name)
if v is not None:
t = "compiled for" if name in ("pil", "jpg") else "loaded"
print("---", feature, "support ok,", t, "version", v, file=out)
else:
print("---", feature, "support ok", file=out)
else:
print("***", feature, "support not installed", file=out)
print("-" * 68, file=out)
if supported_formats:
extensions = collections.defaultdict(list)
for ext, i in Image.EXTENSION.items():
extensions[i].append(ext)
for i in sorted(Image.ID):
line = "{}".format(i)
if i in Image.MIME:
line = "{} {}".format(line, Image.MIME[i])
print(line, file=out)
if i in extensions:
print(
"Extensions: {}".format(", ".join(sorted(extensions[i]))), file=out
)
features = []
if i in Image.OPEN:
features.append("open")
if i in Image.SAVE:
features.append("save")
if i in Image.SAVE_ALL:
features.append("save_all")
if i in Image.DECODERS:
features.append("decode")
if i in Image.ENCODERS:
features.append("encode")
print("Features: {}".format(", ".join(features)), file=out)
print("-" * 68, file=out)
|
def pilinfo(out=None, supported_formats=True):
"""
Prints information about this installation of Pillow.
This function can be called with ``python -m PIL``.
:param out:
The output stream to print to. Defaults to ``sys.stdout`` if ``None``.
:param supported_formats:
If ``True``, a list of all supported image file formats will be printed.
"""
if out is None:
out = sys.stdout
Image.init()
print("-" * 68, file=out)
print("Pillow {}".format(PIL.__version__), file=out)
py_version = sys.version.splitlines()
print("Python {}".format(py_version[0].strip()), file=out)
for py_version in py_version[1:]:
print(" {}".format(py_version.strip()), file=out)
print("-" * 68, file=out)
print(
"Python modules loaded from {}".format(os.path.dirname(Image.__file__)),
file=out,
)
print(
"Binary modules loaded from {}".format(os.path.dirname(Image.core.__file__)),
file=out,
)
print("-" * 68, file=out)
for name, feature in [
("pil", "PIL CORE"),
("tkinter", "TKINTER"),
("freetype2", "FREETYPE2"),
("littlecms2", "LITTLECMS2"),
("webp", "WEBP"),
("transp_webp", "WEBP Transparency"),
("webp_mux", "WEBPMUX"),
("webp_anim", "WEBP Animation"),
("jpg", "JPEG"),
("jpg_2000", "OPENJPEG (JPEG2000)"),
("zlib", "ZLIB (PNG/ZIP)"),
("libtiff", "LIBTIFF"),
("raqm", "RAQM (Bidirectional Text)"),
("libimagequant", "LIBIMAGEQUANT (Quantization method)"),
("xcb", "XCB (X protocol)"),
]:
if check(name):
if name == "jpg" and check_feature("libjpeg_turbo"):
v = "libjpeg-turbo " + version_feature("libjpeg_turbo")
else:
v = version(name)
if v is not None:
t = "compiled for" if name in ("pil", "jpg") else "loaded"
print("---", feature, "support ok,", t, v, file=out)
else:
print("---", feature, "support ok", file=out)
else:
print("***", feature, "support not installed", file=out)
print("-" * 68, file=out)
if supported_formats:
extensions = collections.defaultdict(list)
for ext, i in Image.EXTENSION.items():
extensions[i].append(ext)
for i in sorted(Image.ID):
line = "{}".format(i)
if i in Image.MIME:
line = "{} {}".format(line, Image.MIME[i])
print(line, file=out)
if i in extensions:
print(
"Extensions: {}".format(", ".join(sorted(extensions[i]))), file=out
)
features = []
if i in Image.OPEN:
features.append("open")
if i in Image.SAVE:
features.append("save")
if i in Image.SAVE_ALL:
features.append("save_all")
if i in Image.DECODERS:
features.append("decode")
if i in Image.ENCODERS:
features.append("encode")
print("Features: {}".format(", ".join(features)), file=out)
print("-" * 68, file=out)
|
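For reference, the report produced by both versions can be generated from the command line or from Python; a minimal usage sketch, assuming the function lives in PIL.features as in current Pillow releases:

# Command line:
#   python3 -m PIL

# From Python:
from PIL import features

features.pilinfo(supported_formats=False)   # omit the per-format listing for a shorter report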
43,979 |
def _autograd_is_independent_ana(func, *args, **kwargs):
"""Test analytically whether a function is independent of its arguments
using Autograd.
Args:
func (callable): Function to test for independence
args (tuple): Arguments for the function with respect to which
to test for independence
kwargs (dict): Keyword arguments for the function at which
(but not with respect to which) to test for independence
Returns:
        bool: Whether the function seems to not depend on its ``args``
analytically. That is, an output of ``True`` means that the
``args`` do *not* feed into the output.
In Autograd, we test this by sending a ``Box`` through the function and
testing whether the output is again a ``Box`` and on the same trace as
the input ``Box``. This means that we can trace actual *independence*
of the output from the input, not only whether the passed function is
constant.
The code is adapted from
`autograd.tracer.py::trace
<https://github.com/HIPS/autograd/blob/master/autograd/tracer.py#L7>`__.
"""
# pylint: disable=protected-access
node = VJPNode.new_root()
with trace_stack.new_trace() as t:
start_box = new_box(args, t, node)
end_box = func(*start_box, **kwargs)
if type(end_box) in [tuple, list]:
if any(isbox(_end) and _end._trace == start_box._trace for _end in end_box):
return False
elif isinstance(end_box, np.ndarray):
if end_box.ndim == 0:
end_box = [end_box.item()]
if any(isbox(_end) and _end._trace == start_box._trace for _end in end_box):
return False
else:
if isbox(end_box) and end_box._trace == start_box._trace:
return False
return True
|
def _autograd_is_independent_ana(func, *args, **kwargs):
"""Test analytically whether a function is independent of its arguments
using Autograd.
Args:
func (callable): Function to test for independence
args (tuple): Arguments for the function with respect to which
to test for independence
kwargs (dict): Keyword arguments for the function at which
(but not with respect to which) to test for independence
Returns:
        bool: Whether the function seems to not depend on its ``args``
analytically. That is, an output of ``True`` means that the
``args`` do *not* feed into the output.
In Autograd, we test this by sending a ``Box`` through the function and
testing whether the output is again a ``Box`` and on the same trace as
the input ``Box``. This means that we can trace actual *independence*
of the output from the input, not only whether the passed function is
constant.
The code is adapted from
`autograd.tracer.py::trace
<https://github.com/HIPS/autograd/blob/master/autograd/tracer.py#L7>`__.
"""
# pylint: disable=protected-access
node = VJPNode.new_root()
with trace_stack.new_trace() as t:
start_box = new_box(args, t, node)
end_box = func(*start_box, **kwargs)
if isinstance(end_box, (tuple, list)):
if any(isbox(_end) and _end._trace == start_box._trace for _end in end_box):
return False
elif isinstance(end_box, np.ndarray):
if end_box.ndim == 0:
end_box = [end_box.item()]
if any(isbox(_end) and _end._trace == start_box._trace for _end in end_box):
return False
else:
if isbox(end_box) and end_box._trace == start_box._trace:
return False
return True
|
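Both versions rely on Autograd's tracer: a Box is pushed through the function and the output is inspected for membership on the same trace. A minimal standalone sketch of that mechanism, assuming the autograd package is installed; note the helper below returns True when the output *does* depend on the argument, the opposite convention of the function above:

import autograd.numpy as anp
from autograd.core import VJPNode
from autograd.tracer import isbox, new_box, trace_stack

def depends_on_arg(func, x):
    # Send a Box through func; if the output is a Box on the same trace, it depends on x.
    node = VJPNode.new_root()
    with trace_stack.new_trace() as t:
        start_box = new_box(x, t, node)
        end_box = func(start_box)
    return isbox(end_box) and end_box._trace == start_box._trace

print(depends_on_arg(lambda x: anp.sin(x) + 1.0, 0.3))  # True: output depends on x
print(depends_on_arg(lambda x: anp.ones(3), 0.3))       # False: constant output, no dependence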
20,597 |
def init_envvars() -> None:
"""Initialize environment variables which need to be set early."""
if objects.backend == usertypes.Backend.QtWebEngine:
software_rendering = config.val.qt.force_software_rendering
if software_rendering == 'software-opengl':
os.environ['QT_XCB_FORCE_SOFTWARE_OPENGL'] = '1'
elif software_rendering == 'qt-quick':
os.environ['QT_QUICK_BACKEND'] = 'software'
elif software_rendering == 'chromium':
os.environ['QT_WEBENGINE_DISABLE_NOUVEAU_WORKAROUND'] = '1'
else:
assert objects.backend == usertypes.Backend.QtWebKit, objects.backend
if config.val.qt.force_platform is not None:
os.environ['QT_QPA_PLATFORM'] = config.val.qt.force_platform
if config.val.qt.force_platformtheme is not None:
os.environ['QT_QPA_PLATFORMTHEME'] = config.val.qt.force_platformtheme
if config.val.window.hide_decoration:
os.environ['QT_WAYLAND_DISABLE_WINDOWDECORATION'] = '1'
if config.val.qt.highdpi:
env_var = ('QT_ENABLE_HIGHDPI_SCALING'
if qtutils.version_check('5.14', compiled=False)
else 'QT_AUTO_SCREEN_SCALE_FACTOR')
os.environ[env_var] = '1'
for var in config.val.qt.environ:
val = config.val.qt.environ[var]
if val == 'None':
os.environ[var] = ''
else:
os.environ[var] = val
|
def init_envvars() -> None:
"""Initialize environment variables which need to be set early."""
if objects.backend == usertypes.Backend.QtWebEngine:
software_rendering = config.val.qt.force_software_rendering
if software_rendering == 'software-opengl':
os.environ['QT_XCB_FORCE_SOFTWARE_OPENGL'] = '1'
elif software_rendering == 'qt-quick':
os.environ['QT_QUICK_BACKEND'] = 'software'
elif software_rendering == 'chromium':
os.environ['QT_WEBENGINE_DISABLE_NOUVEAU_WORKAROUND'] = '1'
else:
assert objects.backend == usertypes.Backend.QtWebKit, objects.backend
if config.val.qt.force_platform is not None:
os.environ['QT_QPA_PLATFORM'] = config.val.qt.force_platform
if config.val.qt.force_platformtheme is not None:
os.environ['QT_QPA_PLATFORMTHEME'] = config.val.qt.force_platformtheme
if config.val.window.hide_decoration:
os.environ['QT_WAYLAND_DISABLE_WINDOWDECORATION'] = '1'
if config.val.qt.highdpi:
env_var = ('QT_ENABLE_HIGHDPI_SCALING'
if qtutils.version_check('5.14', compiled=False)
else 'QT_AUTO_SCREEN_SCALE_FACTOR')
os.environ[env_var] = '1'
for var in config.val.qt.environ:
val = config.val.qt.environ[var]
if val == 'None':
del os.environ[var]
else:
os.environ[var] = val
|
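The only behavioural difference between the two versions is the val == 'None' branch: the first leaves the variable set to an empty string, the second removes it from the environment entirely. A tiny illustration of that distinction (the variable name below is made up for the example):

import os

os.environ['QT_EXAMPLE_VAR'] = 'value'     # hypothetical variable name

os.environ['QT_EXAMPLE_VAR'] = ''          # still defined, just empty
print('QT_EXAMPLE_VAR' in os.environ)      # True

del os.environ['QT_EXAMPLE_VAR']           # gone from the process environment
print('QT_EXAMPLE_VAR' in os.environ)      # False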
50,658 |
def split_ics(ics, random_uid=False, default_timezone=None):
"""split an ics string into several according to VEVENT's UIDs
and sort the right VTIMEZONEs accordingly
ignores all other ics components
:type ics: str
:param random_uid: assign random uids to all events
:type random_uid: bool
    :rtype: list
"""
cal = cal_from_ics(ics)
tzs = {}
# Since some event could have a Windows format EG : 'New Zealand Standard Time'
# for 'Pacific/Auckland' in Olson format, we should get the last format and put
# it in tzs key to avoid warning in ics_from_list (issue #876)
for item in cal.walk():
if item.name == 'VTIMEZONE':
if item['TZID'] in windows_to_olson.WINDOWS_TO_OLSON:
key = windows_to_olson.WINDOWS_TO_OLSON[item['TZID']]
else:
key = item['TZID']
tzs.update({key: item})
events_grouped = defaultdict(list)
for item in cal.walk():
if item.name == 'VEVENT':
events_grouped[item['UID']].append(item)
else:
continue
return [ics_from_list(events, tzs, random_uid, default_timezone) for uid, events in
sorted(events_grouped.items())]
|
def split_ics(ics, random_uid=False, default_timezone=None):
"""split an ics string into several according to VEVENT's UIDs
and sort the right VTIMEZONEs accordingly
ignores all other ics components
:type ics: str
:param random_uid: assign random uids to all events
:type random_uid: bool
    :rtype: list
"""
cal = cal_from_ics(ics)
tzs = {}
# Since some events could have a Windows format timezone (e.g. 'New Zealand
# Standard Time' for 'Pacific/Auckland' in Olson format), we convert any
# Windows format timezones to Olson.
for item in cal.walk():
if item.name == 'VTIMEZONE':
if item['TZID'] in windows_to_olson.WINDOWS_TO_OLSON:
key = windows_to_olson.WINDOWS_TO_OLSON[item['TZID']]
else:
key = item['TZID']
tzs.update({key: item})
events_grouped = defaultdict(list)
for item in cal.walk():
if item.name == 'VEVENT':
events_grouped[item['UID']].append(item)
else:
continue
return [ics_from_list(events, tzs, random_uid, default_timezone) for uid, events in
sorted(events_grouped.items())]
|
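Both versions group VEVENT components by UID after parsing. A minimal sketch of that grouping step using the icalendar package (the calendar text is a made-up two-event example):

from collections import defaultdict
import icalendar

ics = """BEGIN:VCALENDAR
VERSION:2.0
PRODID:-//example//EN
BEGIN:VEVENT
UID:event-1
DTSTART:20240101T100000Z
SUMMARY:First
END:VEVENT
BEGIN:VEVENT
UID:event-2
DTSTART:20240102T100000Z
SUMMARY:Second
END:VEVENT
END:VCALENDAR"""

cal = icalendar.Calendar.from_ical(ics)
events_grouped = defaultdict(list)
for item in cal.walk():
    if item.name == 'VEVENT':
        events_grouped[str(item['UID'])].append(item)

print(sorted(events_grouped))   # ['event-1', 'event-2']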
30,449 |
def get_current_utc_time():
"""
:return: The current UTC time.
"""
return datetime.utcnow()
|
def get_current_utc_time() -> datetime.datetime:
"""
:return: The current UTC time.
"""
return datetime.utcnow()
|
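The only change in this pair is the added return annotation, which presumes the datetime module itself is imported; the bare datetime.utcnow() call suggests the class was imported directly, but the surrounding imports are not shown here. For context (not part of either version), utcnow() returns a naive timestamp; a timezone-aware equivalent looks like this:

from datetime import datetime, timezone

naive = datetime.utcnow()             # tzinfo is None
aware = datetime.now(timezone.utc)    # tzinfo is UTC

print(naive.tzinfo, aware.tzinfo)     # None UTC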
11,806 |
def subtract_modulo(image1, image2):
"""Subtract two images, without clipping the result. At least one of the
images must be "1" mode.
.. code-block:: python
out = ((image1 - image2) % MAX)
:rtype: :py:class:`~PIL.Image.Image`
"""
image1.load()
image2.load()
return image1._new(image1.im.chop_subtract_modulo(image2.im))
|
def subtract_modulo(image1, image2):
"""Subtract two images, without clipping the result. At least one of the
images must have mode "1".
.. code-block:: python
out = ((image1 - image2) % MAX)
:rtype: :py:class:`~PIL.Image.Image`
"""
image1.load()
image2.load()
return image1._new(image1.im.chop_subtract_modulo(image2.im))
|
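A short usage sketch of the wrapping behaviour described in the docstring, via Pillow's public ImageChops module. Grayscale ("L") constant images are used here for simplicity, on the assumption that current Pillow accepts them for this operation:

from PIL import Image, ImageChops

im1 = Image.new("L", (4, 4), 10)
im2 = Image.new("L", (4, 4), 60)

# The result wraps instead of clipping: (10 - 60) % 256 == 206 for every pixel.
out = ImageChops.subtract_modulo(im1, im2)
print(out.getpixel((0, 0)))   # 206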
42,895 |
def rectangular_symmetric(V, tol=1e-11):
r"""Rectangular decomposition of a unitary into symmetric beamsplitters.
This decomposition starts with the output from :func:`clements_phase_end`
and further decomposes each of the T unitaries into two phase-shifters and
two symmetric (50:50) beamsplitters.
The two beamsplitters in this decomposition of T are modeled by :class:`ops.BSgate`
with arguments (pi/4, pi/2), and the two phase-shifters (see :class:`ops.Rgate`)
act on the input mode with the lower index of the two. The phase imposed
by the first phaseshifter (before the first beamsplitter) is named
`external_phase`, while we call the phase shift between the beamsplitters
`internal_phase`.
The algorithm applied in this function makes use of the following identity:
::
Rgate(alpha) | 1
Rgate(beta) | 2
Rgate(phi) | 1
BSgate(theta, 0) | 1, 2
equals
Rgate(phi+alpha-beta) | 1
BSgate(pi/4, pi/2) | 1, 2
Rgate(2*theta+pi) | 1, 2
BSgate(pi/4, pi/2) | 1, 2
Rgate(beta-theta+pi) | 1
Rgate(beta-theta) | 2
The phase-shifts by alpha and beta are thus pushed consecutively through
all the T unitaries of the interferometer and these unitaries are converted
into pairs of symmetric beamsplitters with two phase shifts. The phase
shifts at the end of the interferometer are added to the ones from the
diagonal unitary at the end of the interferometer obtained from :func:`clements_phase_end`.
Args:
V (array): Unitary matrix of size n_size
tol (int): the number of decimal places to use when determining
whether the matrix is unitary
Returns:
tuple[array]: returns a tuple of the form ``(tlist,np.diag(localV))``
where:
* ``tlist``: list containing ``[n,m,internal_phase,external_phase,n_size]`` of the T unitaries needed
* ``localV``: Diagonal unitary matrix to be applied at the end of circuit
"""
tlist, diags = clements_phase_end(V, tol)
new_tlist, new_diags = [], np.ones(len(diags), dtype=diags.dtype)
for i in tlist:
em, en = int(i[0]), int(i[1])
alpha, beta = np.angle(new_diags[em]), np.angle(new_diags[en])
theta, phi = i[2], i[3]
external_phase = np.fmod((phi + alpha - beta), 2 * np.pi)
internal_phase = np.fmod((np.pi + 2.0 * theta), 2 * np.pi)
new_alpha = beta - theta + np.pi
new_beta = 0*np.pi - theta + beta
new_i = [i[0], i[1], internal_phase, external_phase, i[4]]
new_diags[em], new_diags[en] = np.exp(1j*new_alpha), np.exp(1j*new_beta)
new_tlist = new_tlist + [new_i]
new_diags = diags * new_diags
return (new_tlist, new_diags)
|
def rectangular_symmetric(V, tol=1e-11):
r"""Rectangular decomposition of a unitary into symmetric beamsplitters.
This decomposition starts with the output from :func:`~.clements_phase_end`
and further decomposes each of the T unitaries into two phase-shifters and
two symmetric (50:50) beamsplitters.
The two beamsplitters in this decomposition of T are modeled by :class:`ops.BSgate`
with arguments (pi/4, pi/2), and the two phase-shifters (see :class:`ops.Rgate`)
act on the input mode with the lower index of the two. The phase imposed
by the first phaseshifter (before the first beamsplitter) is named
`external_phase`, while we call the phase shift between the beamsplitters
`internal_phase`.
The algorithm applied in this function makes use of the following identity:
::
Rgate(alpha) | 1
Rgate(beta) | 2
Rgate(phi) | 1
BSgate(theta, 0) | 1, 2
equals
Rgate(phi+alpha-beta) | 1
BSgate(pi/4, pi/2) | 1, 2
Rgate(2*theta+pi) | 1, 2
BSgate(pi/4, pi/2) | 1, 2
Rgate(beta-theta+pi) | 1
Rgate(beta-theta) | 2
The phase-shifts by alpha and beta are thus pushed consecutively through
all the T unitaries of the interferometer and these unitaries are converted
into pairs of symmetric beamsplitters with two phase shifts. The phase
shifts at the end of the interferometer are added to the ones from the
diagonal unitary at the end of the interferometer obtained from :func:`clements_phase_end`.
Args:
V (array): Unitary matrix of size n_size
tol (int): the number of decimal places to use when determining
whether the matrix is unitary
Returns:
tuple[array]: returns a tuple of the form ``(tlist,np.diag(localV))``
where:
* ``tlist``: list containing ``[n,m,internal_phase,external_phase,n_size]`` of the T unitaries needed
* ``localV``: Diagonal unitary matrix to be applied at the end of circuit
"""
tlist, diags = clements_phase_end(V, tol)
new_tlist, new_diags = [], np.ones(len(diags), dtype=diags.dtype)
for i in tlist:
em, en = int(i[0]), int(i[1])
alpha, beta = np.angle(new_diags[em]), np.angle(new_diags[en])
theta, phi = i[2], i[3]
external_phase = np.fmod((phi + alpha - beta), 2 * np.pi)
internal_phase = np.fmod((np.pi + 2.0 * theta), 2 * np.pi)
new_alpha = beta - theta + np.pi
new_beta = 0*np.pi - theta + beta
new_i = [i[0], i[1], internal_phase, external_phase, i[4]]
new_diags[em], new_diags[en] = np.exp(1j*new_alpha), np.exp(1j*new_beta)
new_tlist = new_tlist + [new_i]
new_diags = diags * new_diags
return (new_tlist, new_diags)
|
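A minimal usage sketch, assuming the function is importable from strawberryfields.decompositions (the module this code appears to come from): it decomposes a random 4-mode unitary and reports the number of T unitaries and that the residual diagonal entries are pure phases.

import numpy as np
from scipy.stats import unitary_group
from strawberryfields.decompositions import rectangular_symmetric

U = unitary_group.rvs(4, random_state=0)        # random 4x4 unitary
tlist, diags = rectangular_symmetric(U)

print(len(tlist))                               # 6 == 4 * 3 / 2 T unitaries
print(np.allclose(np.abs(diags), 1.0))          # True: leftover diagonal is phase-only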
27,814 |
def _get_line_with_reprcrash_message(
config: Config, rep: BaseReport, termwidth: int
) -> str:
"""Get summary line for a report, trying to add reprcrash message."""
verbose_word = rep._get_verbose_word(config)
pos = _get_pos(config, rep)
line = f"{verbose_word} {pos}"
line_width = wcswidth(line)
try:
# Type ignored intentionally -- possible AttributeError expected.
msg = rep.longrepr.reprcrash.message # type: ignore[union-attr]
except AttributeError:
pass
else:
if not os.environ.get("CI", False):
available_width = termwidth - line_width
msg = _format_trimmed(" - {}", msg, available_width)
else:
msg = f" - {msg}"
if msg is not None:
line += msg
return line
|
def _get_line_with_reprcrash_message(
config: Config, rep: BaseReport, termwidth: int
) -> str:
"""Get summary line for a report, trying to add reprcrash message."""
verbose_word = rep._get_verbose_word(config)
pos = _get_pos(config, rep)
line = f"{verbose_word} {pos}"
line_width = wcswidth(line)
try:
# Type ignored intentionally -- possible AttributeError expected.
msg = rep.longrepr.reprcrash.message # type: ignore[union-attr]
except AttributeError:
pass
else:
if not running_on_ci():
available_width = termwidth - line_width
msg = _format_trimmed(" - {}", msg, available_width)
else:
msg = f" - {msg}"
if msg is not None:
line += msg
return line
|
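The CI branch keeps the full crash message while the local branch trims it to the remaining terminal width. A simplified, hypothetical stand-in for that trimming step (the real pytest helper _format_trimmed is not reproduced here):

def format_trimmed(fmt, msg, available_width):
    """Fit fmt.format(msg) into available_width, ellipsizing msg or giving up entirely."""
    full = fmt.format(msg)
    if len(full) <= available_width:
        return full
    overhead = len(fmt.format("")) + 3          # room for the template plus '...'
    if available_width < overhead:
        return None                             # not even a trimmed message fits
    return fmt.format(msg[: available_width - overhead] + "...")

print(format_trimmed(" - {}", "assert 1 == 2", 80))
print(format_trimmed(" - {}", "a very long assertion message " * 5, 40))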
7,328 |
def blob_log(image, min_sigma=1, max_sigma=50, num_sigma=10, threshold=.2,
overlap=.5, log_scale=False, *, threshold_rel=None,
exclude_border=False):
r"""Finds blobs in the given grayscale image.
Blobs are found using the Laplacian of Gaussian (LoG) method [1]_.
For each blob found, the method returns its coordinates and the standard
deviation of the Gaussian kernel that detected the blob.
Parameters
----------
image : 2D or 3D ndarray
Input grayscale image, blobs are assumed to be light on dark
background (white on black).
min_sigma : scalar or sequence of scalars, optional
the minimum standard deviation for Gaussian kernel. Keep this low to
detect smaller blobs. The standard deviations of the Gaussian filter
are given for each axis as a sequence, or as a single number, in
which case it is equal for all axes.
max_sigma : scalar or sequence of scalars, optional
The maximum standard deviation for Gaussian kernel. Keep this high to
detect larger blobs. The standard deviations of the Gaussian filter
are given for each axis as a sequence, or as a single number, in
which case it is equal for all axes.
num_sigma : int, optional
The number of intermediate values of standard deviations to consider
between `min_sigma` and `max_sigma`.
threshold : float or None, optional
The absolute lower bound for scale space maxima. Local maxima smaller
than `threshold` are ignored. Reduce this to detect blobs with lower
intensities. If `threshold_rel` is also specified, whichever threshold
is larger will be used. If None, `threshold_rel` is used instead.
overlap : float, optional
A value between 0 and 1. If the area of two blobs overlaps by a
fraction greater than `threshold`, the smaller blob is eliminated.
log_scale : bool, optional
If set intermediate values of standard deviations are interpolated
using a logarithmic scale to the base `10`. If not, linear
interpolation is used.
threshold_rel : float or None, optional
Minimum intensity of peaks, calculated as
``max(dog_space) * threshold_rel``. Where ``dog_space`` refers to the
stack of Laplacian of Gaussian (LoG) images computed internally. This
should have a value between 0 and 1. If None, `threshold_abs` is used
instead.
exclude_border : tuple of ints, int, or False, optional
If tuple of ints, the length of the tuple must match the input array's
dimensionality. Each element of the tuple will exclude peaks from
within `exclude_border`-pixels of the border of the image along that
dimension.
If nonzero int, `exclude_border` excludes peaks from within
`exclude_border`-pixels of the border of the image.
If zero or False, peaks are identified regardless of their
distance from the border.
Returns
-------
A : (n, image.ndim + sigma) ndarray
A 2d array with each row representing 2 coordinate values for a 2D
image, and 3 coordinate values for a 3D image, plus the sigma(s) used.
When a single sigma is passed, outputs are:
``(r, c, sigma)`` or ``(p, r, c, sigma)`` where ``(r, c)`` or
``(p, r, c)`` are coordinates of the blob and ``sigma`` is the standard
deviation of the Gaussian kernel which detected the blob. When an
anisotropic gaussian is used (sigmas per dimension), the detected sigma
is returned for each dimension.
References
----------
.. [1] https://en.wikipedia.org/wiki/Blob_detection#The_Laplacian_of_Gaussian
Examples
--------
>>> from skimage import data, feature, exposure
>>> img = data.coins()
>>> img = exposure.equalize_hist(img) # improves detection
>>> feature.blob_log(img, threshold = .3)
array([[124. , 336. , 11.88888889],
[198. , 155. , 11.88888889],
[194. , 213. , 17.33333333],
[121. , 272. , 17.33333333],
[263. , 244. , 17.33333333],
[194. , 276. , 17.33333333],
[266. , 115. , 11.88888889],
[128. , 154. , 11.88888889],
[260. , 174. , 17.33333333],
[198. , 103. , 11.88888889],
[126. , 208. , 11.88888889],
[127. , 102. , 11.88888889],
[263. , 302. , 17.33333333],
[197. , 44. , 11.88888889],
[185. , 344. , 17.33333333],
[126. , 46. , 11.88888889],
[113. , 323. , 1. ]])
Notes
-----
The radius of each blob is approximately :math:`\sqrt{2}\sigma` for
a 2-D image and :math:`\sqrt{3}\sigma` for a 3-D image.
"""
image = img_as_float(image)
float_dtype = _supported_float_type(image.dtype)
image = image.astype(float_dtype, copy=False)
# if both min and max sigma are scalar, function returns only one sigma
scalar_sigma = (
True if np.isscalar(max_sigma) and np.isscalar(min_sigma) else False
)
# Gaussian filter requires that sequence-type sigmas have same
# dimensionality as image. This broadcasts scalar kernels
if np.isscalar(max_sigma):
max_sigma = np.full(image.ndim, max_sigma, dtype=float_dtype)
if np.isscalar(min_sigma):
min_sigma = np.full(image.ndim, min_sigma, dtype=float_dtype)
# Convert sequence types to array
min_sigma = np.asarray(min_sigma, dtype=float_dtype)
max_sigma = np.asarray(max_sigma, dtype=float_dtype)
if log_scale:
start = np.log10(min_sigma)
stop = np.log10(max_sigma)
sigma_list = np.logspace(start, stop, num_sigma)
else:
sigma_list = np.linspace(min_sigma, max_sigma, num_sigma)
# computing gaussian laplace
# average s**2 provides scale invariance
gl_images = [-ndi.gaussian_laplace(image, s) * np.mean(s) ** 2
for s in sigma_list]
image_cube = np.stack(gl_images, axis=-1)
exclude_border = _format_exclude_border(image.ndim, exclude_border)
local_maxima = peak_local_max(
image_cube,
threshold_abs=threshold,
threshold_rel=threshold_rel,
exclude_border=exclude_border,
footprint=np.ones((3,) * (image.ndim + 1)),
)
# Catch no peaks
if local_maxima.size == 0:
return np.empty((0, 3))
# Convert local_maxima to float64
lm = local_maxima.astype(float_dtype)
# translate final column of lm, which contains the index of the
# sigma that produced the maximum intensity value, into the sigma
sigmas_of_peaks = sigma_list[local_maxima[:, -1]]
if scalar_sigma:
# select one sigma column, keeping dimension
sigmas_of_peaks = sigmas_of_peaks[:, 0:1]
# Remove sigma index and replace with sigmas
lm = np.hstack([lm[:, :-1], sigmas_of_peaks])
sigma_dim = sigmas_of_peaks.shape[1]
return _prune_blobs(lm, overlap, sigma_dim=sigma_dim)
|
def blob_log(image, min_sigma=1, max_sigma=50, num_sigma=10, threshold=.2,
overlap=.5, log_scale=False, *, threshold_rel=None,
exclude_border=False):
r"""Finds blobs in the given grayscale image.
Blobs are found using the Laplacian of Gaussian (LoG) method [1]_.
For each blob found, the method returns its coordinates and the standard
deviation of the Gaussian kernel that detected the blob.
Parameters
----------
image : 2D or 3D ndarray
Input grayscale image, blobs are assumed to be light on dark
background (white on black).
min_sigma : scalar or sequence of scalars, optional
the minimum standard deviation for Gaussian kernel. Keep this low to
detect smaller blobs. The standard deviations of the Gaussian filter
are given for each axis as a sequence, or as a single number, in
which case it is equal for all axes.
max_sigma : scalar or sequence of scalars, optional
The maximum standard deviation for Gaussian kernel. Keep this high to
detect larger blobs. The standard deviations of the Gaussian filter
are given for each axis as a sequence, or as a single number, in
which case it is equal for all axes.
num_sigma : int, optional
The number of intermediate values of standard deviations to consider
between `min_sigma` and `max_sigma`.
threshold : float or None, optional
The absolute lower bound for scale space maxima. Local maxima smaller
than `threshold` are ignored. Reduce this to detect blobs with lower
intensities. If `threshold_rel` is also specified, whichever threshold
is larger will be used. If None, `threshold_rel` is used instead.
overlap : float, optional
A value between 0 and 1. If the area of two blobs overlaps by a
fraction greater than `threshold`, the smaller blob is eliminated.
log_scale : bool, optional
If set intermediate values of standard deviations are interpolated
using a logarithmic scale to the base `10`. If not, linear
interpolation is used.
threshold_rel : float or None, optional
Minimum intensity of peaks, calculated as
``max(dog_space) * threshold_rel``. Where ``dog_space`` refers to the
stack of Laplacian of Gaussian (LoG) images computed internally. This
should have a value between 0 and 1. If None, `threshold` is used
instead.
exclude_border : tuple of ints, int, or False, optional
If tuple of ints, the length of the tuple must match the input array's
dimensionality. Each element of the tuple will exclude peaks from
within `exclude_border`-pixels of the border of the image along that
dimension.
If nonzero int, `exclude_border` excludes peaks from within
`exclude_border`-pixels of the border of the image.
If zero or False, peaks are identified regardless of their
distance from the border.
Returns
-------
A : (n, image.ndim + sigma) ndarray
A 2d array with each row representing 2 coordinate values for a 2D
image, and 3 coordinate values for a 3D image, plus the sigma(s) used.
When a single sigma is passed, outputs are:
``(r, c, sigma)`` or ``(p, r, c, sigma)`` where ``(r, c)`` or
``(p, r, c)`` are coordinates of the blob and ``sigma`` is the standard
deviation of the Gaussian kernel which detected the blob. When an
anisotropic gaussian is used (sigmas per dimension), the detected sigma
is returned for each dimension.
References
----------
.. [1] https://en.wikipedia.org/wiki/Blob_detection#The_Laplacian_of_Gaussian
Examples
--------
>>> from skimage import data, feature, exposure
>>> img = data.coins()
>>> img = exposure.equalize_hist(img) # improves detection
>>> feature.blob_log(img, threshold = .3)
array([[124. , 336. , 11.88888889],
[198. , 155. , 11.88888889],
[194. , 213. , 17.33333333],
[121. , 272. , 17.33333333],
[263. , 244. , 17.33333333],
[194. , 276. , 17.33333333],
[266. , 115. , 11.88888889],
[128. , 154. , 11.88888889],
[260. , 174. , 17.33333333],
[198. , 103. , 11.88888889],
[126. , 208. , 11.88888889],
[127. , 102. , 11.88888889],
[263. , 302. , 17.33333333],
[197. , 44. , 11.88888889],
[185. , 344. , 17.33333333],
[126. , 46. , 11.88888889],
[113. , 323. , 1. ]])
Notes
-----
The radius of each blob is approximately :math:`\sqrt{2}\sigma` for
a 2-D image and :math:`\sqrt{3}\sigma` for a 3-D image.
"""
image = img_as_float(image)
float_dtype = _supported_float_type(image.dtype)
image = image.astype(float_dtype, copy=False)
# if both min and max sigma are scalar, function returns only one sigma
scalar_sigma = (
True if np.isscalar(max_sigma) and np.isscalar(min_sigma) else False
)
# Gaussian filter requires that sequence-type sigmas have same
# dimensionality as image. This broadcasts scalar kernels
if np.isscalar(max_sigma):
max_sigma = np.full(image.ndim, max_sigma, dtype=float_dtype)
if np.isscalar(min_sigma):
min_sigma = np.full(image.ndim, min_sigma, dtype=float_dtype)
# Convert sequence types to array
min_sigma = np.asarray(min_sigma, dtype=float_dtype)
max_sigma = np.asarray(max_sigma, dtype=float_dtype)
if log_scale:
start = np.log10(min_sigma)
stop = np.log10(max_sigma)
sigma_list = np.logspace(start, stop, num_sigma)
else:
sigma_list = np.linspace(min_sigma, max_sigma, num_sigma)
# computing gaussian laplace
# average s**2 provides scale invariance
gl_images = [-ndi.gaussian_laplace(image, s) * np.mean(s) ** 2
for s in sigma_list]
image_cube = np.stack(gl_images, axis=-1)
exclude_border = _format_exclude_border(image.ndim, exclude_border)
local_maxima = peak_local_max(
image_cube,
threshold_abs=threshold,
threshold_rel=threshold_rel,
exclude_border=exclude_border,
footprint=np.ones((3,) * (image.ndim + 1)),
)
# Catch no peaks
if local_maxima.size == 0:
return np.empty((0, 3))
# Convert local_maxima to float64
lm = local_maxima.astype(float_dtype)
# translate final column of lm, which contains the index of the
# sigma that produced the maximum intensity value, into the sigma
sigmas_of_peaks = sigma_list[local_maxima[:, -1]]
if scalar_sigma:
# select one sigma column, keeping dimension
sigmas_of_peaks = sigmas_of_peaks[:, 0:1]
# Remove sigma index and replace with sigmas
lm = np.hstack([lm[:, :-1], sigmas_of_peaks])
sigma_dim = sigmas_of_peaks.shape[1]
return _prune_blobs(lm, overlap, sigma_dim=sigma_dim)
|
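Following the Notes section above, the detected sigma converts to an approximate blob radius via sqrt(2) * sigma in 2-D; a short sketch on the same coins example used in the docstring:

import numpy as np
from skimage import data, exposure, feature

img = exposure.equalize_hist(data.coins())
blobs = feature.blob_log(img, threshold=.3)

radii = blobs[:, 2] * np.sqrt(2)      # approximate blob radii in pixels (2-D case)
print(blobs.shape[0], radii.min(), radii.max())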
31,565 |
def main():
trace = ''
try:
incident = demisto.incident()
# events = incident.get('labels', {}).get('Drilldown', {})
# print(incident.get('labels', {}))
# print(events)
custom_fields = incident.get('CustomFields', {})
try:
drilldown_results_str = custom_fields.get('identitytable', {})
drilldown_results = json.loads(drilldown_results_str)
except Exception as err:
print(f'json error: {str(err)}')
trace += '1'
if not drilldown_results:
trace += '2'
return CommandResults()
if isinstance(drilldown_results, list):
trace += '3'
events_arr = []
for event in drilldown_results:
try:
trace += '4'
events_arr.append(event)
trace += '5'
except Exception as e:
return_error('json error: ', error=e)
markdown = tableToMarkdown("", events_arr, headers=events_arr[0].keys())
trace += '6'
else:
trace += '7'
markdown = tableToMarkdown("", drilldown_results)
trace += '8'
trace += '9'
return {'ContentsFormat': formats['markdown'], 'Type': entryTypes['note'], 'Contents': markdown}
except Exception as exp:
return_error('could not parse Splunk events', error=exp)
finally:
# print('trace: ' + trace)
pass
|
def main():
trace = ''
try:
incident = demisto.incident()
# events = incident.get('labels', {}).get('Drilldown', {})
# print(incident.get('labels', {}))
# print(events)
custom_fields = incident.get('CustomFields', {})
try:
drilldown_results_str = custom_fields.get('identitytable', {})
identity_results = json.loads(drilldown_results_str)
except Exception as err:
print(f'json error: {str(err)}')
trace += '1'
if not drilldown_results:
trace += '2'
return CommandResults()
if isinstance(drilldown_results, list):
trace += '3'
events_arr = []
for event in drilldown_results:
try:
trace += '4'
events_arr.append(event)
trace += '5'
except Exception as e:
return_error('json error: ', error=e)
markdown = tableToMarkdown("", events_arr, headers=events_arr[0].keys())
trace += '6'
else:
trace += '7'
markdown = tableToMarkdown("", drilldown_results)
trace += '8'
trace += '9'
return {'ContentsFormat': formats['markdown'], 'Type': entryTypes['note'], 'Contents': markdown}
except Exception as exp:
return_error('could not parse Splunk events', error=exp)
finally:
# print('trace: ' + trace)
pass
|
42,959 |
def resize(subgraph: list, graph: nx.Graph, min_size: int, max_size: int) -> dict:
"""Resize a subgraph to a range of input sizes.
This function uses a greedy approach to iteratively add or remove nodes one at a time to an
input subgraph to reach the range of sizes specified by ``min_size`` and ``max_size``.
When growth is required, the algorithm examines all nodes from the remainder of the graph as
candidates and adds-in the single node with the highest degree relative to the rest of the
subgraph. This results in a graph that is one node larger, and if growth is still required
the algorithm performs the procedure again.
When shrinking is required, the algorithm examines all nodes from within the subgraph as
candidates and removes the single node with lowest degree relative to the subgraph. In both
growth and shrink phases, ties for addition/removal with nodes of equal degree are settled by
uniform random choice.
**Example usage:**
>>> s = data.Planted()
>>> g = nx.Graph(s.adj)
>>> s = [20, 21, 22, 23, 24, 25, 26, 27, 28, 29]
>>> resize(s, g, 8, 12)
{10: [20, 21, 22, 23, 24, 25, 26, 27, 28, 29],
11: [11, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29],
12: [0, 11, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29],
9: [20, 21, 22, 24, 25, 26, 27, 28, 29],
8: [20, 21, 22, 24, 25, 26, 27, 29]}
Args:
subgraph (list[int]): a subgraph specified by a list of nodes
graph (nx.Graph): the input graph
min_size (int): minimum size for subgraph to be resized to
max_size (int): maximum size for subgraph to be resized to
Returns:
dict[int, list[int]]: a dictionary of different sizes with corresponding subgraph
"""
nodes = graph.nodes()
subgraph = set(subgraph)
if not subgraph.issubset(nodes):
raise ValueError("Input is not a valid subgraph")
if min_size < 1:
raise ValueError("min_size must be at least 1")
if max_size >= len(nodes):
raise ValueError("max_size must be less than number of nodes in graph")
if max_size < min_size:
raise ValueError("max_size must not be less than min_size")
starting_size = len(subgraph)
if min_size <= starting_size <= max_size:
resized = {starting_size: sorted(subgraph)}
else:
resized = {}
if max_size > starting_size:
grow_subgraph = graph.subgraph(subgraph).copy()
while grow_subgraph.order() < max_size:
grow_nodes = grow_subgraph.nodes()
complement_nodes = nodes - grow_nodes
degrees = [
(c, graph.subgraph(list(grow_nodes) + [c]).degree()[c]) for c in complement_nodes
]
np.random.shuffle(degrees)
to_add = max(degrees, key=lambda x: x[1])
grow_subgraph.add_node(to_add[0])
new_size = grow_subgraph.order()
if min_size <= new_size <= max_size:
resized[new_size] = sorted(grow_subgraph.nodes())
if min_size < starting_size:
shrink_subgraph = graph.subgraph(subgraph).copy()
while shrink_subgraph.order() > min_size:
degrees = list(shrink_subgraph.degree())
np.random.shuffle(degrees)
to_remove = min(degrees, key=lambda x: x[1])
shrink_subgraph.remove_node(to_remove[0])
new_size = shrink_subgraph.order()
if min_size <= new_size <= max_size:
resized[new_size] = sorted(shrink_subgraph.nodes())
return resized
|
def resize(subgraph: list, graph: nx.Graph, min_size: int, max_size: int) -> dict:
"""Resize a subgraph to a range of input sizes.
This function uses a greedy approach to iteratively add or remove nodes one at a time to an
input subgraph to reach the range of sizes specified by ``min_size`` and ``max_size``.
When growth is required, the algorithm examines all nodes from the remainder of the graph as
candidates and adds the single node with the highest degree relative to the rest of the
subgraph. This results in a graph that is one node larger, and if growth is still required
the algorithm performs the procedure again.
When shrinking is required, the algorithm examines all nodes from within the subgraph as
candidates and removes the single node with lowest degree relative to the subgraph. In both
growth and shrink phases, ties for addition/removal with nodes of equal degree are settled by
uniform random choice.
**Example usage:**
>>> s = data.Planted()
>>> g = nx.Graph(s.adj)
>>> s = [20, 21, 22, 23, 24, 25, 26, 27, 28, 29]
>>> resize(s, g, 8, 12)
{10: [20, 21, 22, 23, 24, 25, 26, 27, 28, 29],
11: [11, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29],
12: [0, 11, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29],
9: [20, 21, 22, 24, 25, 26, 27, 28, 29],
8: [20, 21, 22, 24, 25, 26, 27, 29]}
Args:
subgraph (list[int]): a subgraph specified by a list of nodes
graph (nx.Graph): the input graph
min_size (int): minimum size for subgraph to be resized to
max_size (int): maximum size for subgraph to be resized to
Returns:
        dict[int, list[int]]: a dictionary mapping each size to the corresponding resized subgraph
"""
nodes = graph.nodes()
subgraph = set(subgraph)
if not subgraph.issubset(nodes):
raise ValueError("Input is not a valid subgraph")
if min_size < 1:
raise ValueError("min_size must be at least 1")
if max_size >= len(nodes):
raise ValueError("max_size must be less than number of nodes in graph")
if max_size < min_size:
raise ValueError("max_size must not be less than min_size")
starting_size = len(subgraph)
if min_size <= starting_size <= max_size:
resized = {starting_size: sorted(subgraph)}
else:
resized = {}
if max_size > starting_size:
grow_subgraph = graph.subgraph(subgraph).copy()
while grow_subgraph.order() < max_size:
grow_nodes = grow_subgraph.nodes()
complement_nodes = nodes - grow_nodes
degrees = [
(c, graph.subgraph(list(grow_nodes) + [c]).degree()[c]) for c in complement_nodes
]
np.random.shuffle(degrees)
to_add = max(degrees, key=lambda x: x[1])
grow_subgraph.add_node(to_add[0])
new_size = grow_subgraph.order()
if min_size <= new_size <= max_size:
resized[new_size] = sorted(grow_subgraph.nodes())
if min_size < starting_size:
shrink_subgraph = graph.subgraph(subgraph).copy()
while shrink_subgraph.order() > min_size:
degrees = list(shrink_subgraph.degree())
np.random.shuffle(degrees)
to_remove = min(degrees, key=lambda x: x[1])
shrink_subgraph.remove_node(to_remove[0])
new_size = shrink_subgraph.order()
if min_size <= new_size <= max_size:
resized[new_size] = sorted(shrink_subgraph.nodes())
return resized
|
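A brief, hypothetical usage sketch of the `resize` function listed above may help when comparing the two versions. It assumes `networkx` and `numpy` are installed and that `resize` (as defined above) is in scope; the random graph and the size bounds are illustrative only, not taken from the original example.

import networkx as nx
import numpy as np

# Small synthetic example: a 20-node random graph and a 6-node seed subgraph.
np.random.seed(0)
graph = nx.erdos_renyi_graph(n=20, p=0.3, seed=0)
seed_subgraph = [0, 1, 2, 3, 4, 5]

# Grow and shrink the seed so that every size from 4 to 8 nodes is produced.
resized = resize(seed_subgraph, graph, min_size=4, max_size=8)
for size, nodes in sorted(resized.items()):
    print(size, nodes)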
31,917 |
def get_user_login_profile(args, aws_client):
client = aws_client.aws_session(
service=SERVICE,
role_arn=args.get('roleArn'),
role_session_name=args.get('roleSessionName'),
role_session_duration=args.get('roleSessionDuration'),
)
user_name = args.get('userName')
kwargs = {
'UserName': user_name
}
response = client.get_login_profile(**kwargs)
user_profile = response['LoginProfile']
data = ({
'UserName': user_profile.get('UserName', None),
'LoginProfile': {
'CreateDate': user_profile.get('CreateDate', None),
'PasswordResetRequired': user_profile.get('PasswordResetRequired', None)
}
})
ec = {'AWS.IAM.Users(val.UserName && val.UserName === obj.UserName)': data}
human_readable = tableToMarkdown('AWS IAM Login Profile for user {}'.format(user_name),
t=data.get('LoginProfile'),
headers=['CreateDate', 'PasswordResetRequired'],
removeNull=True,
headerTransform=pascalToSpace)
return_outputs(human_readable, ec)
|
def get_user_login_profile(args, aws_client):
client = aws_client.aws_session(
service=SERVICE,
role_arn=args.get('roleArn'),
role_session_name=args.get('roleSessionName'),
role_session_duration=args.get('roleSessionDuration'),
)
user_name = args.get('userName')
kwargs = {
'UserName': user_name
}
response = client.get_login_profile(**kwargs)
user_profile = response['LoginProfile']
data = ({
'UserName': user_profile.get('UserName', None),
'LoginProfile': {
'CreateDate': user_profile.get('CreateDate', None),
'PasswordResetRequired': user_profile.get('PasswordResetRequired', None)
}
})
ec = {'AWS.IAM.Users(val.UserName && val.UserName === obj.UserName)': data}
human_readable = tableToMarkdown('AWS IAM Login Profile for user {}'.format(user_name),
t=data.get('LoginProfile'),
headers=['CreateDate', 'PasswordResetRequired'],
removeNull=True,
headerTransform=pascalToSpace)
return_outputs(human_readable, ec, response)
|
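The only change between the two `get_user_login_profile` versions above is that the revised call also passes the raw `response` through to `return_outputs`. For orientation, here is a minimal stand-alone sketch of the underlying IAM call using plain boto3; the user name is a placeholder, AWS credentials are assumed to be configured in the environment, and the XSOAR helpers such as `tableToMarkdown` and `return_outputs` are intentionally not reproduced.

import boto3

# Plain boto3 equivalent of the wrapped GetLoginProfile call; "example-user" is a placeholder.
client = boto3.client("iam")
response = client.get_login_profile(UserName="example-user")

profile = response["LoginProfile"]
print(profile.get("UserName"), profile.get("CreateDate"), profile.get("PasswordResetRequired"))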
1,523 |
def compute_optics_graph(X, min_samples, max_eps, metric, p, metric_params,
algorithm, leaf_size, n_jobs):
"""Computes the OPTICS reachability graph.
Read more in the :ref:`User Guide <optics>`.
Parameters
----------
X : array, shape (n_samples, n_features), or (n_samples, n_samples) \
        if metric='precomputed'.
A feature array, or array of distances between samples if
metric='precomputed'
min_samples : int > 1 or float between 0 and 1
The number of samples in a neighborhood for a point to be considered
as a core point. Expressed as an absolute number or a fraction of the
number of samples (rounded to be at least 2).
max_eps : float, optional (default=np.inf)
The maximum distance between two samples for one to be considered as
in the neighborhood of the other. Default value of ``np.inf`` will
identify clusters across all scales; reducing ``max_eps`` will result
in shorter run times.
metric : string or callable, optional (default='minkowski')
Metric to use for distance computation. Any metric from scikit-learn
or scipy.spatial.distance can be used.
If metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays as input and return one value indicating the
distance between them. This works for Scipy's metrics, but is less
efficient than passing the metric name as a string. If metric is
"precomputed", X is assumed to be a distance matrix and must be square.
Valid values for metric are:
- from scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
'manhattan']
- from scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski',
'mahalanobis', 'minkowski', 'rogerstanimoto', 'russellrao',
'seuclidean', 'sokalmichener', 'sokalsneath', 'sqeuclidean',
'yule']
See the documentation for scipy.spatial.distance for details on these
metrics.
p : integer, optional (default=2)
Parameter for the Minkowski metric from
:class:`sklearn.metrics.pairwise_distances`. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
metric_params : dict, optional (default=None)
Additional keyword arguments for the metric function.
algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
Algorithm used to compute the nearest neighbors:
- 'ball_tree' will use :class:`BallTree`
- 'kd_tree' will use :class:`KDTree`
- 'brute' will use a brute-force search.
- 'auto' will attempt to decide the most appropriate algorithm
based on the values passed to :meth:`fit` method. (default)
Note: fitting on sparse input will override the setting of
this parameter, using brute force.
leaf_size : int, optional (default=30)
Leaf size passed to :class:`BallTree` or :class:`KDTree`. This can
affect the speed of the construction and query, as well as the memory
required to store the tree. The optimal value depends on the
nature of the problem.
n_jobs : int or None, optional (default=None)
The number of parallel jobs to run for neighbors search.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
Returns
-------
ordering_ : array, shape (n_samples,)
The cluster ordered list of sample indices.
core_distances_ : array, shape (n_samples,)
Distance at which each sample becomes a core point, indexed by object
order. Points which will never be core have a distance of inf. Use
``clust.core_distances_[clust.ordering_]`` to access in cluster order.
reachability_ : array, shape (n_samples,)
Reachability distances per sample, indexed by object order. Use
``clust.reachability_[clust.ordering_]`` to access in cluster order.
predecessor_ : array, shape (n_samples,)
Point that a sample was reached from, indexed by object order.
Seed points have a predecessor of -1.
References
----------
.. [1] Ankerst, Mihael, Markus M. Breunig, Hans-Peter Kriegel,
and Jörg Sander. "OPTICS: ordering points to identify the clustering
structure." ACM SIGMOD Record 28, no. 2 (1999): 49-60.
"""
n_samples = X.shape[0]
_validate_size(min_samples, n_samples, 'min_samples')
if min_samples <= 1:
min_samples = max(2, int(min_samples * n_samples))
# Start all points as 'unprocessed' ##
reachability_ = np.empty(n_samples)
reachability_.fill(np.inf)
predecessor_ = np.empty(n_samples, dtype=int)
predecessor_.fill(-1)
nbrs = NearestNeighbors(n_neighbors=min_samples,
algorithm=algorithm,
leaf_size=leaf_size,
metric=metric,
metric_params=metric_params,
p=p,
n_jobs=n_jobs)
nbrs.fit(X)
# Here we first do a kNN query for each point, this differs from
# the original OPTICS that only used epsilon range queries.
# TODO: handle working_memory somehow?
core_distances_ = _compute_core_distances_(X=X, neighbors=nbrs,
min_samples=min_samples,
working_memory=None)
# OPTICS puts an upper limit on these, use inf for undefined.
core_distances_[core_distances_ > max_eps] = np.inf
# Main OPTICS loop. Not parallelizable. The order that entries are
# written to the 'ordering_' list is important!
# This implementation is O(n lg n) theoretically.
Heap = []
for ordering_idx in range(X.shape[0]):
Heap.append((np.inf, ordering_idx))
heapq.heapify(Heap)
processed = np.zeros(X.shape[0], dtype=bool)
ordering = np.zeros(X.shape[0], dtype=int)
for ordering_idx in range(X.shape[0]):
# Choose next based on smallest reachability distance
# (And prefer smaller ids on ties, possibly np.inf!)
(val, point) = heapq.heappop(Heap)
while processed[point]:
(val, point) = heapq.heappop(Heap)
processed[point] = True
ordering[ordering_idx] = point
if core_distances_[point] != np.inf:
_set_reach_dist(core_distances_=core_distances_,
reachability_=reachability_,
predecessor_=predecessor_,
point_index=point,
processed=processed, X=X, nbrs=nbrs,
metric=metric, metric_params=metric_params,
p=p, max_eps=max_eps, Heap=Heap)
if np.all(np.isinf(reachability_)):
warnings.warn("All reachability values are inf. Set a larger"
" max_eps or all data will be considered outliers.",
UserWarning)
return ordering, core_distances_, reachability_, predecessor_
|
def compute_optics_graph(X, min_samples, max_eps, metric, p, metric_params,
algorithm, leaf_size, n_jobs):
"""Computes the OPTICS reachability graph.
Read more in the :ref:`User Guide <optics>`.
Parameters
----------
X : array, shape (n_samples, n_features), or (n_samples, n_samples) \
        if metric='precomputed'.
A feature array, or array of distances between samples if
metric='precomputed'
min_samples : int > 1 or float between 0 and 1
The number of samples in a neighborhood for a point to be considered
as a core point. Expressed as an absolute number or a fraction of the
number of samples (rounded to be at least 2).
max_eps : float, optional (default=np.inf)
The maximum distance between two samples for one to be considered as
in the neighborhood of the other. Default value of ``np.inf`` will
identify clusters across all scales; reducing ``max_eps`` will result
in shorter run times.
metric : string or callable, optional (default='minkowski')
Metric to use for distance computation. Any metric from scikit-learn
or scipy.spatial.distance can be used.
If metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays as input and return one value indicating the
distance between them. This works for Scipy's metrics, but is less
efficient than passing the metric name as a string. If metric is
"precomputed", X is assumed to be a distance matrix and must be square.
Valid values for metric are:
- from scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
'manhattan']
- from scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski',
'mahalanobis', 'minkowski', 'rogerstanimoto', 'russellrao',
'seuclidean', 'sokalmichener', 'sokalsneath', 'sqeuclidean',
'yule']
See the documentation for scipy.spatial.distance for details on these
metrics.
p : integer, optional (default=2)
Parameter for the Minkowski metric from
:class:`sklearn.metrics.pairwise_distances`. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
metric_params : dict, optional (default=None)
Additional keyword arguments for the metric function.
algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
Algorithm used to compute the nearest neighbors:
- 'ball_tree' will use :class:`BallTree`
- 'kd_tree' will use :class:`KDTree`
- 'brute' will use a brute-force search.
- 'auto' will attempt to decide the most appropriate algorithm
based on the values passed to :meth:`fit` method. (default)
Note: fitting on sparse input will override the setting of
this parameter, using brute force.
leaf_size : int, optional (default=30)
Leaf size passed to :class:`BallTree` or :class:`KDTree`. This can
affect the speed of the construction and query, as well as the memory
required to store the tree. The optimal value depends on the
nature of the problem.
n_jobs : int or None, optional (default=None)
The number of parallel jobs to run for neighbors search.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
Returns
-------
ordering_ : array, shape (n_samples,)
The cluster ordered list of sample indices.
core_distances_ : array, shape (n_samples,)
Distance at which each sample becomes a core point, indexed by object
order. Points which will never be core have a distance of inf. Use
``clust.core_distances_[clust.ordering_]`` to access in cluster order.
reachability_ : array, shape (n_samples,)
Reachability distances per sample, indexed by object order. Use
``clust.reachability_[clust.ordering_]`` to access in cluster order.
predecessor_ : array, shape (n_samples,)
Point that a sample was reached from, indexed by object order.
Seed points have a predecessor of -1.
References
----------
.. [1] Ankerst, Mihael, Markus M. Breunig, Hans-Peter Kriegel,
and Jörg Sander. "OPTICS: ordering points to identify the clustering
structure." ACM SIGMOD Record 28, no. 2 (1999): 49-60.
"""
n_samples = X.shape[0]
_validate_size(min_samples, n_samples, 'min_samples')
if min_samples <= 1:
min_samples = max(2, int(min_samples * n_samples))
# Start all points as 'unprocessed' ##
reachability_ = np.empty(n_samples)
reachability_.fill(np.inf)
predecessor_ = np.empty(n_samples, dtype=int)
predecessor_.fill(-1)
nbrs = NearestNeighbors(n_neighbors=min_samples,
algorithm=algorithm,
leaf_size=leaf_size,
metric=metric,
metric_params=metric_params,
p=p,
n_jobs=n_jobs)
nbrs.fit(X)
# Here we first do a kNN query for each point, this differs from
# the original OPTICS that only used epsilon range queries.
# TODO: handle working_memory somehow?
core_distances_ = _compute_core_distances_(X=X, neighbors=nbrs,
min_samples=min_samples,
working_memory=None)
# OPTICS puts an upper limit on these, use inf for undefined.
core_distances_[core_distances_ > max_eps] = np.inf
# Main OPTICS loop. Not parallelizable. The order that entries are
# written to the 'ordering_' list is important!
# This implementation is O(n lg n) theoretically.
Heap = []
for ordering_idx in range(X.shape[0]):
Heap.append((np.inf, ordering_idx))
heapq.heapify(Heap)
processed = np.zeros(X.shape[0], dtype=bool)
ordering = np.zeros(X.shape[0], dtype=int)
for ordering_idx in range(X.shape[0]):
# Choose next based on smallest reachability distance
# (And prefer smaller ids on ties, possibly np.inf!)
(val, point) = heapq.heappop(Heap)
while processed[point]:
val, point = heapq.heappop(Heap)
processed[point] = True
ordering[ordering_idx] = point
if core_distances_[point] != np.inf:
_set_reach_dist(core_distances_=core_distances_,
reachability_=reachability_,
predecessor_=predecessor_,
point_index=point,
processed=processed, X=X, nbrs=nbrs,
metric=metric, metric_params=metric_params,
p=p, max_eps=max_eps, Heap=Heap)
if np.all(np.isinf(reachability_)):
warnings.warn("All reachability values are inf. Set a larger"
" max_eps or all data will be considered outliers.",
UserWarning)
return ordering, core_distances_, reachability_, predecessor_
|
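The two `compute_optics_graph` listings above differ only in dropping the parentheses around the tuple unpacking inside the inner while loop. The helper itself is private to scikit-learn, so a sketch of exercising the same machinery through the public `sklearn.cluster.OPTICS` estimator is shown below; the toy data is illustrative, and exact values depend on the scikit-learn version.

import numpy as np
from sklearn.cluster import OPTICS

# Two well-separated Gaussian blobs as a toy dataset.
rng = np.random.RandomState(0)
X = np.vstack([rng.normal(0.0, 0.3, size=(50, 2)),
               rng.normal(5.0, 0.3, size=(50, 2))])

clust = OPTICS(min_samples=5, max_eps=np.inf).fit(X)

# ordering_, reachability_, core_distances_ and predecessor_ correspond to the
# tuple returned by compute_optics_graph, indexed by object order.
print(clust.ordering_[:10])
print(clust.reachability_[clust.ordering_][:10])
print(clust.core_distances_[clust.ordering_][:10])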
43,141 |
def batcher(device):
def batcher_dev(batch):
graphs, labels = zip(*batch)
batch_graphs = dgl.batch(graphs)
labels = torch.stack(labels, 0)
return AlchemyBatcher(graph=batch_graphs, label=labels)
return batcher_dev
|
def batcher():
def batcher_dev(batch):
graphs, labels = zip(*batch)
batch_graphs = dgl.batch(graphs)
labels = torch.stack(labels, 0)
return AlchemyBatcher(graph=batch_graphs, label=labels)
return batcher_dev
|
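The revised `batcher` simply drops the unused `device` parameter. A self-contained sketch of how such a factory is typically plugged into a PyTorch DataLoader as `collate_fn` is shown below; it assumes `dgl` and `torch` are installed, and `AlchemyBatcher` is recreated here as a plain namedtuple stand-in for the container used in the surrounding code.

from collections import namedtuple

import dgl
import torch
from torch.utils.data import DataLoader

# Stand-in for the AlchemyBatcher container referenced by the snippet above.
AlchemyBatcher = namedtuple("AlchemyBatcher", ["graph", "label"])

def batcher():
    def batcher_dev(batch):
        graphs, labels = zip(*batch)
        batch_graphs = dgl.batch(graphs)
        labels = torch.stack(labels, 0)
        return AlchemyBatcher(graph=batch_graphs, label=labels)
    return batcher_dev

# Toy dataset of (graph, label) pairs, batched with the factory as collate_fn.
dataset = [(dgl.rand_graph(4, 8), torch.tensor([float(i)])) for i in range(10)]
loader = DataLoader(dataset, batch_size=4, collate_fn=batcher())
for batch in loader:
    print(batch.graph.batch_size, batch.label.shape)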
54,079 |
def load_arguments(self, _):
with self.argument_context('spring-cloud') as c:
c.argument('resource_group', arg_type=resource_group_name_type)
c.argument('name', options_list=[
'--name', '-n'], help='Name of Azure Spring Cloud.')
# A refactoring work item to move validators to command level to reduce the duplications.
# https://dev.azure.com/msazure/AzureDMSS/_workitems/edit/11002857/
with self.argument_context('spring-cloud create') as c:
c.argument('location', arg_type=get_location_type(self.cli_ctx), validator=validate_location)
c.argument('sku', arg_type=sku_type, default='Standard')
        c.argument('reserved_cidr_range', help='Comma-separated list of IP address ranges in CIDR format. The IP ranges are reserved to host underlying Azure Spring Cloud infrastructure, which should be at least 3 unused /16 IP ranges that must not overlap with any Subnet IP ranges.', validator=validate_vnet_required_parameters)
c.argument('vnet', help='The name or ID of an existing Virtual Network into which to deploy the Spring Cloud instance.', validator=validate_vnet_required_parameters)
c.argument('app_subnet', help='The name or ID of an existing subnet in "vnet" into which to deploy the Spring Cloud app. Required when deploying into a Virtual Network. Smaller subnet sizes are supported, please refer: https://aka.ms/azure-spring-cloud-smaller-subnet-vnet-docs', validator=validate_vnet_required_parameters)
c.argument('service_runtime_subnet', options_list=['--service-runtime-subnet', '--svc-subnet'], help='The name or ID of an existing subnet in "vnet" into which to deploy the Spring Cloud service runtime. Required when deploying into a Virtual Network.', validator=validate_vnet)
c.argument('service_runtime_network_resource_group', options_list=['--service-runtime-network-resource-group', '--svc-nrg'], help='The resource group where all network resources for Azure Spring Cloud service runtime will be created in.', validator=validate_node_resource_group)
c.argument('app_network_resource_group', options_list=['--app-network-resource-group', '--app-nrg'], help='The resource group where all network resources for apps will be created in.', validator=validate_node_resource_group)
c.argument('enable_java_agent',
arg_type=get_three_state_flag(),
help="Java in process agent is now GA-ed and used by default when Application Insights enabled. "
"This parameter is no longer needed and will be removed in future release.",
validator=validate_java_agent_parameters,
deprecate_info=c.deprecate(target='--enable-java-agent', hide=True))
c.argument('app_insights_key',
help="Connection string (recommended) or Instrumentation key of the existing Application Insights.",
validator=validate_tracing_parameters_asc_create)
c.argument('app_insights',
help="Name of the existing Application Insights in the same Resource Group. "
"Or Resource ID of the existing Application Insights in a different Resource Group.",
validator=validate_tracing_parameters_asc_create)
c.argument('sampling_rate',
type=float,
help="Sampling Rate of application insights. Minimum is 0, maximum is 100.",
validator=validate_tracing_parameters_asc_create)
c.argument('disable_app_insights',
arg_type=get_three_state_flag(),
help="Disable Application Insights, "
"if not disabled and no existing Application Insights specified with "
"--app-insights-key or --app-insights, "
"will create a new Application Insights instance in the same resource group.",
validator=validate_tracing_parameters_asc_create)
c.argument('zone_redundant',
arg_type=get_three_state_flag(),
help="Create your Azure Spring Cloud service in an Azure availability zone or not, "
"this could only be supported in several regions at the moment ",
default=False, is_preview=True)
with self.argument_context('spring-cloud update') as c:
c.argument('sku', arg_type=sku_type)
c.argument('app_insights_key',
help="Connection string (recommended) or Instrumentation key of the existing Application Insights.",
validator=validate_tracing_parameters_asc_update,
deprecate_info=c.deprecate(target='az spring-cloud update --app-insights-key',
redirect='az spring-cloud app-insights update --app-insights-key',
hide=True))
c.argument('app_insights',
help="Name of the existing Application Insights in the same Resource Group. "
"Or Resource ID of the existing Application Insights in a different Resource Group.",
validator=validate_tracing_parameters_asc_update,
deprecate_info=c.deprecate(target='az spring-cloud update --app-insights',
redirect='az spring-cloud app-insights update --app-insights',
hide=True))
c.argument('disable_app_insights',
arg_type=get_three_state_flag(),
help="Disable Application Insights, "
"if not disabled and no existing Application Insights specified with "
"--app-insights-key or --app-insights, "
"will create a new Application Insights instance in the same resource group.",
validator=validate_tracing_parameters_asc_update,
deprecate_info=c.deprecate(target='az spring-cloud update --disable-app-insights',
redirect='az spring-cloud app-insights update --disable',
hide=True))
for scope in ['spring-cloud create', 'spring-cloud update']:
with self.argument_context(scope) as c:
c.argument('tags', arg_type=tags_type)
with self.argument_context('spring-cloud test-endpoint renew-key') as c:
c.argument('type', type=str, arg_type=get_enum_type(
TestKeyType), help='Type of test-endpoint key')
with self.argument_context('spring-cloud app') as c:
c.argument('service', service_name_type)
c.argument('name', name_type, help='Name of app.')
with self.argument_context('spring-cloud app create') as c:
c.argument('assign_endpoint', arg_type=get_three_state_flag(),
help='If true, assign endpoint URL for direct access.', default=False,
options_list=['--assign-endpoint', c.deprecate(target='--is-public', redirect='--assign-endpoint', hide=True)])
c.argument('assign_identity', arg_type=get_three_state_flag(),
help='If true, assign managed service identity.')
c.argument('cpu', arg_type=cpu_type, default="1")
c.argument('memory', arg_type=memort_type, default="1Gi")
c.argument('instance_count', type=int,
                   default=1, help='Number of instances.', validator=validate_instance_count)
c.argument('persistent_storage', type=str,
help='A json file path for the persistent storages to be mounted to the app')
c.argument('loaded_public_certificate_file', options_list=['--loaded-public-certificate-file', '-f'], type=str,
                   help='A json file path that indicates the certificates to be loaded to the app')
with self.argument_context('spring-cloud app update') as c:
c.argument('assign_endpoint', arg_type=get_three_state_flag(),
help='If true, assign endpoint URL for direct access.',
options_list=['--assign-endpoint', c.deprecate(target='--is-public', redirect='--assign-endpoint', hide=True)])
c.argument('https_only', arg_type=get_three_state_flag(), help='If true, access app via https', default=False)
c.argument('enable_end_to_end_tls', arg_type=get_three_state_flag(), help='If true, enable end to end tls')
c.argument('persistent_storage', type=str,
help='A json file path for the persistent storages to be mounted to the app')
c.argument('loaded_public_certificate_file', type=str, options_list=['--loaded-public-certificate-file', '-f'],
                   help='A json file path that indicates the certificates to be loaded to the app')
with self.argument_context('spring-cloud app append-persistent-storage') as c:
c.argument('storage_name', type=str,
help='Name of the storage resource you created in Azure Spring Cloud.')
        c.argument('persistent_storage_type', options_list=['--persistent-storage-type', '-t'], type=str, help='Type of the persistent storage volume.')
c.argument('share_name', type=str,
help="The name of the pre-created file share. "
"ShareName should be provided only if the type of the persistent storage volume is AzureFileVolume.")
c.argument('mount_path', type=str, help='The path for the persistent storage volume to be mounted.')
c.argument('mount_options', nargs='+', help='[optional] The mount options for the persistent storage volume.', default=None)
c.argument('read_only', arg_type=get_three_state_flag(), help='[optional] If true, the persistent storage volume will be read only.', default=False)
for scope in ['spring-cloud app update', 'spring-cloud app start', 'spring-cloud app stop', 'spring-cloud app restart', 'spring-cloud app deploy', 'spring-cloud app scale', 'spring-cloud app set-deployment', 'spring-cloud app show-deploy-log']:
with self.argument_context(scope) as c:
c.argument('deployment', options_list=[
'--deployment', '-d'], help='Name of an existing deployment of the app. Default to the production deployment if not specified.', validator=fulfill_deployment_param)
c.argument('main_entry', options_list=[
'--main-entry', '-m'], help="The path to the .NET executable relative to zip root.")
for scope in ['spring-cloud app identity', 'spring-cloud app unset-deployment']:
with self.argument_context(scope) as c:
c.argument('name', name_type, help='Name of app.', validator=active_deployment_exist)
with self.argument_context('spring-cloud app identity assign') as c:
c.argument('scope', help="The scope the managed identity has access to")
c.argument('role', help="Role name or id the managed identity will be assigned")
def prepare_logs_argument(c):
'''`app log tail` is deprecated. `app logs` is the new choice. They share the same command processor.'''
c.argument('instance', options_list=['--instance', '-i'], help='Name of an existing instance of the deployment.')
c.argument('lines', type=int, help='Number of lines to show. Maximum is 10000', validator=validate_log_lines)
        c.argument('follow', options_list=['--follow', '-f'], help='Specify if the logs should be streamed.', action='store_true')
c.argument('since', help='Only return logs newer than a relative duration like 5s, 2m, or 1h. Maximum is 1h', validator=validate_log_since)
c.argument('limit', type=int, help='Maximum kilobytes of logs to return. Ceiling number is 2048.', validator=validate_log_limit)
c.argument('deployment', options_list=[
'--deployment', '-d'], help='Name of an existing deployment of the app. Default to the production deployment if not specified.', validator=fulfill_deployment_param)
c.argument('format_json', nargs='?', const='{timestamp} {level:>5} [{thread:>15.15}] {logger{39}:<40.40}: {message}\n{stackTrace}',
help='Format JSON logs if structured log is enabled')
with self.argument_context('spring-cloud app logs') as c:
prepare_logs_argument(c)
with self.argument_context('spring-cloud app log tail') as c:
prepare_logs_argument(c)
with self.argument_context('spring-cloud app set-deployment') as c:
c.argument('deployment', options_list=[
'--deployment', '-d'], help='Name of an existing deployment of the app.', validator=ensure_not_active_deployment)
for scope in ['spring-cloud app create', 'spring-cloud app update']:
with self.argument_context(scope) as c:
c.argument('enable_persistent_storage', arg_type=get_three_state_flag(),
help='If true, mount a 50G (Standard Pricing tier) or 1G (Basic Pricing tier) disk with default path.')
for scope in ['spring-cloud app update', 'spring-cloud app deployment create', 'spring-cloud app deploy', 'spring-cloud app create']:
with self.argument_context(scope) as c:
c.argument('runtime_version', arg_type=get_enum_type(RuntimeVersion),
help='Runtime version of used language')
c.argument('jvm_options', type=str, validator=validate_jvm_options,
help="A string containing jvm options, use '=' instead of ' ' for this argument to avoid bash parse error, eg: --jvm-options='-Xms1024m -Xmx2048m'")
c.argument('env', env_type)
c.argument('disable_probe', arg_type=get_three_state_flag(), help='If true, disable the liveness and readiness probe.')
with self.argument_context('spring-cloud app scale') as c:
c.argument('cpu', arg_type=cpu_type)
c.argument('memory', arg_type=memort_type)
        c.argument('instance_count', type=int, help='Number of instances.', validator=validate_instance_count)
for scope in ['spring-cloud app deploy', 'spring-cloud app deployment create']:
with self.argument_context(scope) as c:
c.argument(
'artifact_path', options_list=['--artifact-path',
c.deprecate(target='--jar-path', redirect='--artifact-path', hide=True),
c.deprecate(target='-p', redirect='--artifact-path', hide=True)],
help='Deploy the specified pre-built artifact (jar or netcore zip).', validator=validate_jar)
c.argument(
'disable_validation', arg_type=get_three_state_flag(),
help='If true, disable jar validation.')
c.argument('builder', help='(Enterprise Tier Only) Build service builder used to build the executable.', default='default', is_preview=True)
c.argument(
'main_entry', options_list=[
'--main-entry', '-m'], help="A string containing the path to the .NET executable relative to zip root.")
c.argument(
'target_module', help='Child module to be deployed, required for multiple jar packages built from source code.', arg_group='Source Code deploy')
c.argument(
'version', help='Deployment version, keep unchanged if not set.')
c.argument(
'container_image', help='The container image tag.', arg_group='Custom Container')
c.argument(
'container_registry', default='docker.io', help='The registry of the container image.', arg_group='Custom Container')
c.argument(
'registry_username', help='The username of the container registry.', arg_group='Custom Container')
c.argument(
'registry_password', help='The password of the container registry.', arg_group='Custom Container')
c.argument(
'container_command', help='The command of the container image.', nargs='*', arg_group='Custom Container')
c.argument(
'container_args', help='The arguments of the container image.', nargs='*', arg_group='Custom Container')
with self.argument_context('spring-cloud app deploy') as c:
c.argument('source_path', arg_type=source_path_type, validator=validate_deloy_path)
with self.argument_context('spring-cloud app deployment create') as c:
c.argument('source_path', arg_type=source_path_type, validator=validate_deloyment_create_path)
with self.argument_context('spring-cloud app deployment create') as c:
        c.argument('skip_clone_settings', help='Creating a staging deployment will automatically copy settings from the production deployment.',
action='store_true')
c.argument('cpu', arg_type=cpu_type)
c.argument('memory', arg_type=memort_type)
        c.argument('instance_count', type=int, help='Number of instances.', validator=validate_instance_count)
with self.argument_context('spring-cloud app deployment') as c:
c.argument('app', app_name_type, help='Name of app.',
validator=validate_app_name)
c.argument('name', name_type, help='Name of deployment.')
for scope in ['spring-cloud app deployment generate-heap-dump', 'spring-cloud app deployment generate-thread-dump']:
with self.argument_context(scope) as c:
c.argument('deployment', options_list=[
'--deployment', '-d'], help='Name of an existing deployment of the app. Default to the production deployment if not specified.', validator=fulfill_deployment_param)
c.argument('app_instance', help='Target app instance you want to dump.')
c.argument('file_path', help='The mount file path for your dump file.')
with self.argument_context('spring-cloud app deployment start-jfr') as c:
c.argument('deployment', options_list=[
'--deployment', '-d'], help='Name of an existing deployment of the app. Default to the production deployment if not specified.', validator=fulfill_deployment_param)
c.argument('app_instance', help='Target app instance you want to dump.')
c.argument('file_path', help='The mount file path for your dump file.')
c.argument('duration', type=str, default="60s", help='Duration of JFR.')
with self.argument_context('spring-cloud app binding') as c:
c.argument('app', app_name_type, help='Name of app.',
validator=active_deployment_exist_under_app)
c.argument('name', name_type, help='Name of service binding.')
for scope in ['spring-cloud app binding cosmos add', 'spring-cloud app binding mysql add', 'spring-cloud app binding redis add']:
with self.argument_context(scope) as c:
c.argument('resource_id', validator=validate_resource_id,
help='Azure resource ID of the service to bind with.')
for scope in ['spring-cloud app binding cosmos add', 'spring-cloud app binding cosmos update']:
with self.argument_context(scope) as c:
c.argument(
'database_name', help='Name of database. Required for mongo, sql, gremlin')
c.argument(
'key_space', help='Cassandra key space. Required for cassandra')
c.argument('collection_name',
help='Name of collection. Required for gremlin')
with self.argument_context('spring-cloud app binding cosmos add') as c:
c.argument('api_type', help='Type of API.', arg_type=get_enum_type(
ApiType), validator=validate_cosmos_type)
for scope in ['spring-cloud app binding mysql add', 'spring-cloud app binding mysql update']:
with self.argument_context(scope) as c:
c.argument('key', help='API key of the service.')
c.argument('username', help='Username of the database')
c.argument('database_name', help='Database name')
for scope in ['spring-cloud app binding redis add', 'spring-cloud app binding redis update']:
with self.argument_context(scope) as c:
            c.argument('key', help='API key of the service.')
c.argument('disable_ssl', arg_type=get_three_state_flag(), help='If true, disable SSL. If false, enable SSL.', default=False)
with self.argument_context('spring-cloud app append-loaded-public-certificate') as c:
c.argument('certificate_name', help='Name of the certificate to be appended')
c.argument('load_trust_store', arg_type=get_three_state_flag(), help='If true, the certificate would be loaded into trust store for Java applications', default=False)
with self.argument_context('spring-cloud config-server set') as c:
c.argument('config_file',
help='A yaml file path for the configuration of Spring Cloud config server')
for scope in ['spring-cloud config-server git set', 'spring-cloud config-server git repo add', 'spring-cloud config-server git repo update']:
with self.argument_context(scope) as c:
c.argument('uri', help='Uri of the added config.')
c.argument('label', help='Label of the added config.')
c.argument(
'search_paths', help='search_paths of the added config, use , as delimiter for multiple paths.')
c.argument('username', help='Username of the added config.')
c.argument('password', help='Password of the added config.')
c.argument('host_key', help='Host key of the added config.')
c.argument('host_key_algorithm',
help='Host key algorithm of the added config.')
c.argument('private_key', help='Private_key of the added config.')
c.argument('strict_host_key_checking',
help='Strict_host_key_checking of the added config.')
for scope in ['spring-cloud config-server git repo add', 'spring-cloud config-server git repo update', 'spring-cloud config-server git repo remove']:
with self.argument_context(scope) as c:
c.argument('repo_name', help='Name of the repo.')
for scope in ['spring-cloud config-server git repo add', 'spring-cloud config-server git repo update']:
with self.argument_context(scope) as c:
c.argument(
'pattern', help='Pattern of the repo, use , as delimiter for multiple patterns')
with self.argument_context('spring-cloud test-endpoint list') as c:
c.argument('app', app_name_type, help='Name of app.',
validator=validate_app_name)
c.argument('deployment', options_list=[
'--deployment', '-d'], help='Name of an existing deployment of the app. Default to the production deployment if not specified.', validator=validate_deployment_name)
with self.argument_context('spring-cloud storage') as c:
c.argument('service', service_name_type)
c.argument('name', help='Name of storage.')
with self.argument_context('spring-cloud storage add') as c:
        c.argument('storage_type', help='The type of the storage, e.g. StorageAccount')
c.argument('account_name', help='The name of the storage account.')
c.argument('account_key', help='The account key of the storage account.')
with self.argument_context('spring-cloud storage update') as c:
        c.argument('storage_type', help='The type of the storage, e.g. StorageAccount')
c.argument('account_name', help='The name of the storage account.')
c.argument('account_key', help='The account key of the storage account.')
with self.argument_context('spring-cloud certificate') as c:
c.argument('service', service_name_type)
c.argument('name', help='Name of certificate.')
with self.argument_context('spring-cloud certificate add') as c:
c.argument('vault_uri', help='The key vault uri where store the certificate')
c.argument('vault_certificate_name', help='The certificate name in key vault')
c.argument('only_public_cert', arg_type=get_three_state_flag(),
help='If true, only import public certificate part from key vault.', default=False)
c.argument('public_certificate_file', options_list=['--public-certificate-file', '-f'],
help='A file path for the public certificate to be uploaded')
with self.argument_context('spring-cloud certificate list') as c:
c.argument('certificate_type', help='Type of uploaded certificate',
arg_type=get_enum_type(['KeyVaultCertificate', 'ContentCertificate']))
with self.argument_context('spring-cloud app custom-domain') as c:
c.argument('service', service_name_type)
c.argument('app', app_name_type, help='Name of app.', validator=active_deployment_exist_under_app)
c.argument('domain_name', help='Name of custom domain.')
with self.argument_context('spring-cloud app custom-domain bind') as c:
c.argument('certificate', type=str, help='Certificate name in Azure Spring Cloud.')
c.argument('enable_end_to_end_tls', arg_type=get_three_state_flag(), help='If true, enable end to end tls')
with self.argument_context('spring-cloud app custom-domain update') as c:
c.argument('certificate', help='Certificate name in Azure Spring Cloud.')
c.argument('enable_end_to_end_tls', arg_type=get_three_state_flag(), help='If true, enable end to end tls')
with self.argument_context('spring-cloud app-insights update') as c:
c.argument('app_insights_key',
help="Connection string (recommended) or Instrumentation key of the existing Application Insights.",
validator=validate_app_insights_parameters)
c.argument('app_insights',
help="Name of the existing Application Insights in the same Resource Group. "
"Or Resource ID of the existing Application Insights in a different Resource Group.",
validator=validate_app_insights_parameters)
c.argument('sampling_rate',
type=float,
help="Sampling Rate of application insights. Maximum is 100.",
validator=validate_app_insights_parameters)
c.argument('disable',
arg_type=get_three_state_flag(),
help="Disable Application Insights.",
validator=validate_app_insights_parameters)
for scope in ['spring-cloud application-configuration-service', 'spring-cloud service-registry',
'spring-cloud gateway', 'spring-cloud api-portal']:
with self.argument_context(scope) as c:
c.argument('service', service_name_type, validator=only_support_enterprise)
with self.argument_context('spring-cloud service-registry bind') as c:
c.argument('app', app_name_type, help='Name of app.', validator=validate_app_name)
with self.argument_context('spring-cloud service-registry unbind') as c:
c.argument('app', app_name_type, help='Name of app.', validator=validate_app_name)
with self.argument_context('spring-cloud application-configuration-service bind') as c:
c.argument('app', app_name_type, help='Name of app.', validator=validate_app_name)
with self.argument_context('spring-cloud application-configuration-service unbind') as c:
c.argument('app', app_name_type, help='Name of app.', validator=validate_app_name)
for scope in ['spring-cloud application-configuration-service git repo add',
'spring-cloud application-configuration-service git repo update']:
with self.argument_context(scope) as c:
c.argument('patterns',
help='Required patterns used to search in Git repositories. '
'For each pattern, use format like {application} or {application}/{profile} '
'instead of {application}-{profile}.yml, and separate them by comma.',
validator=validate_acs_patterns),
c.argument('uri', help="Required Git URI.", validator=validate_git_uri),
c.argument('label', help="Required branch name to search in the Git repository."),
c.argument('search_paths', help='search_paths of the added config, use , as delimiter for multiple paths.')
c.argument('username', help='Username of the added config.')
c.argument('password', help='Password of the added config.')
c.argument('host_key', help='Host key of the added config.')
c.argument('host_key_algorithm', help='Host key algorithm of the added config.')
c.argument('private_key', help='Private_key of the added config.')
c.argument('host_key_check', help='Strict_host_key_checking of the added config.')
for scope in ['spring-cloud application-configuration-service git repo add',
'spring-cloud application-configuration-service git repo update',
'spring-cloud application-configuration-service git repo remove']:
with self.argument_context(scope) as c:
c.argument('name', help="Required unique name to label each item of git configs.")
for scope in ['spring-cloud gateway update',
'spring-cloud api-portal update']:
with self.argument_context(scope) as c:
            c.argument('instance_count', type=int, help='Number of instances.')
c.argument('assign_endpoint', arg_type=get_three_state_flag(), help='If true, assign endpoint URL for direct access.')
c.argument('https_only', arg_type=get_three_state_flag(), help='If true, access endpoint via https')
c.argument('scope', arg_group='Single Sign On (SSO)', help="Comma-separated list of the specific actions applications can be allowed to do on a user's behalf.")
c.argument('client_id', arg_group='Single Sign On (SSO)', help="The public identifier for the application.")
c.argument('client_secret', arg_group='Single Sign On (SSO)', help="The secret known only to the application and the authorization server.")
c.argument('issuer_uri', arg_group='Single Sign On (SSO)', help="The URI of Issuer Identifier.")
with self.argument_context('spring-cloud gateway update') as c:
c.argument('cpu', type=str, help='CPU resource quantity. Should be 500m or number of CPU cores.')
c.argument('memory', type=str, help='Memory resource quantity. Should be 512Mi or #Gi, e.g., 1Gi, 3Gi.')
c.argument('api_title', arg_group='API metadata', help="Title describing the context of the APIs available on the Gateway instance.")
c.argument('api_description', arg_group='API metadata', help="Detailed description of the APIs available on the Gateway instance.")
c.argument('api_doc_location', arg_group='API metadata', help="Location of additional documentation for the APIs available on the Gateway instance.")
c.argument('api_version', arg_group='API metadata', help="Version of APIs available on this Gateway instance.")
c.argument('server_url', arg_group='API metadata', help="Base URL that API consumers will use to access APIs on the Gateway instance.")
c.argument('allowed_origins', arg_group='Cross-origin Resource Sharing (CORS)', help="Comma-separated list of allowed origins to make cross-site requests. The special value `*` allows all domains.")
c.argument('allowed_methods', arg_group='Cross-origin Resource Sharing (CORS)', help="Comma-separated list of allowed HTTP methods on cross-site requests. The special value `*` allows all methods.")
c.argument('allowed_headers', arg_group='Cross-origin Resource Sharing (CORS)', help="Comma-separated list of allowed headers in cross-site requests. The special value `*` allows actual requests to send any header.")
c.argument('max_age', arg_group='Cross-origin Resource Sharing (CORS)', type=int,
help="How long, in seconds, the response from a pre-flight request can be cached by clients.")
c.argument('allow_credentials', arg_group='Cross-origin Resource Sharing (CORS)', arg_type=get_three_state_flag(),
help="Whether user credentials are supported on cross-site requests.")
c.argument('exposed_headers', arg_group='Cross-origin Resource Sharing (CORS)', help="Comma-separated list of HTTP response headers to expose for cross-site requests.")
for scope in ['spring-cloud gateway custom-domain',
'spring-cloud api-portal custom-domain']:
with self.argument_context(scope) as c:
c.argument('domain_name', help='Name of custom domain.')
for scope in ['spring-cloud gateway custom-domain bind',
'spring-cloud gateway custom-domain update',
'spring-cloud api-portal custom-domain bind',
'spring-cloud api-portal custom-domain update']:
with self.argument_context(scope) as c:
c.argument('certificate', type=str, help='Certificate name in Azure Spring Cloud.')
with self.argument_context('spring-cloud gateway route-config') as c:
c.argument('name', help='Name of route config.')
for scope in ['spring-cloud gateway route-config create',
'spring-cloud gateway route-config update']:
with self.argument_context(scope) as c:
c.argument('app_name', type=str, help="The Azure Spring Cloud app name to configure the route.")
c.argument('routes_json', type=str, help="The JSON array of API routes.", validator=validate_routes)
c.argument('routes_file', type=str, help="The file path of JSON array of API routes.", validator=validate_routes)
|
def load_arguments(self, _):
with self.argument_context('spring-cloud') as c:
c.argument('resource_group', arg_type=resource_group_name_type)
c.argument('name', options_list=[
'--name', '-n'], help='Name of Azure Spring Cloud.')
# A refactoring work item to move validators to command level to reduce the duplications.
# https://dev.azure.com/msazure/AzureDMSS/_workitems/edit/11002857/
with self.argument_context('spring-cloud create') as c:
c.argument('location', arg_type=get_location_type(self.cli_ctx), validator=validate_location)
c.argument('sku', arg_type=sku_type, default='Standard')
        c.argument('reserved_cidr_range', help='Comma-separated list of IP address ranges in CIDR format. The IP ranges are reserved to host underlying Azure Spring Cloud infrastructure, which should be at least 3 unused /16 IP ranges that must not overlap with any Subnet IP ranges.', validator=validate_vnet_required_parameters)
c.argument('vnet', help='The name or ID of an existing Virtual Network into which to deploy the Spring Cloud instance.', validator=validate_vnet_required_parameters)
c.argument('app_subnet', help='The name or ID of an existing subnet in "vnet" into which to deploy the Spring Cloud app. Required when deploying into a Virtual Network. Smaller subnet sizes are supported, please refer: https://aka.ms/azure-spring-cloud-smaller-subnet-vnet-docs', validator=validate_vnet_required_parameters)
c.argument('service_runtime_subnet', options_list=['--service-runtime-subnet', '--svc-subnet'], help='The name or ID of an existing subnet in "vnet" into which to deploy the Spring Cloud service runtime. Required when deploying into a Virtual Network.', validator=validate_vnet)
c.argument('service_runtime_network_resource_group', options_list=['--service-runtime-network-resource-group', '--svc-nrg'], help='The resource group where all network resources for Azure Spring Cloud service runtime will be created in.', validator=validate_node_resource_group)
c.argument('app_network_resource_group', options_list=['--app-network-resource-group', '--app-nrg'], help='The resource group where all network resources for apps will be created in.', validator=validate_node_resource_group)
c.argument('enable_java_agent',
arg_type=get_three_state_flag(),
help="Java in process agent is now GA-ed and used by default when Application Insights enabled. "
"This parameter is no longer needed and will be removed in future release.",
validator=validate_java_agent_parameters,
deprecate_info=c.deprecate(target='--enable-java-agent', hide=True))
c.argument('app_insights_key',
help="Connection string (recommended) or Instrumentation key of the existing Application Insights.",
validator=validate_tracing_parameters_asc_create)
c.argument('app_insights',
help="Name of the existing Application Insights in the same Resource Group. "
"Or Resource ID of the existing Application Insights in a different Resource Group.",
validator=validate_tracing_parameters_asc_create)
c.argument('sampling_rate',
type=float,
help="Sampling Rate of application insights. Minimum is 0, maximum is 100.",
validator=validate_tracing_parameters_asc_create)
c.argument('disable_app_insights',
arg_type=get_three_state_flag(),
help="Disable Application Insights, "
"if not disabled and no existing Application Insights specified with "
"--app-insights-key or --app-insights, "
"will create a new Application Insights instance in the same resource group.",
validator=validate_tracing_parameters_asc_create)
c.argument('zone_redundant',
arg_type=get_three_state_flag(),
help="Create your Azure Spring Cloud service in an Azure availability zone or not, "
"this could only be supported in several regions at the moment ",
default=False, is_preview=True)
with self.argument_context('spring-cloud update') as c:
c.argument('sku', arg_type=sku_type)
c.argument('app_insights_key',
help="Connection string (recommended) or Instrumentation key of the existing Application Insights.",
validator=validate_tracing_parameters_asc_update,
deprecate_info=c.deprecate(target='az spring-cloud update --app-insights-key',
redirect='az spring-cloud app-insights update --app-insights-key',
hide=True))
c.argument('app_insights',
help="Name of the existing Application Insights in the same Resource Group. "
"Or Resource ID of the existing Application Insights in a different Resource Group.",
validator=validate_tracing_parameters_asc_update,
deprecate_info=c.deprecate(target='az spring-cloud update --app-insights',
redirect='az spring-cloud app-insights update --app-insights',
hide=True))
c.argument('disable_app_insights',
arg_type=get_three_state_flag(),
help="Disable Application Insights, "
"if not disabled and no existing Application Insights specified with "
"--app-insights-key or --app-insights, "
"will create a new Application Insights instance in the same resource group.",
validator=validate_tracing_parameters_asc_update,
deprecate_info=c.deprecate(target='az spring-cloud update --disable-app-insights',
redirect='az spring-cloud app-insights update --disable',
hide=True))
for scope in ['spring-cloud create', 'spring-cloud update']:
with self.argument_context(scope) as c:
c.argument('tags', arg_type=tags_type)
with self.argument_context('spring-cloud test-endpoint renew-key') as c:
c.argument('type', type=str, arg_type=get_enum_type(
TestKeyType), help='Type of test-endpoint key')
with self.argument_context('spring-cloud app') as c:
c.argument('service', service_name_type)
c.argument('name', name_type, help='Name of app.')
with self.argument_context('spring-cloud app create') as c:
c.argument('assign_endpoint', arg_type=get_three_state_flag(),
help='If true, assign endpoint URL for direct access.', default=False,
options_list=['--assign-endpoint', c.deprecate(target='--is-public', redirect='--assign-endpoint', hide=True)])
c.argument('assign_identity', arg_type=get_three_state_flag(),
help='If true, assign managed service identity.')
c.argument('cpu', arg_type=cpu_type, default="1")
c.argument('memory', arg_type=memort_type, default="1Gi")
c.argument('instance_count', type=int,
                   default=1, help='Number of instances.', validator=validate_instance_count)
c.argument('persistent_storage', type=str,
help='A json file path for the persistent storages to be mounted to the app')
c.argument('loaded_public_certificate_file', options_list=['--loaded-public-certificate-file', '-f'], type=str,
                   help='A json file path that indicates the certificates to be loaded to the app')
with self.argument_context('spring-cloud app update') as c:
c.argument('assign_endpoint', arg_type=get_three_state_flag(),
help='If true, assign endpoint URL for direct access.',
options_list=['--assign-endpoint', c.deprecate(target='--is-public', redirect='--assign-endpoint', hide=True)])
c.argument('https_only', arg_type=get_three_state_flag(), help='If true, access app via https', default=False)
c.argument('enable_end_to_end_tls', arg_type=get_three_state_flag(), help='If true, enable end to end tls')
c.argument('persistent_storage', type=str,
help='A json file path for the persistent storages to be mounted to the app')
c.argument('loaded_public_certificate_file', type=str, options_list=['--loaded-public-certificate-file', '-f'],
                   help='A json file path that indicates the certificates to be loaded to the app')
with self.argument_context('spring-cloud app append-persistent-storage') as c:
c.argument('storage_name', type=str,
help='Name of the storage resource you created in Azure Spring Cloud.')
        c.argument('persistent_storage_type', options_list=['--persistent-storage-type', '-t'], type=str, help='Type of the persistent storage volume.')
c.argument('share_name', type=str,
help="The name of the pre-created file share. "
"ShareName should be provided only if the type of the persistent storage volume is AzureFileVolume.")
c.argument('mount_path', type=str, help='The path for the persistent storage volume to be mounted.')
c.argument('mount_options', nargs='+', help='[optional] The mount options for the persistent storage volume.', default=None)
c.argument('read_only', arg_type=get_three_state_flag(), help='[optional] If true, the persistent storage volume will be read only.', default=False)
for scope in ['spring-cloud app update', 'spring-cloud app start', 'spring-cloud app stop', 'spring-cloud app restart', 'spring-cloud app deploy', 'spring-cloud app scale', 'spring-cloud app set-deployment', 'spring-cloud app show-deploy-log']:
with self.argument_context(scope) as c:
c.argument('deployment', options_list=[
'--deployment', '-d'], help='Name of an existing deployment of the app. Default to the production deployment if not specified.', validator=fulfill_deployment_param)
c.argument('main_entry', options_list=[
'--main-entry', '-m'], help="The path to the .NET executable relative to zip root.")
for scope in ['spring-cloud app identity', 'spring-cloud app unset-deployment']:
with self.argument_context(scope) as c:
c.argument('name', name_type, help='Name of app.', validator=active_deployment_exist)
with self.argument_context('spring-cloud app identity assign') as c:
c.argument('scope', help="The scope the managed identity has access to")
c.argument('role', help="Role name or id the managed identity will be assigned")
def prepare_logs_argument(c):
'''`app log tail` is deprecated. `app logs` is the new choice. They share the same command processor.'''
c.argument('instance', options_list=['--instance', '-i'], help='Name of an existing instance of the deployment.')
c.argument('lines', type=int, help='Number of lines to show. Maximum is 10000', validator=validate_log_lines)
        c.argument('follow', options_list=['--follow', '-f'], help='Specify if the logs should be streamed.', action='store_true')
c.argument('since', help='Only return logs newer than a relative duration like 5s, 2m, or 1h. Maximum is 1h', validator=validate_log_since)
c.argument('limit', type=int, help='Maximum kilobytes of logs to return. Ceiling number is 2048.', validator=validate_log_limit)
c.argument('deployment', options_list=[
'--deployment', '-d'], help='Name of an existing deployment of the app. Default to the production deployment if not specified.', validator=fulfill_deployment_param)
c.argument('format_json', nargs='?', const='{timestamp} {level:>5} [{thread:>15.15}] {logger{39}:<40.40}: {message}\n{stackTrace}',
help='Format JSON logs if structured log is enabled')
with self.argument_context('spring-cloud app logs') as c:
prepare_logs_argument(c)
with self.argument_context('spring-cloud app log tail') as c:
prepare_logs_argument(c)
with self.argument_context('spring-cloud app set-deployment') as c:
c.argument('deployment', options_list=[
'--deployment', '-d'], help='Name of an existing deployment of the app.', validator=ensure_not_active_deployment)
for scope in ['spring-cloud app create', 'spring-cloud app update']:
with self.argument_context(scope) as c:
c.argument('enable_persistent_storage', arg_type=get_three_state_flag(),
help='If true, mount a 50G (Standard Pricing tier) or 1G (Basic Pricing tier) disk with default path.')
for scope in ['spring-cloud app update', 'spring-cloud app deployment create', 'spring-cloud app deploy', 'spring-cloud app create']:
with self.argument_context(scope) as c:
c.argument('runtime_version', arg_type=get_enum_type(RuntimeVersion),
help='Runtime version of used language')
c.argument('jvm_options', type=str, validator=validate_jvm_options,
help="A string containing jvm options, use '=' instead of ' ' for this argument to avoid bash parse error, eg: --jvm-options='-Xms1024m -Xmx2048m'")
c.argument('env', env_type)
c.argument('disable_probe', arg_type=get_three_state_flag(), help='If true, disable the liveness and readiness probe.')
with self.argument_context('spring-cloud app scale') as c:
c.argument('cpu', arg_type=cpu_type)
c.argument('memory', arg_type=memort_type)
        c.argument('instance_count', type=int, help='Number of instances.', validator=validate_instance_count)
for scope in ['spring-cloud app deploy', 'spring-cloud app deployment create']:
with self.argument_context(scope) as c:
c.argument(
'artifact_path', options_list=['--artifact-path',
c.deprecate(target='--jar-path', redirect='--artifact-path', hide=True),
c.deprecate(target='-p', redirect='--artifact-path', hide=True)],
help='Deploy the specified pre-built artifact (jar or netcore zip).', validator=validate_jar)
c.argument(
'disable_validation', arg_type=get_three_state_flag(),
help='If true, disable jar validation.')
c.argument('builder', help='(Enterprise Tier Only) Build service builder used to build the executable.', default='default', is_preview=True)
c.argument(
'main_entry', options_list=[
'--main-entry', '-m'], help="A string containing the path to the .NET executable relative to zip root.")
c.argument(
'target_module', help='Child module to be deployed, required for multiple jar packages built from source code.', arg_group='Source Code deploy')
c.argument(
'version', help='Deployment version, keep unchanged if not set.')
c.argument(
'container_image', help='The container image tag.', arg_group='Custom Container')
c.argument(
'container_registry', default='docker.io', help='The registry of the container image.', arg_group='Custom Container')
c.argument(
'registry_username', help='The username of the container registry.', arg_group='Custom Container')
c.argument(
'registry_password', help='The password of the container registry.', arg_group='Custom Container')
c.argument(
'container_command', help='The command of the container image.', nargs='*', arg_group='Custom Container')
c.argument(
'container_args', help='The arguments of the container image.', nargs='*', arg_group='Custom Container')
with self.argument_context('spring-cloud app deploy') as c:
c.argument('source_path', arg_type=source_path_type, validator=validate_deloy_path)
with self.argument_context('spring-cloud app deployment create') as c:
c.argument('source_path', arg_type=source_path_type, validator=validate_deloyment_create_path)
with self.argument_context('spring-cloud app deployment create') as c:
        c.argument('skip_clone_settings', help='By default, creating a staging deployment copies settings from the production deployment; specify this flag to skip copying them.',
action='store_true')
c.argument('cpu', arg_type=cpu_type)
c.argument('memory', arg_type=memort_type)
        c.argument('instance_count', type=int, help='Number of instances.', validator=validate_instance_count)
with self.argument_context('spring-cloud app deployment') as c:
c.argument('app', app_name_type, help='Name of app.',
validator=validate_app_name)
c.argument('name', name_type, help='Name of deployment.')
for scope in ['spring-cloud app deployment generate-heap-dump', 'spring-cloud app deployment generate-thread-dump']:
with self.argument_context(scope) as c:
c.argument('deployment', options_list=[
                '--deployment', '-d'], help='Name of an existing deployment of the app. Defaults to the production deployment if not specified.', validator=fulfill_deployment_param)
c.argument('app_instance', help='Target app instance you want to dump.')
c.argument('file_path', help='The mount file path for your dump file.')
with self.argument_context('spring-cloud app deployment start-jfr') as c:
c.argument('deployment', options_list=[
            '--deployment', '-d'], help='Name of an existing deployment of the app. Defaults to the production deployment if not specified.', validator=fulfill_deployment_param)
c.argument('app_instance', help='Target app instance you want to dump.')
c.argument('file_path', help='The mount file path for your dump file.')
c.argument('duration', type=str, default="60s", help='Duration of JFR.')
with self.argument_context('spring-cloud app binding') as c:
c.argument('app', app_name_type, help='Name of app.',
validator=active_deployment_exist_under_app)
c.argument('name', name_type, help='Name of service binding.')
for scope in ['spring-cloud app binding cosmos add', 'spring-cloud app binding mysql add', 'spring-cloud app binding redis add']:
with self.argument_context(scope) as c:
c.argument('resource_id', validator=validate_resource_id,
help='Azure resource ID of the service to bind with.')
for scope in ['spring-cloud app binding cosmos add', 'spring-cloud app binding cosmos update']:
with self.argument_context(scope) as c:
c.argument(
'database_name', help='Name of database. Required for mongo, sql, gremlin')
c.argument(
'key_space', help='Cassandra key space. Required for cassandra')
c.argument('collection_name',
help='Name of collection. Required for gremlin')
with self.argument_context('spring-cloud app binding cosmos add') as c:
c.argument('api_type', help='Type of API.', arg_type=get_enum_type(
ApiType), validator=validate_cosmos_type)
for scope in ['spring-cloud app binding mysql add', 'spring-cloud app binding mysql update']:
with self.argument_context(scope) as c:
c.argument('key', help='API key of the service.')
c.argument('username', help='Username of the database')
c.argument('database_name', help='Database name')
for scope in ['spring-cloud app binding redis add', 'spring-cloud app binding redis update']:
with self.argument_context(scope) as c:
            c.argument('key', help='API key of the service.')
c.argument('disable_ssl', arg_type=get_three_state_flag(), help='If true, disable SSL. If false, enable SSL.', default=False)
with self.argument_context('spring-cloud app append-loaded-public-certificate') as c:
c.argument('certificate_name', help='Name of the certificate to be appended')
        c.argument('load_trust_store', arg_type=get_three_state_flag(), help='If true, the certificate will be loaded into the trust store for Java applications.', default=False)
with self.argument_context('spring-cloud config-server set') as c:
c.argument('config_file',
help='A yaml file path for the configuration of Spring Cloud config server')
for scope in ['spring-cloud config-server git set', 'spring-cloud config-server git repo add', 'spring-cloud config-server git repo update']:
with self.argument_context(scope) as c:
            c.argument('uri', help='URI of the added config.')
c.argument('label', help='Label of the added config.')
c.argument(
'search_paths', help='search_paths of the added config, use , as delimiter for multiple paths.')
c.argument('username', help='Username of the added config.')
c.argument('password', help='Password of the added config.')
c.argument('host_key', help='Host key of the added config.')
c.argument('host_key_algorithm',
help='Host key algorithm of the added config.')
c.argument('private_key', help='Private_key of the added config.')
c.argument('strict_host_key_checking',
help='Strict_host_key_checking of the added config.')
for scope in ['spring-cloud config-server git repo add', 'spring-cloud config-server git repo update', 'spring-cloud config-server git repo remove']:
with self.argument_context(scope) as c:
c.argument('repo_name', help='Name of the repo.')
for scope in ['spring-cloud config-server git repo add', 'spring-cloud config-server git repo update']:
with self.argument_context(scope) as c:
c.argument(
'pattern', help='Pattern of the repo, use , as delimiter for multiple patterns')
with self.argument_context('spring-cloud test-endpoint list') as c:
c.argument('app', app_name_type, help='Name of app.',
validator=validate_app_name)
c.argument('deployment', options_list=[
            '--deployment', '-d'], help='Name of an existing deployment of the app. Defaults to the production deployment if not specified.', validator=validate_deployment_name)
with self.argument_context('spring-cloud storage') as c:
c.argument('service', service_name_type)
c.argument('name', help='Name of storage.')
with self.argument_context('spring-cloud storage add') as c:
        c.argument('storage_type', help='The type of the storage. e.g. StorageAccount')
c.argument('account_name', help='The name of the storage account.')
c.argument('account_key', help='The account key of the storage account.')
with self.argument_context('spring-cloud storage update') as c:
        c.argument('storage_type', help='The type of the storage. e.g. StorageAccount')
c.argument('account_name', help='The name of the storage account.')
c.argument('account_key', help='The account key of the storage account.')
with self.argument_context('spring-cloud certificate') as c:
c.argument('service', service_name_type)
c.argument('name', help='Name of certificate.')
with self.argument_context('spring-cloud certificate add') as c:
        c.argument('vault_uri', help='The key vault URI where the certificate is stored')
c.argument('vault_certificate_name', help='The certificate name in key vault')
c.argument('only_public_cert', arg_type=get_three_state_flag(),
help='If true, only import public certificate part from key vault.', default=False)
c.argument('public_certificate_file', options_list=['--public-certificate-file', '-f'],
help='A file path for the public certificate to be uploaded')
with self.argument_context('spring-cloud certificate list') as c:
c.argument('certificate_type', help='Type of uploaded certificate',
arg_type=get_enum_type(['KeyVaultCertificate', 'ContentCertificate']))
with self.argument_context('spring-cloud app custom-domain') as c:
c.argument('service', service_name_type)
c.argument('app', app_name_type, help='Name of app.', validator=active_deployment_exist_under_app)
c.argument('domain_name', help='Name of custom domain.')
with self.argument_context('spring-cloud app custom-domain bind') as c:
c.argument('certificate', type=str, help='Certificate name in Azure Spring Cloud.')
        c.argument('enable_end_to_end_tls', arg_type=get_three_state_flag(), help='If true, enable end-to-end TLS.')
with self.argument_context('spring-cloud app custom-domain update') as c:
c.argument('certificate', help='Certificate name in Azure Spring Cloud.')
        c.argument('enable_end_to_end_tls', arg_type=get_three_state_flag(), help='If true, enable end-to-end TLS.')
with self.argument_context('spring-cloud app-insights update') as c:
c.argument('app_insights_key',
help="Connection string (recommended) or Instrumentation key of the existing Application Insights.",
validator=validate_app_insights_parameters)
c.argument('app_insights',
help="Name of the existing Application Insights in the same Resource Group. "
"Or Resource ID of the existing Application Insights in a different Resource Group.",
validator=validate_app_insights_parameters)
c.argument('sampling_rate',
type=float,
help="Sampling Rate of application insights. Maximum is 100.",
validator=validate_app_insights_parameters)
c.argument('disable',
arg_type=get_three_state_flag(),
help="Disable Application Insights.",
validator=validate_app_insights_parameters)
for scope in ['application-configuration-service', 'service-registry',
'gateway', 'api-portal']:
        with self.argument_context('spring-cloud {}'.format(scope)) as c:
c.argument('service', service_name_type, validator=only_support_enterprise)
with self.argument_context('spring-cloud service-registry bind') as c:
c.argument('app', app_name_type, help='Name of app.', validator=validate_app_name)
with self.argument_context('spring-cloud service-registry unbind') as c:
c.argument('app', app_name_type, help='Name of app.', validator=validate_app_name)
with self.argument_context('spring-cloud application-configuration-service bind') as c:
c.argument('app', app_name_type, help='Name of app.', validator=validate_app_name)
with self.argument_context('spring-cloud application-configuration-service unbind') as c:
c.argument('app', app_name_type, help='Name of app.', validator=validate_app_name)
for scope in ['spring-cloud application-configuration-service git repo add',
'spring-cloud application-configuration-service git repo update']:
with self.argument_context(scope) as c:
c.argument('patterns',
help='Required patterns used to search in Git repositories. '
'For each pattern, use format like {application} or {application}/{profile} '
'instead of {application}-{profile}.yml, and separate them by comma.',
validator=validate_acs_patterns),
c.argument('uri', help="Required Git URI.", validator=validate_git_uri),
c.argument('label', help="Required branch name to search in the Git repository."),
c.argument('search_paths', help='search_paths of the added config, use , as delimiter for multiple paths.')
c.argument('username', help='Username of the added config.')
c.argument('password', help='Password of the added config.')
c.argument('host_key', help='Host key of the added config.')
c.argument('host_key_algorithm', help='Host key algorithm of the added config.')
c.argument('private_key', help='Private_key of the added config.')
c.argument('host_key_check', help='Strict_host_key_checking of the added config.')
for scope in ['spring-cloud application-configuration-service git repo add',
'spring-cloud application-configuration-service git repo update',
'spring-cloud application-configuration-service git repo remove']:
with self.argument_context(scope) as c:
c.argument('name', help="Required unique name to label each item of git configs.")
for scope in ['spring-cloud gateway update',
'spring-cloud api-portal update']:
with self.argument_context(scope) as c:
            c.argument('instance_count', type=int, help='Number of instances.')
c.argument('assign_endpoint', arg_type=get_three_state_flag(), help='If true, assign endpoint URL for direct access.')
            c.argument('https_only', arg_type=get_three_state_flag(), help='If true, access the endpoint via HTTPS.')
c.argument('scope', arg_group='Single Sign On (SSO)', help="Comma-separated list of the specific actions applications can be allowed to do on a user's behalf.")
c.argument('client_id', arg_group='Single Sign On (SSO)', help="The public identifier for the application.")
c.argument('client_secret', arg_group='Single Sign On (SSO)', help="The secret known only to the application and the authorization server.")
c.argument('issuer_uri', arg_group='Single Sign On (SSO)', help="The URI of Issuer Identifier.")
with self.argument_context('spring-cloud gateway update') as c:
c.argument('cpu', type=str, help='CPU resource quantity. Should be 500m or number of CPU cores.')
c.argument('memory', type=str, help='Memory resource quantity. Should be 512Mi or #Gi, e.g., 1Gi, 3Gi.')
c.argument('api_title', arg_group='API metadata', help="Title describing the context of the APIs available on the Gateway instance.")
c.argument('api_description', arg_group='API metadata', help="Detailed description of the APIs available on the Gateway instance.")
c.argument('api_doc_location', arg_group='API metadata', help="Location of additional documentation for the APIs available on the Gateway instance.")
c.argument('api_version', arg_group='API metadata', help="Version of APIs available on this Gateway instance.")
c.argument('server_url', arg_group='API metadata', help="Base URL that API consumers will use to access APIs on the Gateway instance.")
c.argument('allowed_origins', arg_group='Cross-origin Resource Sharing (CORS)', help="Comma-separated list of allowed origins to make cross-site requests. The special value `*` allows all domains.")
c.argument('allowed_methods', arg_group='Cross-origin Resource Sharing (CORS)', help="Comma-separated list of allowed HTTP methods on cross-site requests. The special value `*` allows all methods.")
c.argument('allowed_headers', arg_group='Cross-origin Resource Sharing (CORS)', help="Comma-separated list of allowed headers in cross-site requests. The special value `*` allows actual requests to send any header.")
c.argument('max_age', arg_group='Cross-origin Resource Sharing (CORS)', type=int,
help="How long, in seconds, the response from a pre-flight request can be cached by clients.")
c.argument('allow_credentials', arg_group='Cross-origin Resource Sharing (CORS)', arg_type=get_three_state_flag(),
help="Whether user credentials are supported on cross-site requests.")
c.argument('exposed_headers', arg_group='Cross-origin Resource Sharing (CORS)', help="Comma-separated list of HTTP response headers to expose for cross-site requests.")
for scope in ['spring-cloud gateway custom-domain',
'spring-cloud api-portal custom-domain']:
with self.argument_context(scope) as c:
c.argument('domain_name', help='Name of custom domain.')
for scope in ['spring-cloud gateway custom-domain bind',
'spring-cloud gateway custom-domain update',
'spring-cloud api-portal custom-domain bind',
'spring-cloud api-portal custom-domain update']:
with self.argument_context(scope) as c:
c.argument('certificate', type=str, help='Certificate name in Azure Spring Cloud.')
with self.argument_context('spring-cloud gateway route-config') as c:
c.argument('name', help='Name of route config.')
for scope in ['spring-cloud gateway route-config create',
'spring-cloud gateway route-config update']:
with self.argument_context(scope) as c:
c.argument('app_name', type=str, help="The Azure Spring Cloud app name to configure the route.")
c.argument('routes_json', type=str, help="The JSON array of API routes.", validator=validate_routes)
c.argument('routes_file', type=str, help="The file path of JSON array of API routes.", validator=validate_routes)
|
42,467 |
def test_ipynb_flag(tmp_path: local) -> None:
nb = DATA_DIR / "notebook_trailing_newline.ipynb"
tmp_nb = tmp_path / "notebook.a_file_extension_which_is_definitely_not_ipynb"
with open(nb) as src, open(tmp_nb, "w") as dst:
dst.write(src.read())
result = runner.invoke(
main,
[
str(tmp_nb),
"--diff",
"--ipynb",
],
)
expected = "@@ -1,3 +1,3 @@\n %%time\n \n-print('foo')\n" '+print("foo")\n'
assert expected in result.output
|
def test_ipynb_flag(tmp_path: pathlib.Path) -> None:
nb = DATA_DIR / "notebook_trailing_newline.ipynb"
tmp_nb = tmp_path / "notebook.a_file_extension_which_is_definitely_not_ipynb"
with open(nb) as src, open(tmp_nb, "w") as dst:
dst.write(src.read())
result = runner.invoke(
main,
[
str(tmp_nb),
"--diff",
"--ipynb",
],
)
expected = "@@ -1,3 +1,3 @@\n %%time\n \n-print('foo')\n" '+print("foo")\n'
assert expected in result.output
|
29,009 |
def check_tcp_ports(ip, ports, timeout=DEFAULT_TIMEOUT):
"""
Checks whether any of the given ports are open on a target IP.
:param ip: IP of host to attack
:param ports: List of ports to attack. Must not be empty.
:param timeout: Amount of time to wait for connection
:return: list of open ports. If get_banner=True, then a matching list of banners.
"""
sockets = [socket.socket(socket.AF_INET, socket.SOCK_STREAM) for _ in range(len(ports))]
[s.setblocking(False) for s in sockets]
possible_ports = []
connected_ports_sockets = []
try:
logger.debug("Connecting to the following ports %s" % ",".join((str(x) for x in ports)))
for sock, port in zip(sockets, ports):
err = sock.connect_ex((ip, port))
if err == 0: # immediate connect
connected_ports_sockets.append((port, sock))
possible_ports.append((port, sock))
continue
if err == 10035: # WSAEWOULDBLOCK is valid, see
# https://msdn.microsoft.com/en-us/library/windows/desktop/ms740668%28v=vs.85%29
# .aspx?f=255&MSPPError=-2147217396
possible_ports.append((port, sock))
continue
if err == 115: # EINPROGRESS 115 /* Operation now in progress */
possible_ports.append((port, sock))
continue
logger.warning("Failed to connect to port %s, error code is %d", port, err)
if len(possible_ports) != 0:
timeout = int(round(timeout)) # clamp to integer, to avoid checking input
sockets_to_try = possible_ports[:]
connected_ports_sockets = []
while (timeout >= 0) and sockets_to_try:
sock_objects = [s[1] for s in sockets_to_try]
_, writeable_sockets, _ = select.select(sock_objects, sock_objects, sock_objects, 0)
for s in writeable_sockets:
try: # actual test
connected_ports_sockets.append((s.getpeername()[1], s))
except socket.error: # bad socket, select didn't filter it properly
pass
sockets_to_try = [s for s in sockets_to_try if s not in connected_ports_sockets]
if sockets_to_try:
time.sleep(SLEEP_BETWEEN_POLL)
timeout -= SLEEP_BETWEEN_POLL
logger.debug(
"On host %s discovered the following ports %s"
% (str(ip), ",".join([str(s[0]) for s in connected_ports_sockets]))
)
banners = []
if len(connected_ports_sockets) != 0:
readable_sockets, _, _ = select.select(
[s[1] for s in connected_ports_sockets], [], [], 0
)
# read first BANNER_READ bytes. We ignore errors because service might not send a
# decodable byte string.
banners = [
sock.recv(BANNER_READ).decode(errors="ignore")
if sock in readable_sockets
else ""
for port, sock in connected_ports_sockets
]
pass
# try to cleanup
[s[1].close() for s in possible_ports]
return [port for port, sock in connected_ports_sockets], banners
else:
return [], []
except socket.error as exc:
logger.warning("Exception when checking ports on host %s, Exception: %s", str(ip), exc)
return [], []
|
def check_tcp_ports(ip, ports, timeout=DEFAULT_TIMEOUT):
"""
Checks whether any of the given ports are open on a target IP.
:param ip: IP of host to attack
:param ports: List of ports to attack. Must not be empty.
:param timeout: Amount of time to wait for connection
:return: List of open ports
"""
sockets = [socket.socket(socket.AF_INET, socket.SOCK_STREAM) for _ in range(len(ports))]
[s.setblocking(False) for s in sockets]
possible_ports = []
connected_ports_sockets = []
try:
logger.debug("Connecting to the following ports %s" % ",".join((str(x) for x in ports)))
for sock, port in zip(sockets, ports):
err = sock.connect_ex((ip, port))
if err == 0: # immediate connect
connected_ports_sockets.append((port, sock))
possible_ports.append((port, sock))
continue
if err == 10035: # WSAEWOULDBLOCK is valid, see
# https://msdn.microsoft.com/en-us/library/windows/desktop/ms740668%28v=vs.85%29
# .aspx?f=255&MSPPError=-2147217396
possible_ports.append((port, sock))
continue
if err == 115: # EINPROGRESS 115 /* Operation now in progress */
possible_ports.append((port, sock))
continue
logger.warning("Failed to connect to port %s, error code is %d", port, err)
if len(possible_ports) != 0:
timeout = int(round(timeout)) # clamp to integer, to avoid checking input
sockets_to_try = possible_ports[:]
connected_ports_sockets = []
while (timeout >= 0) and sockets_to_try:
sock_objects = [s[1] for s in sockets_to_try]
_, writeable_sockets, _ = select.select(sock_objects, sock_objects, sock_objects, 0)
for s in writeable_sockets:
try: # actual test
connected_ports_sockets.append((s.getpeername()[1], s))
except socket.error: # bad socket, select didn't filter it properly
pass
sockets_to_try = [s for s in sockets_to_try if s not in connected_ports_sockets]
if sockets_to_try:
time.sleep(SLEEP_BETWEEN_POLL)
timeout -= SLEEP_BETWEEN_POLL
logger.debug(
"On host %s discovered the following ports %s"
% (str(ip), ",".join([str(s[0]) for s in connected_ports_sockets]))
)
banners = []
if len(connected_ports_sockets) != 0:
readable_sockets, _, _ = select.select(
[s[1] for s in connected_ports_sockets], [], [], 0
)
# read first BANNER_READ bytes. We ignore errors because service might not send a
# decodable byte string.
banners = [
sock.recv(BANNER_READ).decode(errors="ignore")
if sock in readable_sockets
else ""
for port, sock in connected_ports_sockets
]
pass
# try to cleanup
[s[1].close() for s in possible_ports]
return [port for port, sock in connected_ports_sockets], banners
else:
return [], []
except socket.error as exc:
logger.warning("Exception when checking ports on host %s, Exception: %s", str(ip), exc)
return [], []
|
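Below is a minimal sketch, using only the standard library, of the non-blocking connect_ex() plus select() pattern that check_tcp_ports relies on, reduced to a single-port probe; the helper name is hypothetical and the literal Windows error code 10035 mirrors the comment in the source.

import errno
import select
import socket

def is_port_open(ip, port, timeout=3.0):
    """Return True if a TCP connection to (ip, port) completes within timeout."""
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sock.setblocking(False)
    err = sock.connect_ex((ip, port))
    # 0 means an immediate connect; EINPROGRESS (POSIX) or WSAEWOULDBLOCK (10035,
    # Windows) mean the handshake is still in flight, as handled above.
    if err not in (0, errno.EINPROGRESS, errno.EWOULDBLOCK, 10035):
        sock.close()
        return False
    # select() reports the socket writable once the handshake finishes or fails.
    _, writable, _ = select.select([], [sock], [], timeout)
    # SO_ERROR is 0 only if the connection actually succeeded.
    is_open = bool(writable) and sock.getsockopt(socket.SOL_SOCKET, socket.SO_ERROR) == 0
    sock.close()
    return is_open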
1,414 |
def test_multi_task_lasso_cv_dtype():
n_samples, n_features = 10, 3
rng = np.random.RandomState(42)
X = rng.binomial(1, .5, size=(n_samples, n_features))
y = X[:, [0, 0]].copy()
est = MultiTaskLassoCV(n_alphas=5, fit_intercept=True).fit(X, y)
assert_array_almost_equal(est.coef_, [[1, 0, 0]] * 2, decimal=3)
|
def test_multi_task_lasso_cv_dtype():
n_samples, n_features = 10, 3
rng = np.random.RandomState(42)
X = rng.binomial(1, .5, size=(n_samples, n_features)).astype(np.int)
y = X[:, [0, 0]].copy()
est = MultiTaskLassoCV(n_alphas=5, fit_intercept=True).fit(X, y)
assert_array_almost_equal(est.coef_, [[1, 0, 0]] * 2, decimal=3)
|
5,860 |
def dblquad(func, a, b, gfun, hfun, args=(), epsabs=1.49e-8, epsrel=1.49e-8):
"""
Compute a double integral.
Return the double (definite) integral of ``func(y, x)`` from ``x = a..b``
and ``y = gfun(x)..hfun(x)``.
Parameters
----------
func : callable
A Python function or method of at least two variables: y must be the
first argument and x the second argument.
a, b : float
The limits of integration in x: `a` < `b`
gfun : callable or float
The lower boundary curve in y which is a function taking a single
floating point argument (x) and returning a floating point result
or a float indicating a constant boundary curve.
hfun : callable or float
The upper boundary curve in y (same requirements as `gfun`).
args : sequence, optional
Extra arguments to pass to `func`.
epsabs : float, optional
Absolute tolerance passed directly to the inner 1-D quadrature
integration. Default is 1.49e-8. ``dblquad`` tries to obtain
an accuracy of ``abs(i-result) <= max(epsabs, epsrel*abs(i))``
where ``i`` = inner integral of ``func(y, x)`` from ``gfun(x)``
to ``hfun(x)``, and ``result`` is the numerical approximation.
See `epsrel` below.
epsrel : float, optional
Relative tolerance of the inner 1-D integrals. Default is 1.49e-8.
If ``epsabs <= 0``, `epsrel` must be greater than both 5e-29
and ``50 * (machine epsilon)``. See `epsabs` above.
Returns
-------
y : float
The resultant integral.
abserr : float
An estimate of the error.
See Also
--------
quad : single integral
tplquad : triple integral
nquad : N-dimensional integrals
fixed_quad : fixed-order Gaussian quadrature
quadrature : adaptive Gaussian quadrature
odeint : ODE integrator
ode : ODE integrator
simpson : integrator for sampled data
romb : integrator for sampled data
scipy.special : for coefficients and roots of orthogonal polynomials
Notes
-----
**Details of QUADPACK level routines**
`quad` calls routines from the FORTRAN library QUADPACK. This section
provides details on the conditions for each routine to be called and a
short description of each routine. For each level of integration, ``qagse``
is used for finite limits or ``qagie`` is used if either limit (or both!)
are infinite. The following provides a short description from [1]_ for each
routine.
qagse
is an integrator based on globally adaptive interval
subdivision in connection with extrapolation, which will
eliminate the effects of integrand singularities of
several types.
qagie
handles integration over infinite intervals. The infinite range is
mapped onto a finite interval and subsequently the same strategy as
in ``QAGS`` is applied.
References
----------
.. [1] Piessens, Robert; de Doncker-Kapenga, Elise;
Überhuber, Christoph W.; Kahaner, David (1983).
QUADPACK: A subroutine package for automatic integration.
Springer-Verlag.
ISBN 978-3-540-12553-2.
Examples
--------
Compute the double integral of ``x * y**2`` over the box
``x`` ranging from 0 to 2 and ``y`` ranging from 0 to 1.
That is, :math:`\\int^{x=2}_{x=0} \\int^{y=1}_{y=0} x y^2 \\,dy \\,dx`.
>>> from scipy import integrate
>>> f = lambda y, x: x*y**2
>>> integrate.dblquad(f, 0, 2, 0, 1)
(0.6666666666666667, 7.401486830834377e-15)
Calculate :math:`\\int^{x=\\pi/4}_{x=0} \\int^{y=\\cos(x)}_{y=\\sin(x)} 1
\\,dy \\,dx`.
>>> f = lambda y, x: 1
>>> integrate.dblquad(f, 0, np.pi/4, np.sin, np.cos)
(0.41421356237309503, 1.1083280054755938e-14)
Calculate :math:`\\int^{x=1}_{x=0} \\int^{y=x}_{y=2-x} a x y \\,dy \\,dx`
for :math:`a=1, 3`.
>>> f = lambda y, x, a: a*x*y
>>> integrate.dblquad(f, 0, 1, lambda x: x, lambda x: 2-x, args=(1,))
(0.33333333333333337, 5.551115123125783e-15)
>>> integrate.dblquad(f, 0, 1, lambda x: x, lambda x: 2-x, args=(3,))
(0.9999999999999999, 1.6653345369377348e-14)
Compute the two-dimensional Gaussian Integral, which is the integral of the
Gaussian function :math:`f(x,y) = e^{-(x^{2} + y^{2})}`, over
:math:`(-\\infty,\\infty)`. That is, compute the integral
:math:`\\iint^{\\infty}_{-\\infty} e^{-(x^{2} + y^{2})} \\,dy\\,dx`:
>>> import numpy as np
>>> from scipy import integrate
>>> f = lambda x, y: np.exp(-(x ** 2 + y ** 2))
>>> integrate.dblquad(f, -np.inf, np.inf, -np.inf, np.inf)
(3.141592653589777, 2.5173086737433208e-08)
"""
def temp_ranges(*args):
return [gfun(args[0]) if callable(gfun) else gfun,
hfun(args[0]) if callable(hfun) else hfun]
return nquad(func, [temp_ranges, [a, b]], args=args,
opts={"epsabs": epsabs, "epsrel": epsrel})
|
def dblquad(func, a, b, gfun, hfun, args=(), epsabs=1.49e-8, epsrel=1.49e-8):
"""
Compute a double integral.
Return the double (definite) integral of ``func(y, x)`` from ``x = a..b``
and ``y = gfun(x)..hfun(x)``.
Parameters
----------
func : callable
A Python function or method of at least two variables: y must be the
first argument and x the second argument.
a, b : float
The limits of integration in x: `a` < `b`
gfun : callable or float
The lower boundary curve in y which is a function taking a single
floating point argument (x) and returning a floating point result
or a float indicating a constant boundary curve.
hfun : callable or float
The upper boundary curve in y (same requirements as `gfun`).
args : sequence, optional
Extra arguments to pass to `func`.
epsabs : float, optional
Absolute tolerance passed directly to the inner 1-D quadrature
integration. Default is 1.49e-8. ``dblquad`` tries to obtain
an accuracy of ``abs(i-result) <= max(epsabs, epsrel*abs(i))``
where ``i`` = inner integral of ``func(y, x)`` from ``gfun(x)``
to ``hfun(x)``, and ``result`` is the numerical approximation.
See `epsrel` below.
epsrel : float, optional
Relative tolerance of the inner 1-D integrals. Default is 1.49e-8.
If ``epsabs <= 0``, `epsrel` must be greater than both 5e-29
and ``50 * (machine epsilon)``. See `epsabs` above.
Returns
-------
y : float
The resultant integral.
abserr : float
An estimate of the error.
See Also
--------
quad : single integral
tplquad : triple integral
nquad : N-dimensional integrals
fixed_quad : fixed-order Gaussian quadrature
quadrature : adaptive Gaussian quadrature
odeint : ODE integrator
ode : ODE integrator
simpson : integrator for sampled data
romb : integrator for sampled data
scipy.special : for coefficients and roots of orthogonal polynomials
Notes
-----
**Details of QUADPACK level routines**
`quad` calls routines from the FORTRAN library QUADPACK. This section
provides details on the conditions for each routine to be called and a
short description of each routine. For each level of integration, ``qagse``
is used for finite limits or ``qagie`` is used if either limit (or both!)
are infinite. The following provides a short description from [1]_ for each
routine.
qagse
is an integrator based on globally adaptive interval
subdivision in connection with extrapolation, which will
eliminate the effects of integrand singularities of
several types.
qagie
handles integration over infinite intervals. The infinite range is
mapped onto a finite interval and subsequently the same strategy as
in ``QAGS`` is applied.
References
----------
.. [1] Piessens, Robert; de Doncker-Kapenga, Elise;
Überhuber, Christoph W.; Kahaner, David (1983).
QUADPACK: A subroutine package for automatic integration.
Springer-Verlag.
ISBN 978-3-540-12553-2.
Examples
--------
Compute the double integral of ``x * y**2`` over the box
``x`` ranging from 0 to 2 and ``y`` ranging from 0 to 1.
That is, :math:`\\int^{x=2}_{x=0} \\int^{y=1}_{y=0} x y^2 \\,dy \\,dx`.
>>> from scipy import integrate
>>> f = lambda y, x: x*y**2
>>> integrate.dblquad(f, 0, 2, 0, 1)
(0.6666666666666667, 7.401486830834377e-15)
Calculate :math:`\\int^{x=\\pi/4}_{x=0} \\int^{y=\\cos(x)}_{y=\\sin(x)} 1
\\,dy \\,dx`.
>>> f = lambda y, x: 1
>>> integrate.dblquad(f, 0, np.pi/4, np.sin, np.cos)
(0.41421356237309503, 1.1083280054755938e-14)
Calculate :math:`\\int^{x=1}_{x=0} \\int^{y=x}_{y=2-x} a x y \\,dy \\,dx`
for :math:`a=1, 3`.
>>> f = lambda y, x, a: a*x*y
>>> integrate.dblquad(f, 0, 1, lambda x: x, lambda x: 2-x, args=(1,))
(0.33333333333333337, 5.551115123125783e-15)
>>> integrate.dblquad(f, 0, 1, lambda x: x, lambda x: 2-x, args=(3,))
(0.9999999999999999, 1.6653345369377348e-14)
Compute the two-dimensional Gaussian Integral, which is the integral of the
Gaussian function :math:`f(x,y) = e^{-(x^{2} + y^{2})}`, over
:math:`(-\\infty,\\infty)`. That is, compute the integral
:math:`\\iint^{+\\infty}_{-\\infty} e^{-(x^{2} + y^{2})} \\,dy\\,dx`:
>>> import numpy as np
>>> from scipy import integrate
>>> f = lambda x, y: np.exp(-(x ** 2 + y ** 2))
>>> integrate.dblquad(f, -np.inf, np.inf, -np.inf, np.inf)
(3.141592653589777, 2.5173086737433208e-08)
"""
def temp_ranges(*args):
return [gfun(args[0]) if callable(gfun) else gfun,
hfun(args[0]) if callable(hfun) else hfun]
return nquad(func, [temp_ranges, [a, b]], args=args,
opts={"epsabs": epsabs, "epsrel": epsrel})
|
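As the dblquad docstring above notes, gfun and hfun may be given either as floats or as callables of x; a short sketch checking that the two spellings agree on the box integral from the Examples section:

import numpy as np
from scipy import integrate

f = lambda y, x: x * y**2
val_floats, _ = integrate.dblquad(f, 0, 2, 0, 1)                         # constant bounds as floats
val_callables, _ = integrate.dblquad(f, 0, 2, lambda x: 0, lambda x: 1)  # same bounds as callables
assert np.isclose(val_floats, val_callables)                             # both approximately 2/3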
23,143 |
def store(
sources: Array | Collection[Array],
targets: Array | Collection[Array],
lock: bool | Lock = True,
regions: tuple[slice, ...] | Collection[tuple[slice, ...]] | None = None,
compute: bool = True,
return_stored: bool = False,
**kwargs,
):
"""Store dask arrays in array-like objects, overwrite data in target
    This stores dask arrays into objects that support numpy-style setitem
indexing. It stores values chunk by chunk so that it does not have to
fill up memory. For best performance you can align the block size of
the storage target with the block size of your array.
If your data fits in memory then you may prefer calling
``np.array(myarray)`` instead.
Parameters
----------
sources: Array or collection of Arrays
targets: array-like or Delayed or collection of array-likes and/or Delayeds
These should support setitem syntax ``target[10:20] = ...``
lock: boolean or threading.Lock, optional
Whether or not to lock the data stores while storing.
Pass True (lock each file individually), False (don't lock) or a
particular :class:`threading.Lock` object to be shared among all writes.
regions: tuple of slices or collection of tuples of slices
Each ``region`` tuple in ``regions`` should be such that
``target[region].shape = source.shape``
for the corresponding source and target in sources and targets,
respectively. If this is a tuple, the contents will be assumed to be
slices, so do not provide a tuple of tuples.
compute: boolean, optional
If true compute immediately; return :class:`dask.delayed.Delayed` otherwise.
return_stored: boolean, optional
Optionally return the stored result (default False).
kwargs:
Parameters passed to compute/persist (only used if compute=True)
Returns
-------
If return_stored=True
tuple of Arrays
If return_stored=False and compute=True
None
If return_stored=False and compute=False
Delayed
Examples
--------
>>> import h5py # doctest: +SKIP
>>> f = h5py.File('myfile.hdf5', mode='a') # doctest: +SKIP
>>> dset = f.create_dataset('/data', shape=x.shape,
... chunks=x.chunks,
... dtype='f8') # doctest: +SKIP
>>> store(x, dset) # doctest: +SKIP
Alternatively store many arrays at the same time
>>> store([x, y, z], [dset1, dset2, dset3]) # doctest: +SKIP
"""
if isinstance(sources, Array):
sources = [sources]
targets = [targets] # type: ignore
if any(not isinstance(s, Array) for s in sources):
raise ValueError("All sources must be dask array objects")
if len(sources) != len(targets):
raise ValueError(
"Different number of sources [%d] and targets [%d]"
% (len(sources), len(targets))
)
if isinstance(regions, tuple) or regions is None:
regions = [regions] # type: ignore
if len(sources) > 1 and len(regions) == 1:
regions *= len(sources) # type: ignore
if len(sources) != len(regions):
raise ValueError(
"Different number of sources [%d] and targets [%d] than regions [%d]"
% (len(sources), len(targets), len(regions))
)
# Optimize all sources together
sources_hlg = HighLevelGraph.merge(*[e.__dask_graph__() for e in sources])
sources_layer = Array.__dask_optimize__(
sources_hlg, list(core.flatten([e.__dask_keys__() for e in sources]))
)
sources_name = "store-sources-" + tokenize(sources)
layers = {sources_name: sources_layer}
dependencies: dict[str, set] = {sources_name: set()}
# Optimize all targets together
targets_keys = []
targets_dsks = []
for t in targets:
if isinstance(t, Delayed):
targets_keys.append(t.key)
targets_dsks.append(t.__dask_graph__())
elif is_dask_collection(t):
raise TypeError("Targets must be either Delayed objects or array-likes")
if targets_dsks:
targets_hlg = HighLevelGraph.merge(*targets_dsks)
targets_layer = Delayed.__dask_optimize__(targets_hlg, targets_keys)
targets_name = "store-targets-" + tokenize(targets_keys)
layers[targets_name] = targets_layer
dependencies[targets_name] = set()
load_stored = return_stored and not compute
map_names = [
"store-map-" + tokenize(s, t if isinstance(t, Delayed) else id(t), r)
for s, t, r in zip(sources, targets, regions)
]
map_keys: list = []
for s, t, n, r in zip(sources, targets, map_names, regions):
map_layer = insert_to_ooc(
keys=s.__dask_keys__(),
chunks=s.chunks,
out=t.key if isinstance(t, Delayed) else t,
name=n,
lock=lock,
region=r,
return_stored=return_stored,
load_stored=load_stored,
)
layers[n] = map_layer
if isinstance(t, Delayed):
dependencies[n] = {sources_name, targets_name}
else:
dependencies[n] = {sources_name}
map_keys += map_layer.keys()
if return_stored:
store_dsk = HighLevelGraph(layers, dependencies)
load_store_dsk: HighLevelGraph | Mapping = store_dsk
if compute:
store_dlyds = [Delayed(k, store_dsk, layer=k[0]) for k in map_keys]
store_dlyds = persist(*store_dlyds, **kwargs)
store_dsk_2 = HighLevelGraph.merge(*[e.dask for e in store_dlyds])
load_store_dsk = retrieve_from_ooc(map_keys, store_dsk, store_dsk_2)
map_names = ["load-" + n for n in map_names]
return tuple(
Array(load_store_dsk, n, s.chunks, meta=s)
for s, n in zip(sources, map_names)
)
elif compute:
store_dsk = HighLevelGraph(layers, dependencies)
compute_as_if_collection(Array, store_dsk, map_keys, **kwargs)
return None
else:
key = "store-" + tokenize(map_names)
layers[key] = {key: map_keys}
dependencies[key] = set(map_names)
store_dsk = HighLevelGraph(layers, dependencies)
return Delayed(key, store_dsk)
|
def store(
sources: Array | Collection[Array],
targets: Array | Collection[Array],
lock: bool | Lock = True,
regions: tuple[slice, ...] | Collection[tuple[slice, ...]] | None = None,
compute: bool = True,
return_stored: bool = False,
**kwargs,
):
"""Store dask arrays in array-like objects, overwrite data in target
    This stores dask arrays into objects that support numpy-style setitem
indexing. It stores values chunk by chunk so that it does not have to
fill up memory. For best performance you can align the block size of
the storage target with the block size of your array.
If your data fits in memory then you may prefer calling
``np.array(myarray)`` instead.
Parameters
----------
sources: Array or collection of Arrays
targets: array-like or Delayed or collection of array-likes and/or Delayeds
These should support setitem syntax ``target[10:20] = ...``
lock: boolean or threading.Lock, optional
Whether or not to lock the data stores while storing.
Pass True (lock each file individually), False (don't lock) or a
particular :class:`threading.Lock` object to be shared among all writes.
regions: tuple of slices or collection of tuples of slices
Each ``region`` tuple in ``regions`` should be such that
``target[region].shape = source.shape``
for the corresponding source and target in sources and targets,
respectively. If this is a tuple, the contents will be assumed to be
slices, so do not provide a tuple of tuples.
compute: boolean, optional
If true compute immediately; return :class:`dask.delayed.Delayed` otherwise.
return_stored: boolean, optional
Optionally return the stored result (default False).
kwargs:
Parameters passed to compute/persist (only used if compute=True)
Returns
-------
If return_stored=True
tuple of Arrays
If return_stored=False and compute=True
None
If return_stored=False and compute=False
Delayed
Examples
--------
>>> import h5py # doctest: +SKIP
>>> f = h5py.File('myfile.hdf5', mode='a') # doctest: +SKIP
>>> dset = f.create_dataset('/data', shape=x.shape,
... chunks=x.chunks,
... dtype='f8') # doctest: +SKIP
>>> store(x, dset) # doctest: +SKIP
Alternatively store many arrays at the same time
>>> store([x, y, z], [dset1, dset2, dset3]) # doctest: +SKIP
"""
if isinstance(sources, Array):
sources = [sources]
targets = [targets] # type: ignore
if any(not isinstance(s, Array) for s in sources):
raise ValueError("All sources must be dask array objects")
if len(sources) != len(targets):
raise ValueError(
"Different number of sources [%d] and targets [%d]"
% (len(sources), len(targets))
)
if isinstance(regions, tuple) or regions is None:
regions = [regions] # type: ignore
if len(sources) > 1 and len(regions) == 1:
regions *= len(sources) # type: ignore
if len(sources) != len(regions):
raise ValueError(
"Different number of sources [%d] and targets [%d] than regions [%d]"
% (len(sources), len(targets), len(regions))
)
# Optimize all sources together
sources_hlg = HighLevelGraph.merge(*[e.__dask_graph__() for e in sources])
sources_layer = Array.__dask_optimize__(
sources_hlg, list(core.flatten([e.__dask_keys__() for e in sources]))
)
sources_name = "store-sources-" + tokenize(sources)
layers = {sources_name: sources_layer}
dependencies: dict[str, set[str]] = {sources_name: set()}
# Optimize all targets together
targets_keys = []
targets_dsks = []
for t in targets:
if isinstance(t, Delayed):
targets_keys.append(t.key)
targets_dsks.append(t.__dask_graph__())
elif is_dask_collection(t):
raise TypeError("Targets must be either Delayed objects or array-likes")
if targets_dsks:
targets_hlg = HighLevelGraph.merge(*targets_dsks)
targets_layer = Delayed.__dask_optimize__(targets_hlg, targets_keys)
targets_name = "store-targets-" + tokenize(targets_keys)
layers[targets_name] = targets_layer
dependencies[targets_name] = set()
load_stored = return_stored and not compute
map_names = [
"store-map-" + tokenize(s, t if isinstance(t, Delayed) else id(t), r)
for s, t, r in zip(sources, targets, regions)
]
map_keys: list = []
for s, t, n, r in zip(sources, targets, map_names, regions):
map_layer = insert_to_ooc(
keys=s.__dask_keys__(),
chunks=s.chunks,
out=t.key if isinstance(t, Delayed) else t,
name=n,
lock=lock,
region=r,
return_stored=return_stored,
load_stored=load_stored,
)
layers[n] = map_layer
if isinstance(t, Delayed):
dependencies[n] = {sources_name, targets_name}
else:
dependencies[n] = {sources_name}
map_keys += map_layer.keys()
if return_stored:
store_dsk = HighLevelGraph(layers, dependencies)
load_store_dsk: HighLevelGraph | Mapping = store_dsk
if compute:
store_dlyds = [Delayed(k, store_dsk, layer=k[0]) for k in map_keys]
store_dlyds = persist(*store_dlyds, **kwargs)
store_dsk_2 = HighLevelGraph.merge(*[e.dask for e in store_dlyds])
load_store_dsk = retrieve_from_ooc(map_keys, store_dsk, store_dsk_2)
map_names = ["load-" + n for n in map_names]
return tuple(
Array(load_store_dsk, n, s.chunks, meta=s)
for s, n in zip(sources, map_names)
)
elif compute:
store_dsk = HighLevelGraph(layers, dependencies)
compute_as_if_collection(Array, store_dsk, map_keys, **kwargs)
return None
else:
key = "store-" + tokenize(map_names)
layers[key] = {key: map_keys}
dependencies[key] = set(map_names)
store_dsk = HighLevelGraph(layers, dependencies)
return Delayed(key, store_dsk)
|
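A small, hedged usage sketch of store() as documented above, writing a dask array chunk by chunk into a plain NumPy target (anything supporting slice assignment works); the shapes and chunking are arbitrary.

import numpy as np
import dask.array as da

x = da.arange(16, chunks=4).reshape(4, 4)
target = np.zeros((4, 4))      # any setitem-capable object of matching shape
da.store(x, target)            # compute=True by default, so this returns None
assert np.array_equal(target, np.arange(16).reshape(4, 4))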
41,211 |
def two_qubit_matrix_to_sqrt_iswap_operations(
q0: 'cirq.Qid',
q1: 'cirq.Qid',
mat: np.ndarray,
*,
required_sqrt_iswap_count: Optional[int] = None,
atol: float = 1e-8,
check_preconditions: bool = True,
clean_operations: bool = True,
) -> Sequence['cirq.Operation']:
"""Decomposes a two-qubit operation into Z/XY/sqrt-iSWAP gates.
Args:
q0: The first qubit being operated on.
q1: The other qubit being operated on.
mat: Defines the operation to apply to the pair of qubits.
required_sqrt_iswap_count: When specified, exactly this many sqrt-iSWAP
gates will be used even if fewer is possible (maximum 3).
atol: A limit on the amount of absolute error introduced by the
construction.
clean_operations: Enables optimizing resulting operation list by
merging operations and ejecting phased Paulis and Z operations.
Returns:
A list of operations implementing the matrix including at most three
SQRT_ISWAP (sqrt-iSWAP) gates, single-qubit gates, and a global phase
gate.
References:
Towards ultra-high fidelity quantum operations: SQiSW gate as a native
two-qubit gate
https://arxiv.org/abs/2105.06074
"""
kak = linalg.kak_decomposition(
mat, atol=atol / 10, rtol=0, check_preconditions=check_preconditions
)
operations = _kak_decomposition_to_sqrt_iswap_operations(
q0, q1, kak, required_sqrt_iswap_count, include_global_phase=not clean_operations, atol=atol
)
if clean_operations:
return two_qubit_decompositions._cleanup_operations(operations)
return operations
|
def two_qubit_matrix_to_sqrt_iswap_operations(
q0: 'cirq.Qid',
q1: 'cirq.Qid',
mat: np.ndarray,
*,
required_sqrt_iswap_count: Optional[int] = None,
atol: float = 1e-8,
check_preconditions: bool = True,
clean_operations: bool = True,
) -> Sequence['cirq.Operation']:
"""Decomposes a two-qubit operation into ZPow/XPow/YPow/sqrt-iSWAP gates.
Args:
q0: The first qubit being operated on.
q1: The other qubit being operated on.
mat: Defines the operation to apply to the pair of qubits.
required_sqrt_iswap_count: When specified, exactly this many sqrt-iSWAP
gates will be used even if fewer is possible (maximum 3).
atol: A limit on the amount of absolute error introduced by the
construction.
clean_operations: Enables optimizing resulting operation list by
merging operations and ejecting phased Paulis and Z operations.
Returns:
A list of operations implementing the matrix including at most three
SQRT_ISWAP (sqrt-iSWAP) gates, single-qubit gates, and a global phase
gate.
References:
Towards ultra-high fidelity quantum operations: SQiSW gate as a native
two-qubit gate
https://arxiv.org/abs/2105.06074
"""
kak = linalg.kak_decomposition(
mat, atol=atol / 10, rtol=0, check_preconditions=check_preconditions
)
operations = _kak_decomposition_to_sqrt_iswap_operations(
q0, q1, kak, required_sqrt_iswap_count, include_global_phase=not clean_operations, atol=atol
)
if clean_operations:
return two_qubit_decompositions._cleanup_operations(operations)
return operations
|
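A hedged sketch of calling the decomposition above on a CNOT unitary. It assumes the function is exposed as cirq.two_qubit_matrix_to_sqrt_iswap_operations (the exact import path can differ between cirq releases); with the default clean_operations=True the result matches the input only up to global phase.

import cirq

q0, q1 = cirq.LineQubit.range(2)
cnot = cirq.unitary(cirq.CNOT)

# Assumed top-level export; adjust the import path for your cirq version.
ops = cirq.two_qubit_matrix_to_sqrt_iswap_operations(q0, q1, cnot)

circuit = cirq.Circuit(ops)
assert cirq.allclose_up_to_global_phase(cirq.unitary(circuit), cnot, atol=1e-6)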
8,056 |
def appendavro(table, target, schema=None, sample=9, **avro_args):
"""
    Append rows into an existing avro file or create a new one.
    The `target` argument can be either an existing avro file or the file
    path for creating a new one.
The `schema` argument is checked against the schema of the existing file.
So it must be the same schema as used by `toavro()` or the schema of the
existing file.
The `sample` argument (int, optional) defines how many rows are inspected
for discovering the field types and building a schema for the avro file
when the `schema` argument is not passed.
    Additionally there is support for passing extra options in the
    argument `**avro_args` that are forwarded directly to fastavro. Check the
fastavro documentation for reference.
See :meth:`petl.io.avro.toavro` method for more information and examples.
.. versionadded:: 1.3.1
"""
target2 = write_source_from_arg(target)
_write_toavro(table,
target=target2,
mode='a+b',
schema=schema,
sample=sample,
**avro_args)
|
def appendavro(table, target, schema=None, sample=9, **avro_args):
"""
    Append rows into an existing avro file or create a new one.
    The `target` argument can be either an existing avro file or the file
    path for creating a new one.
The `schema` argument is checked against the schema of the existing file.
So it must be the same schema as used by `toavro()` or the schema of the
existing file.
The `sample` argument (int, optional) defines how many rows are inspected
for discovering the field types and building a schema for the avro file
when the `schema` argument is not passed.
    Additionally there is support for passing extra options in the
    argument `**avro_args` that are forwarded directly to fastavro. Check the
fastavro documentation for reference.
See :meth:`petl.io.avro.toavro` method for more information and examples.
.. versionadded:: 1.4.0
"""
target2 = write_source_from_arg(target)
_write_toavro(table,
target=target2,
mode='a+b',
schema=schema,
sample=sample,
**avro_args)
|
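Illustrative petl usage for the append path described above (requires fastavro; the file name and rows are made up). The first write goes through toavro() so that the schema appendavro() checks against already exists.

import petl as etl

table1 = [('name', 'qty'), ('apples', 3), ('pears', 7)]
table2 = [('name', 'qty'), ('plums', 2)]

etl.toavro(table1, 'fruit.avro')      # creates the file and infers/writes the schema
etl.appendavro(table2, 'fruit.avro')  # appends rows, reusing the existing schema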
46,165 |
def poly_to_mask(mask_shape, vertices):
"""Converts a polygon to a boolean mask with `True` for points
lying inside the shape. Uses the bounding box of the vertices to reduce
computation time.
Parameters
----------
mask_shape : np.ndarray | tuple
1x2 array of shape of mask to be generated.
vertices : np.ndarray
Nx2 array of the vertices of the polygon.
Returns
----------
mask : np.ndarray
Boolean array with `True` for points inside the polygon
"""
mask = np.zeros(mask_shape, dtype=bool)
bottom = vertices.min(axis=0).astype('int')
top = np.ceil(vertices.max(axis=0)).astype('int')
top = np.append([top], [mask_shape], axis=0).min(axis=0)
if np.all(top > bottom):
bb_mask = grid_points_in_poly(top - bottom, vertices - bottom)
mask[bottom[0]:top[0], bottom[1]:top[1]] = bb_mask
return mask
|
def poly_to_mask(mask_shape, vertices):
"""Converts a polygon to a boolean mask with `True` for points
lying inside the shape. Uses the bounding box of the vertices to reduce
computation time.
Parameters
----------
mask_shape : np.ndarray | tuple
1x2 array of shape of mask to be generated.
vertices : np.ndarray
Nx2 array of the vertices of the polygon.
Returns
----------
mask : np.ndarray
Boolean array with `True` for points inside the polygon
"""
mask = np.zeros(mask_shape, dtype=bool)
bottom = vertices.min(axis=0).astype(np.int)
top = np.ceil(vertices.max(axis=0)).astype('int')
top = np.append([top], [mask_shape], axis=0).min(axis=0)
if np.all(top > bottom):
bb_mask = grid_points_in_poly(top - bottom, vertices - bottom)
mask[bottom[0]:top[0], bottom[1]:top[1]] = bb_mask
return mask
|
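A quick, hedged illustration of poly_to_mask as defined above on a small triangle, assuming the first definition and its numpy/skimage imports are in scope; the vertices are arbitrary.

import numpy as np

triangle = np.array([[1, 1], [1, 8], [8, 1]])   # Nx2 vertices in (row, col) order
mask = poly_to_mask((10, 10), triangle)

assert mask.shape == (10, 10)
assert mask[2, 2]        # a point inside the triangle
assert not mask[9, 9]    # a point outside the triangle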
36,705 |
def main(regrtest_args):
args = [sys.executable,
'-u', # Unbuffered stdout and stderr
'-W', 'default', # Warnings set to 'default'
'-bb', # Warnings about bytes/bytearray
]
cross_compile = '_PYTHON_HOST_PLATFORM' in os.environ
hostrunner = os.environ.get("_PYTHON_HOSTRUNNER")
if hostrunner is None:
hostrunner = sysconfig.get_config_var("HOSTRUNNER")
if cross_compile:
# emulate -E, but keep PYTHONPATH + cross compile env vars, so
# test executable can load correct sysconfigdata file.
keep = {
'_PYTHON_PROJECT_BASE',
'_PYTHON_HOST_PLATFORM',
'_PYTHON_SYSCONFIGDATA_NAME',
'PYTHONPATH'
}
environ = {
name: value for name, value in os.environ.items()
if not name.startswith(('PYTHON', '_PYTHON')) or name in keep
}
else:
environ = os.environ.copy()
args.append("-E")
# Allow user-specified interpreter options to override our defaults.
args.extend(test.support.args_from_interpreter_flags())
args.extend(['-m', 'test', # Run the test suite
'-r', # Randomize test order
'-w', # Re-run failed tests in verbose mode
])
if sys.platform == 'win32':
args.append('-n') # Silence alerts under Windows
if not any(is_multiprocess_flag(arg) for arg in regrtest_args):
if cross_compile and hostrunner:
# For now use only one core for cross compiled builds.
# hostrunner can be expensive.
args.extend(['-j', '1'])
else:
args.extend(['-j', '0']) # Use all CPU cores
if not any(is_resource_use_flag(arg) for arg in regrtest_args):
args.extend(['-u', 'all,-largefile,-audio,-gui'])
if cross_compile and hostrunner:
# If HOSTRUNNER is set and -p/--python option is not given, then
# use hostrunner to execute python binary for tests.
if not any(is_python_flag(arg) for arg in regrtest_args):
buildpython = sysconfig.get_config_var("BUILDPYTHON")
args.extend(["--python", f"{hostrunner} {buildpython}"])
args.extend(regrtest_args)
print(shlex.join(args))
if sys.platform == 'win32':
from subprocess import call
sys.exit(call(args))
else:
os.execve(sys.executable, args, environ)
|
def main(regrtest_args):
args = [sys.executable,
'-u', # Unbuffered stdout and stderr
'-W', 'default', # Warnings set to 'default'
'-bb', # Warnings about bytes/bytearray
]
cross_compile = '_PYTHON_HOST_PLATFORM' in os.environ
if (hostrunner := os.environ.get("_PYTHON_HOSTRUNNER")) is None:
hostrunner = sysconfig.get_config_var("HOSTRUNNER")
if cross_compile:
# emulate -E, but keep PYTHONPATH + cross compile env vars, so
# test executable can load correct sysconfigdata file.
keep = {
'_PYTHON_PROJECT_BASE',
'_PYTHON_HOST_PLATFORM',
'_PYTHON_SYSCONFIGDATA_NAME',
'PYTHONPATH'
}
environ = {
name: value for name, value in os.environ.items()
if not name.startswith(('PYTHON', '_PYTHON')) or name in keep
}
else:
environ = os.environ.copy()
args.append("-E")
# Allow user-specified interpreter options to override our defaults.
args.extend(test.support.args_from_interpreter_flags())
args.extend(['-m', 'test', # Run the test suite
'-r', # Randomize test order
'-w', # Re-run failed tests in verbose mode
])
if sys.platform == 'win32':
args.append('-n') # Silence alerts under Windows
if not any(is_multiprocess_flag(arg) for arg in regrtest_args):
if cross_compile and hostrunner:
# For now use only one core for cross compiled builds.
# hostrunner can be expensive.
args.extend(['-j', '1'])
else:
args.extend(['-j', '0']) # Use all CPU cores
if not any(is_resource_use_flag(arg) for arg in regrtest_args):
args.extend(['-u', 'all,-largefile,-audio,-gui'])
if cross_compile and hostrunner:
# If HOSTRUNNER is set and -p/--python option is not given, then
# use hostrunner to execute python binary for tests.
if not any(is_python_flag(arg) for arg in regrtest_args):
buildpython = sysconfig.get_config_var("BUILDPYTHON")
args.extend(["--python", f"{hostrunner} {buildpython}"])
args.extend(regrtest_args)
print(shlex.join(args))
if sys.platform == 'win32':
from subprocess import call
sys.exit(call(args))
else:
os.execve(sys.executable, args, environ)
|
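The cross-compile branch above filters the environment by dropping PYTHON*/_PYTHON* variables unless they are in an explicit keep set; a tiny self-contained sketch of that rule on made-up values:

keep = {'_PYTHON_PROJECT_BASE', '_PYTHON_HOST_PLATFORM',
        '_PYTHON_SYSCONFIGDATA_NAME', 'PYTHONPATH'}
sample_env = {'PYTHONHOME': '/opt/py', 'PYTHONPATH': '/src/Lib',
              '_PYTHON_HOST_PLATFORM': 'linux-aarch64', 'PATH': '/usr/bin'}

filtered = {name: value for name, value in sample_env.items()
            if not name.startswith(('PYTHON', '_PYTHON')) or name in keep}
# PYTHONHOME is dropped; PYTHONPATH, _PYTHON_HOST_PLATFORM and PATH survive.
assert set(filtered) == {'PYTHONPATH', '_PYTHON_HOST_PLATFORM', 'PATH'}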
1,597 |
def _incremental_weighted_mean_and_var(X, sample_weight,
last_weighted_mean,
last_weighted_variance,
last_weight_sum):
"""Calculate weighted mean and variance batch update
last_weighted_mean and last_weighted_variance are statistics
computed at the last step by the function. Both must be
initialized to 0.0. In case no scaling is required
last_weighted_variance can be None. The weighted_mean is
always required and returned because necessary for the
    calculation of the weighted_variance. last_weight_sum is
the sum of weights encountered until now.
Derived from the paper "Incremental calculation of
weighted mean and variance",
by Tony Finch.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Data to use for statistics update
sample_weight : array-like, shape (n_samples,)
last_weighted_mean : array-like, shape: (n_features,)
last_weighted_variance : array-like, shape: (n_features,)
last_weight_sum : array-like, shape (n_features,)
Returns
-------
updated_weighted_mean : array, shape (n_features,)
updated_weighted_variance : array, shape (n_features,)
If None, only weighted_mean is computed
updated_weight_sum : array, shape (n_features,)
Notes
-----
NaNs in X are ignored.
References
----------
Tony Finch
"Incremental calculation of weighted mean and variance"
University of Cambridge Computing Service, February 2009
"""
# last = stats until now
# new = the current increment
# updated = the aggregated stats
M = np.isnan(X)
sample_weight_T = np.transpose(np.reshape(sample_weight, (-1, 1)))
new_weight_sum = _safe_accumulator_op(np.dot, sample_weight_T, ~M).ravel()
total_weight_sum = _safe_accumulator_op(np.sum, sample_weight, axis=0)
X_0 = np.where(np.isnan(X), 0, X)
new_weighted_mean = \
_safe_accumulator_op(np.average, X_0, weights=sample_weight, axis=0)
new_weighted_mean *= total_weight_sum / new_weight_sum
updated_weight_sum = last_weight_sum + new_weight_sum
updated_weighted_mean = (
(last_weight_sum * last_weighted_mean +
new_weight_sum * new_weighted_mean) / updated_weight_sum)
if last_weighted_variance is None:
updated_weighted_variance = None
else:
X_0 = np.where(np.isnan(X), 0, (X-new_weighted_mean)**2)
new_weighted_variance = \
_safe_accumulator_op(
np.average, X_0, weights=sample_weight, axis=0)
new_weighted_variance *= total_weight_sum / new_weight_sum
new_element = (
new_weight_sum *
(new_weighted_variance +
(new_weighted_mean - updated_weighted_mean) ** 2))
last_element = (
last_weight_sum *
(last_weighted_variance +
(last_weighted_mean - updated_weighted_mean) ** 2))
updated_weighted_variance = (
new_element + last_element) / updated_weight_sum
return updated_weighted_mean, updated_weighted_variance, updated_weight_sum
|
def _incremental_weighted_mean_and_var(X, sample_weight,
last_weighted_mean,
last_weighted_variance,
last_weight_sum):
"""Calculate weighted mean and variance batch update
last_weighted_mean and last_weighted_variance are statistics
computed at the last step by the function. Both must be
initialized to 0.0. In case no scaling is required
last_weighted_variance can be None. The weighted_mean is
    always required and returned because it is necessary for the
    calculation of the weighted_variance. last_weight_sum is
    the sum of weights encountered until now.
Derived from the paper "Incremental calculation of
weighted mean and variance",
by Tony Finch.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Data to use for statistics update
sample_weight : array-like, shape (n_samples,)
last_weighted_mean : array-like, shape: (n_features,)
last_weighted_variance : array-like, shape: (n_features,)
last_weight_sum : array-like, shape (n_features,)
Returns
-------
updated_weighted_mean : array, shape (n_features,)
updated_weighted_variance : array, shape (n_features,)
If None, only weighted_mean is computed
updated_weight_sum : array, shape (n_features,)
Notes
-----
NaNs in X are ignored.
References
----------
Tony Finch
"Incremental calculation of weighted mean and variance"
University of Cambridge Computing Service, February 2009
"""
# last = stats until now
# new = the current increment
# updated = the aggregated stats
    nan_mask = np.isnan(X)
sample_weight_T = np.transpose(np.reshape(sample_weight, (-1, 1)))
new_weight_sum = _safe_accumulator_op(np.dot, sample_weight_T, ~nan_mask).ravel()
total_weight_sum = _safe_accumulator_op(np.sum, sample_weight, axis=0)
X_0 = np.where(np.isnan(X), 0, X)
new_weighted_mean = \
_safe_accumulator_op(np.average, X_0, weights=sample_weight, axis=0)
new_weighted_mean *= total_weight_sum / new_weight_sum
updated_weight_sum = last_weight_sum + new_weight_sum
updated_weighted_mean = (
(last_weight_sum * last_weighted_mean +
new_weight_sum * new_weighted_mean) / updated_weight_sum)
if last_weighted_variance is None:
updated_weighted_variance = None
else:
X_0 = np.where(np.isnan(X), 0, (X-new_weighted_mean)**2)
new_weighted_variance = \
_safe_accumulator_op(
np.average, X_0, weights=sample_weight, axis=0)
new_weighted_variance *= total_weight_sum / new_weight_sum
new_element = (
new_weight_sum *
(new_weighted_variance +
(new_weighted_mean - updated_weighted_mean) ** 2))
last_element = (
last_weight_sum *
(last_weighted_variance +
(last_weighted_mean - updated_weighted_mean) ** 2))
updated_weighted_variance = (
new_element + last_element) / updated_weight_sum
return updated_weighted_mean, updated_weighted_variance, updated_weight_sum
|
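The pair above updates weighted statistics one batch at a time. Below is a minimal, self-contained sketch of the same combining rule for the weighted mean, using plain NumPy; combine_weighted_mean is a hypothetical helper for illustration, not the library function shown above.
import numpy as np
def combine_weighted_mean(last_mean, last_wsum, X, w):
    # combine "last" and "new" statistics by their weight sums (Tony Finch's scheme)
    new_wsum = w.sum()
    new_mean = np.average(X, weights=w, axis=0)
    updated_wsum = last_wsum + new_wsum
    updated_mean = (last_wsum * last_mean + new_wsum * new_mean) / updated_wsum
    return updated_mean, updated_wsum
rng = np.random.default_rng(0)
X = rng.normal(size=(10, 3))
w = rng.uniform(1, 2, size=10)
# two incremental batches give the same result as one full weighted pass
mean, wsum = combine_weighted_mean(np.zeros(3), 0.0, X[:6], w[:6])
mean, wsum = combine_weighted_mean(mean, wsum, X[6:], w[6:])
assert np.allclose(mean, np.average(X, weights=w, axis=0))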
58,796 |
def test_binary_add_from_constant_scalar():
dtype = "uint8"
ifm_shape = (1, 4, 4, 8)
def create_graph():
inp = relay.var("input", shape=ifm_shape, dtype=dtype)
scalar = relay.const(np.ones((1, 1, 1, 1), dtype=dtype), dtype=dtype)
add = relay.qnn.op.add(
inp,
scalar,
relay.const(1.0, dtype="float32"),
relay.const(0, dtype="int32"),
relay.const(1.0, dtype="float32"),
relay.const(0, dtype="int32"),
relay.const(1.0, dtype="float32"),
relay.const(0, dtype="int32"),
)
func = relay.Function(relay.analysis.free_vars(add), add)
return tvm.IRModule.from_expr(func)
def verify(ext_func):
op = ext_func.body
assert list(op.args[0].checked_type.shape) == [1, 4, 4, 8]
assert list(op.args[1].checked_type.shape) == [1, 1, 1, 1]
assert op.args[0].checked_type.dtype == "uint8"
assert list(op.checked_type.shape) == [1, 4, 4, 8]
assert op.checked_type.dtype == "uint8"
assert op.attrs.operator_type == "ADD"
rewriter = legalize.AddRewriter()
pattern_table = [
(
ethosu.AddParams.composite_name,
ethosu.qnn_add_pattern(),
lambda pat: ethosu.AddParams(pat).is_valid(),
),
]
mod = create_graph()
mod = partition_ethosu_by_table(mod, pattern_table)
mod["tvmgen_default_ethosu_main_0"] = dataflow_pattern.rewrite(
rewriter, mod["tvmgen_default_ethosu_main_0"]
)
verify(mod["tvmgen_default_ethosu_main_0"])
|
def test_binary_add_from_constant_scalar():
dtype = "uint8"
ifm_shape = (1, 4, 4, 8)
def create_graph():
inp = relay.var("input", shape=ifm_shape, dtype=dtype)
scalar = relay.const(np.ones((1, 1, 1, 1), dtype=dtype), dtype=dtype)
add = relay.qnn.op.add(
inp,
scalar,
relay.const(1.0, dtype="float32"),
relay.const(0, dtype="int32"),
relay.const(1.0, dtype="float32"),
relay.const(0, dtype="int32"),
relay.const(1.0, dtype="float32"),
relay.const(0, dtype="int32"),
)
func = relay.Function(relay.analysis.free_vars(add), add)
return tvm.IRModule.from_expr(func)
def verify(ext_func):
op = ext_func.body
assert list(op.args[0].checked_type.shape) == [1, 4, 4, 8]
assert list(op.args[1].checked_type.shape) == [1, 1, 1, 1]
assert op.args[0].checked_type.dtype == "uint8"
assert list(op.checked_type.shape) == [1, 4, 4, 8]
assert op.checked_type.dtype == "uint8"
assert op.attrs.operator_type == "ADD"
rewriter = legalize.AddRewriter()
pattern_table = [
(
ethosu.AddParams.composite_name,
ethosu.qnn_add_pattern(),
lambda pat: ethosu.AddParams(pat).is_valid(),
),
]
mod = create_graph()
mod = partition_ethosu_by_table(mod, pattern_table)
mod["tvmgen_default_ethos_u_main_0"] = dataflow_pattern.rewrite(
rewriter, mod["tvmgen_default_ethos_u_main_0"]
)
verify(mod["tvmgen_default_ethos_u_main_0"])
|
27,995 |
def scan_for_review_comment(job):
"""Scan a file for review comments returns
all the found review comments.
"""
file_path, lines = job
sc_handler = SourceCodeCommentHandler()
comments = []
with open(file_path, mode='r',
encoding='utf-8',
errors='ignore') as sf:
comments, misspelled_comments = \
sc_handler.scan_source_line_comments(sf, lines)
for mc in misspelled_comments:
LOG.warning("There are misspelled review status comments in %s")
LOG.warning(mc)
return comments
|
def scan_for_review_comment(job):
"""Scan a file for review comments returns
all the found review comments.
"""
file_path, lines = job
sc_handler = SourceCodeCommentHandler()
comments = []
with open(file_path, mode='r',
encoding='utf-8',
errors='ignore') as sf:
comments, misspelled_comments = \
sc_handler.scan_source_line_comments(sf, lines)
for mc in misspelled_comments:
LOG.warning("There are misspelled review status comments in %s", mc)
LOG.warning(mc)
return comments
|
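The diff above concerns lazy %-style logging: the placeholder is only filled when the value is passed as a separate argument, and formatting is deferred until the record is emitted. A small self-contained illustration with the standard logging module; the file path and comment text are hypothetical.
import logging
logging.basicConfig(level=logging.WARNING)
log = logging.getLogger("review-scan")
file_path = "src/example.c"           # hypothetical values for illustration
misspelled = "codechecker_supress"    # a misspelled review-status comment
log.warning("There are misspelled review status comments in %s", file_path)
log.warning("%s", misspelled)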
31,260 |
def main() -> None:
params: any = demisto.params()
host: str = params.get('host')
port: int = int(params.get('port'))
args: any = demisto.args()
if "host" in args and "port" in args:
host: str = args.get('host')
port: int = int(args.get('port'))
command: str = demisto.command()
demisto.debug(f'Command being called is {command}')
commands = {
'arduino-set-pin': arduino_set_pin_command,
'arduino-get-pin': arduino_get_pin_command,
'arduino-send-data': arduino_send_data_command
}
# try:
server: Server = Server(host, port)
if demisto.command() == 'test-module':
return_results(test_module(server))
elif command in commands:
return_results(commands[command](server, args))
else:
return_error(f"{command} command not recognised")
|
def main() -> None:
params: any = demisto.params()
host: str = params.get('host')
port: int = int(params.get('port'))
args: any = demisto.args()
if "host" in args and "port" in args:
host: str = args.get('host')
port: int = int(args.get('port'))
command = demisto.command()
demisto.debug(f'Command being called is {command}')
commands = {
'arduino-set-pin': arduino_set_pin_command,
'arduino-get-pin': arduino_get_pin_command,
'arduino-send-data': arduino_send_data_command
}
# try:
server: Server = Server(host, port)
if demisto.command() == 'test-module':
return_results(test_module(server))
elif command in commands:
return_results(commands[command](server, args))
else:
return_error(f"{command} command not recognised")
|
8,799 |
def _join_event_processing(bot):
"""Process a batch of JOIN event from the ``join_events_queue`` queue.
Every time this function is executed, it process at most ``throttle_join``
    JOIN events: for each, it sends a WHO request to know more about the
channel. This will prevent an excess of flood when there are too many
channels to join at once.
"""
batch_size = max(bot.settings.core.throttle_join, 1)
for _ in range(batch_size):
try:
channel = bot.memory['join_events_queue'].popleft()
except IndexError:
break
LOGGER.debug('Send WHO after JOIN channel: %s', channel)
_send_who(bot, channel)
|
def _join_event_processing(bot):
"""Process a batch of JOIN event from the ``join_events_queue`` queue.
Every time this function is executed, it processes at most ``throttle_join``
    JOIN events: for each, it sends a WHO request to know more about the
channel. This will prevent an excess of flood when there are too many
channels to join at once.
"""
batch_size = max(bot.settings.core.throttle_join, 1)
for _ in range(batch_size):
try:
channel = bot.memory['join_events_queue'].popleft()
except IndexError:
break
LOGGER.debug('Send WHO after JOIN channel: %s', channel)
_send_who(bot, channel)
|
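A minimal sketch of the throttling idea documented above: drain at most batch_size queued channels per call so the follow-up WHO requests do not flood the server. The helper and channel names are illustrative, not Sopel's API.
from collections import deque
def process_join_batch(queue, batch_size):
    processed = []
    for _ in range(max(batch_size, 1)):
        try:
            channel = queue.popleft()
        except IndexError:
            break                      # queue drained early
        processed.append(channel)      # stand-in for _send_who(bot, channel)
    return processed
queue = deque(["#a", "#b", "#c", "#d", "#e"])
print(process_join_batch(queue, 2))    # ['#a', '#b']
print(process_join_batch(queue, 2))    # ['#c', '#d']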
30,971 |
def main():
username = demisto.params().get('credentials').get('identifier')
password = demisto.params().get('credentials').get('password')
# get the service API url
url = demisto.params()['url']
max_fetch = demisto.params()['max_fetch']
LOG(f'Command being called is {demisto.command()}')
try:
client = Client(server_url=url, username=username, password=password, max_fetch=int(max_fetch))
if demisto.command() == 'thycotic-authenticate-token':
return_results(authenticate_token_command(client))
elif demisto.command() == 'thycotic-secret-password-get':
return_results(secret_password_get_command(client, **demisto.args()))
elif demisto.command() == 'thycotic-secret-username-get':
return_results(secret_username_get_command(client, **demisto.args()))
elif demisto.command() == 'test-module':
# This is the call made when pressing the integration Test button.
result = test_module(client)
demisto.results(result)
except Exception as e:
return_error(f'Failed to execute {demisto.command()} command. Error: {str(e)}')
|
def main():
username = demisto.params().get('credentials').get('identifier')
password = demisto.params().get('credentials').get('password')
# get the service API url
url = demisto.params()['url']
max_fetch = demisto.params()['max_fetch']
LOG(f'Command being called is {demisto.command()}')
try:
client = Client(server_url=url, username=username, password=password)
if demisto.command() == 'thycotic-authenticate-token':
return_results(authenticate_token_command(client))
elif demisto.command() == 'thycotic-secret-password-get':
return_results(secret_password_get_command(client, **demisto.args()))
elif demisto.command() == 'thycotic-secret-username-get':
return_results(secret_username_get_command(client, **demisto.args()))
elif demisto.command() == 'test-module':
# This is the call made when pressing the integration Test button.
result = test_module(client)
demisto.results(result)
except Exception as e:
return_error(f'Failed to execute {demisto.command()} command. Error: {str(e)}')
|
7,272 |
def local_binary_pattern(image, P, R, method='default'):
"""Gray scale and rotation invariant LBP (Local Binary Patterns).
LBP is an invariant descriptor that can be used for texture classification.
Parameters
----------
image : (N, M) array
Graylevel image.
P : int
Number of circularly symmetric neighbour set points (quantization of
the angular space).
R : float
Radius of circle (spatial resolution of the operator).
    method : {'default', 'ror', 'uniform', 'nri_uniform', 'var'}
Method to determine the pattern:
``default``
Original local binary pattern which is gray scale but not
rotation invariant.
``ror``
Extension of default implementation which is gray scale and
rotation invariant.
``uniform``
Improved rotation invariance with uniform patterns and finer
quantization of the angular space which is gray scale and
rotation invariant.
``nri_uniform``
Non rotation-invariant uniform patterns variant which is
only gray scale invariant [2]_.
``var``
Rotation invariant variance measures of the contrast of local
image texture which is rotation but not gray scale invariant.
Returns
-------
output : (N, M) array
LBP image.
References
----------
.. [1] Multiresolution Gray-Scale and Rotation Invariant Texture
Classification with Local Binary Patterns.
Timo Ojala, Matti Pietikainen, Topi Maenpaa.
http://www.ee.oulu.fi/research/mvmp/mvg/files/pdf/pdf_94.pdf, 2002.
.. [2] Face recognition with local binary patterns.
Timo Ahonen, Abdenour Hadid, Matti Pietikainen,
http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.214.6851,
2004.
"""
check_nD(image, 2)
methods = {
'default': ord('D'),
'ror': ord('R'),
'uniform': ord('U'),
'nri_uniform': ord('N'),
'var': ord('V')
}
image = np.ascontiguousarray(image, dtype=np.double)
output = _local_binary_pattern(image, P, R, methods[method.lower()])
return output
|
def local_binary_pattern(image, P, R, method='default'):
"""Gray scale and rotation invariant LBP (Local Binary Patterns).
LBP is an invariant descriptor that can be used for texture classification.
Parameters
----------
image : (N, M) array
Graylevel image.
P : int
Number of circularly symmetric neighbour set points (quantization of
the angular space).
R : float
Radius of circle (spatial resolution of the operator).
    method : {'default', 'ror', 'uniform', 'nri_uniform', 'var'}
Method to determine the pattern:
``default``
Original local binary pattern which is gray scale but not
rotation invariant.
``ror``
Extension of default implementation which is gray scale and
rotation invariant.
``uniform``
Improved rotation invariance with uniform patterns and finer
quantization of the angular space which is gray scale and
rotation invariant.
``nri_uniform``
Non rotation-invariant uniform patterns variant which is
only gray scale invariant [2]_.
``var``
Rotation invariant variance measures of the contrast of local
image texture which is only rotation invariant.
Returns
-------
output : (N, M) array
LBP image.
References
----------
.. [1] Multiresolution Gray-Scale and Rotation Invariant Texture
Classification with Local Binary Patterns.
Timo Ojala, Matti Pietikainen, Topi Maenpaa.
http://www.ee.oulu.fi/research/mvmp/mvg/files/pdf/pdf_94.pdf, 2002.
.. [2] Face recognition with local binary patterns.
Timo Ahonen, Abdenour Hadid, Matti Pietikainen,
http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.214.6851,
2004.
"""
check_nD(image, 2)
methods = {
'default': ord('D'),
'ror': ord('R'),
'uniform': ord('U'),
'nri_uniform': ord('N'),
'var': ord('V')
}
image = np.ascontiguousarray(image, dtype=np.double)
output = _local_binary_pattern(image, P, R, methods[method.lower()])
return output
|
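A usage sketch for the function above, assuming scikit-image is installed and exposes skimage.feature.local_binary_pattern; the synthetic image and parameter values are arbitrary.
import numpy as np
from skimage.feature import local_binary_pattern
# small synthetic 8-bit graylevel image; real use would pass a photo
image = (np.arange(64).reshape(8, 8) % 17).astype(np.uint8)
P, R = 8, 1.0
codes = local_binary_pattern(image, P, R, method="uniform")
# with the 'uniform' method the codes fall in [0, P + 1], so a (P + 2)-bin
# histogram is a common texture descriptor
hist, _ = np.histogram(codes, bins=np.arange(P + 3), density=True)
print(hist)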
58,402 |
def _add_source_bounds(media_ref, src, context):
if not hasattr(media_ref, "available_image_bounds"):
return
global_scale = context.get('global_scale')
global_translate = context.get('global_translate')
if not global_scale or not global_translate:
return
bounds = media_ref.available_image_bounds
if not bounds:
return
# A width of 1.0 in RV means draw to the aspect ratio, so scale the
# width by the inverse of the aspect ratio
#
media_info = commands.sourceMediaInfo(src)
height = media_info['height']
aspect_ratio = 1.0 if height == 0 else media_info['width'] / height
translate = bounds.center() * global_scale - global_translate
scale = (bounds.max - bounds.min) * global_scale
transform_node = extra_commands.associatedNode('RVTransform2D', src)
commands.setFloatProperty(
"{}.transform.scale".format(transform_node),
[scale.x / aspect_ratio, scale.y]
)
commands.setFloatProperty(
"{}.transform.translate".format(transform_node),
[translate.x, translate.y]
)
# write the bounds global_scale and global_translate to the node so we can
# preserve the original values if we round-trip
commands.newProperty(
"{}.otio.global_scale".format(transform_node),
commands.FloatType,
2
)
commands.newProperty(
"{}.otio.global_translate".format(transform_node),
commands.FloatType,
2
)
commands.setFloatProperty(
"{}.otio.global_scale".format(transform_node),
[global_scale.x, global_scale.y],
True
)
commands.setFloatProperty(
"{}.otio.global_translate".format(transform_node),
[global_translate.x, global_translate.y],
True
)
|
def _add_source_bounds(media_ref, src, context):
if media_ref.available_image_bounds is None:
return
global_scale = context.get('global_scale')
global_translate = context.get('global_translate')
if not global_scale or not global_translate:
return
bounds = media_ref.available_image_bounds
if not bounds:
return
# A width of 1.0 in RV means draw to the aspect ratio, so scale the
# width by the inverse of the aspect ratio
#
media_info = commands.sourceMediaInfo(src)
height = media_info['height']
aspect_ratio = 1.0 if height == 0 else media_info['width'] / height
translate = bounds.center() * global_scale - global_translate
scale = (bounds.max - bounds.min) * global_scale
transform_node = extra_commands.associatedNode('RVTransform2D', src)
commands.setFloatProperty(
"{}.transform.scale".format(transform_node),
[scale.x / aspect_ratio, scale.y]
)
commands.setFloatProperty(
"{}.transform.translate".format(transform_node),
[translate.x, translate.y]
)
# write the bounds global_scale and global_translate to the node so we can
# preserve the original values if we round-trip
commands.newProperty(
"{}.otio.global_scale".format(transform_node),
commands.FloatType,
2
)
commands.newProperty(
"{}.otio.global_translate".format(transform_node),
commands.FloatType,
2
)
commands.setFloatProperty(
"{}.otio.global_scale".format(transform_node),
[global_scale.x, global_scale.y],
True
)
commands.setFloatProperty(
"{}.otio.global_translate".format(transform_node),
[global_translate.x, global_translate.y],
True
)
|
25,565 |
def login_or_register(
client: GMatrixClient, signer: Signer, prev_user_id: str = None, prev_access_token: str = None
) -> User:
"""Login to a Raiden matrix server with password and displayname proof-of-keys
- Username is in the format: 0x<eth_address>(.<suffix>)?, where the suffix is not required,
but a deterministic (per-account) random 8-hex string to prevent DoS by other users registering
our address
- Password is the signature of the server hostname, verified by the server to prevent account
creation spam
- Displayname currently is the signature of the whole user_id (including homeserver), to be
verified by other peers. May include in the future other metadata such as protocol version
Params:
client: GMatrixClient instance configured with desired homeserver
signer: raiden.utils.signer.Signer instance for signing password and displayname
prev_user_id: (optional) previously persisted client.user_id. Must match signer's account
prev_access_token: (optional) previously persisted client.access_token for prev_user_id
Returns:
Own matrix_client.User
"""
server_url = client.api.base_url
server_name = urlparse(server_url).netloc
base_username = str(to_normalized_address(signer.address))
_match_user = re.match(
f"^@{re.escape(base_username)}.*:{re.escape(server_name)}$", prev_user_id or ""
)
if _match_user: # same user as before
assert prev_user_id is not None
log.debug("Trying previous user login", user_id=prev_user_id)
client.set_access_token(user_id=prev_user_id, token=prev_access_token)
try:
# Test the credentional. Any API that requries authentication
# would be enough.
client.api.get_devices()
except MatrixRequestError as ex:
log.debug(
"Couldn't use previous login credentials, discarding",
prev_user_id=prev_user_id,
_exception=ex,
)
else:
prev_sync_limit = client.set_sync_limit(0)
client._sync() # initial_sync
client.set_sync_limit(prev_sync_limit)
log.debug("Success. Valid previous credentials", user_id=prev_user_id)
return client.get_user(client.user_id)
elif prev_user_id:
log.debug(
"Different server or account, discarding",
prev_user_id=prev_user_id,
current_address=base_username,
current_server=server_name,
)
# password is signed server address
password = encode_hex(signer.sign(server_name.encode()))
rand = None
# try login and register on first 5 possible accounts
for i in range(JOIN_RETRIES):
username = base_username
# Notes:
# - The PRNG is initialized with a deterministic seed based on the
# user's signature. This allows the node to recover the random userid
# even if data is lost.
# - The first iteration does not have a random part for the userid,
    #     this is only a small convenience to avoid an unnecessary signature.
if i:
if not rand:
rand = Random()
rand.seed(int.from_bytes(signer.sign(b"seed")[-32:], "big"))
username = f"{username}.{rand.randint(0, 0xffffffff):08x}"
try:
client.login(username, password, sync=False)
prev_sync_limit = client.set_sync_limit(0)
client._sync() # when logging, do initial_sync with limit=0
client.set_sync_limit(prev_sync_limit)
break
except MatrixRequestError as ex:
if ex.code != 403:
raise
log.debug(
"Could not login. Trying register",
homeserver=server_name,
server_url=server_url,
username=username,
)
try:
client.register_with_password(username, password)
log.debug(
"Register", homeserver=server_name, server_url=server_url, username=username
)
break
except MatrixRequestError as ex:
if ex.code != 400:
raise
log.debug("Username taken. Continuing")
continue
else:
raise ValueError("Could not register or login!")
signature_bytes = signer.sign(client.user_id.encode())
signature_hex = encode_hex(signature_bytes)
user = client.get_user(client.user_id)
user.set_display_name(signature_hex)
log.debug(
"Matrix user login", homeserver=server_name, server_url=server_url, username=username
)
return user
|
def login_or_register(
client: GMatrixClient, signer: Signer, prev_user_id: str = None, prev_access_token: str = None
) -> User:
"""Login to a Raiden matrix server with password and displayname proof-of-keys
- Username is in the format: 0x<eth_address>(.<suffix>)?, where the suffix is not required,
but a deterministic (per-account) random 8-hex string to prevent DoS by other users registering
our address
- Password is the signature of the server hostname, verified by the server to prevent account
creation spam
- Displayname currently is the signature of the whole user_id (including homeserver), to be
verified by other peers. May include in the future other metadata such as protocol version
Params:
client: GMatrixClient instance configured with desired homeserver
signer: raiden.utils.signer.Signer instance for signing password and displayname
prev_user_id: (optional) previously persisted client.user_id. Must match signer's account
prev_access_token: (optional) previously persisted client.access_token for prev_user_id
Returns:
Own matrix_client.User
"""
server_url = client.api.base_url
server_name = urlparse(server_url).netloc
base_username = str(to_normalized_address(signer.address))
_match_user = re.match(
f"^@{re.escape(base_username)}.*:{re.escape(server_name)}$", prev_user_id or ""
)
if _match_user: # same user as before
assert prev_user_id is not None
log.debug("Trying previous user login", user_id=prev_user_id)
client.set_access_token(user_id=prev_user_id, token=prev_access_token)
try:
# Test the credential. Any API that requires authentication
# would be enough.
client.api.get_devices()
except MatrixRequestError as ex:
log.debug(
"Couldn't use previous login credentials, discarding",
prev_user_id=prev_user_id,
_exception=ex,
)
else:
prev_sync_limit = client.set_sync_limit(0)
client._sync() # initial_sync
client.set_sync_limit(prev_sync_limit)
log.debug("Success. Valid previous credentials", user_id=prev_user_id)
return client.get_user(client.user_id)
elif prev_user_id:
log.debug(
"Different server or account, discarding",
prev_user_id=prev_user_id,
current_address=base_username,
current_server=server_name,
)
# password is signed server address
password = encode_hex(signer.sign(server_name.encode()))
rand = None
# try login and register on first 5 possible accounts
for i in range(JOIN_RETRIES):
username = base_username
# Notes:
# - The PRNG is initialized with a deterministic seed based on the
# user's signature. This allows the node to recover the random userid
# even if data is lost.
# - The first iteration does not have a random part for the userid,
    #     this is only a small convenience to avoid an unnecessary signature.
if i:
if not rand:
rand = Random()
rand.seed(int.from_bytes(signer.sign(b"seed")[-32:], "big"))
username = f"{username}.{rand.randint(0, 0xffffffff):08x}"
try:
client.login(username, password, sync=False)
prev_sync_limit = client.set_sync_limit(0)
client._sync() # when logging, do initial_sync with limit=0
client.set_sync_limit(prev_sync_limit)
break
except MatrixRequestError as ex:
if ex.code != 403:
raise
log.debug(
"Could not login. Trying register",
homeserver=server_name,
server_url=server_url,
username=username,
)
try:
client.register_with_password(username, password)
log.debug(
"Register", homeserver=server_name, server_url=server_url, username=username
)
break
except MatrixRequestError as ex:
if ex.code != 400:
raise
log.debug("Username taken. Continuing")
continue
else:
raise ValueError("Could not register or login!")
signature_bytes = signer.sign(client.user_id.encode())
signature_hex = encode_hex(signature_bytes)
user = client.get_user(client.user_id)
user.set_display_name(signature_hex)
log.debug(
"Matrix user login", homeserver=server_name, server_url=server_url, username=username
)
return user
|
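A minimal sketch of the deterministic suffix scheme described in the docstring above: seeding a PRNG from the account's signature makes the candidate usernames reproducible across restarts. The signature bytes and address below are stand-ins, not real Raiden values.
from random import Random
fake_signature = b"\x01" * 65          # stand-in for signer.sign(b"seed")
rand = Random()
rand.seed(int.from_bytes(fake_signature[-32:], "big"))
base_username = "0x1234abcd"           # stand-in for the normalized eth address
candidates = [base_username] + [
    f"{base_username}.{rand.randint(0, 0xffffffff):08x}" for _ in range(4)
]
print(candidates)                      # same list every run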
14,482 |
def _install_galaxy_role() -> None:
"""Detect standalone galaxy role and installs it."""
if not os.path.exists("meta/main.yml"):
return
yaml = yaml_from_file("meta/main.yml")
if 'galaxy_info' not in yaml:
return
role_name = yaml['galaxy_info'].get('role_name', None)
role_author = yaml['galaxy_info'].get('author', None)
if not role_name:
role_name = pathlib.Path(".").absolute().name
role_name = re.sub(r'^{0}'.format(re.escape('ansible-role-')), '', role_name)
fqrn = f"{role_author}.{role_name}"
if not re.match(r"[\w\d_]{2,}\.[a-z][a-z0-9_]+$", fqrn):
print(
f"""\
Computed fully qualified role name of {fqrn} is not valid.
Please edit meta/main.yml and assure we can correctly determine full role name:
galaxy_info:
role_name: my_name # if absent directory name hosting role is used instead
author: my_galaxy_namespace
See: https://galaxy.ansible.com/docs/contributing/namespaces.html#galaxy-namespace-limitations
""",
file=sys.stderr,
)
sys.exit(INVALID_PREREQUISITES_RC)
p = pathlib.Path(".cache/roles")
p.mkdir(parents=True, exist_ok=True)
link_path = p / f"{role_author}.{role_name}"
# despite documentation stating that is_file() reports true for symlinks,
    # it appears that is_dir() reports true instead, so we rely on exists().
if not link_path.exists():
link_path.symlink_to(pathlib.Path("../..", target_is_directory=True))
print(
f"Using {link_path} symlink to current repository in order to enable Ansible to find the role using its expected full name.",
file=sys.stderr,
)
|
def _install_galaxy_role() -> None:
"""Detect standalone galaxy role and installs it."""
if not os.path.exists("meta/main.yml"):
return
yaml = yaml_from_file("meta/main.yml")
if 'galaxy_info' not in yaml:
return
role_name = yaml['galaxy_info'].get('role_name', None)
role_author = yaml['galaxy_info'].get('author', None)
if not role_name:
role_name = pathlib.Path(".").absolute().name
role_name = re.sub(r'^{0}'.format(re.escape('ansible-role-')), '', role_name)
fqrn = f"{role_author}.{role_name}"
if not re.match(r"[a-z0-9][a-z0-9_]{2,}\.[a-z][a-z0-9_]+$", fqrn):
print(
f"""\
Computed fully qualified role name of {fqrn} is not valid.
Please edit meta/main.yml and assure we can correctly determine full role name:
galaxy_info:
role_name: my_name # if absent directory name hosting role is used instead
author: my_galaxy_namespace
See: https://galaxy.ansible.com/docs/contributing/namespaces.html#galaxy-namespace-limitations
""",
file=sys.stderr,
)
sys.exit(INVALID_PREREQUISITES_RC)
p = pathlib.Path(".cache/roles")
p.mkdir(parents=True, exist_ok=True)
link_path = p / f"{role_author}.{role_name}"
# despite documentation stating that is_file() reports true for symlinks,
    # it appears that is_dir() reports true instead, so we rely on exists().
if not link_path.exists():
link_path.symlink_to(pathlib.Path("../..", target_is_directory=True))
print(
f"Using {link_path} symlink to current repository in order to enable Ansible to find the role using its expected full name.",
file=sys.stderr,
)
|
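A small illustration of the fully qualified role name check performed above; the pattern mirrors the stricter variant used in the modified code and the candidate names are made up.
import re
FQRN_RE = re.compile(r"[a-z0-9][a-z0-9_]{2,}\.[a-z][a-z0-9_]+$")
for candidate in ("acme.my_role", "ansible-role-foo.bar", "Acme.my_role"):
    ok = bool(FQRN_RE.match(candidate))
    print(f"{candidate!r}: {'valid' if ok else 'invalid'}")
# acme.my_role is accepted; the other two fail the namespace rules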
35,200 |
def preconfigure_modules(compiler, settings):
"""Returns a list of modules buildable in given environment and settings.
For each module in MODULES list, this function checks if the module
can be built in the current environment and reports it.
Returns a list of module names available.
"""
nvcc_path = build.get_nvcc_path()
hipcc_path = build.get_hipcc_path()
summary = [
'',
'************************************************************',
'* CuPy Configuration Summary *',
'************************************************************',
'',
'Build Environment:',
' Include directories: {}'.format(str(settings['include_dirs'])),
' Library directories: {}'.format(str(settings['library_dirs'])),
' nvcc command : {}'.format(
nvcc_path if nvcc_path else '(not found)'),
' hipcc command : {}'.format(
hipcc_path if hipcc_path else '(not found)'),
'',
'Environment Variables:',
]
for key in ['CFLAGS', 'LDFLAGS', 'LIBRARY_PATH',
'CUDA_PATH', 'NVTOOLSEXT_PATH', 'NVCC', 'HIPCC',
'ROCM_HOME']:
summary += [' {:<16}: {}'.format(key, os.environ.get(key, '(none)'))]
summary += [
'',
'Modules:',
]
ret = []
for module in MODULES:
installed = False
status = 'No'
errmsg = []
if module['name'] == 'cutensor':
cutensor_path = os.environ.get('CUTENSOR_PATH', '')
inc_path = os.path.join(cutensor_path, 'include')
if os.path.exists(inc_path):
settings['include_dirs'].append(inc_path)
cuda_version = build.get_cuda_version()
cuda_major = str(cuda_version // 1000)
cuda_major_minor = f'{cuda_major}.{(cuda_version // 10) % 100}'
for cuda_ver in (cuda_major_minor, cuda_major):
lib_path = os.path.join(cutensor_path, 'lib', cuda_ver)
if os.path.exists(lib_path):
settings['library_dirs'].append(lib_path)
break
print('')
print('-------- Configuring Module: {} --------'.format(
module['name']))
sys.stdout.flush()
if not check_library(
compiler,
includes=module['include'],
include_dirs=settings['include_dirs'],
define_macros=settings['define_macros'],
extra_compile_args=settings['extra_compile_args']):
errmsg = ['Include files not found: %s' % module['include'],
'Check your CFLAGS environment variable.']
elif not check_library(
compiler,
libraries=module['libraries'],
library_dirs=settings['library_dirs'],
define_macros=settings['define_macros'],
extra_compile_args=settings['extra_compile_args']):
errmsg = ['Cannot link libraries: %s' % module['libraries'],
'Check your LDFLAGS environment variable.']
elif ('check_method' in module and
not module['check_method'](compiler, settings)):
# Fail on per-library condition check (version requirements etc.)
installed = True
errmsg = ['The library is installed but not supported.']
elif (module['name'] in ('thrust', 'cub', 'random')
and (nvcc_path is None and hipcc_path is None)):
installed = True
cmd = 'nvcc' if not use_hip else 'hipcc'
errmsg = ['{} command could not be found in PATH.'.format(cmd),
'Check your PATH environment variable.']
elif ('required_cuda_version' in module
and not module['check_cuda_version'](build.get_cuda_version())):
cuda_version = build.get_cuda_version()
cuda_major = cuda_version // 1000
cuda_major_minor = f'{cuda_major}.{(cuda_version // 10) % 100}'
installed = True
errmsg = [
f'The library is not supported for CUDA {cuda_major_minor}']
else:
installed = True
status = 'Yes'
ret.append(module['name'])
if installed and 'version_method' in module:
status += ' (version {})'.format(module['version_method'](True))
summary += [
' {:<10}: {}'.format(module['name'], status)
]
# If error message exists...
if len(errmsg) != 0:
summary += [' -> {}'.format(m) for m in errmsg]
# Skip checking other modules when CUDA is unavailable.
if module['name'] == 'cuda':
break
# Get a list of the CC of the devices connected to this node
if not use_hip:
build.check_compute_capabilities(compiler, settings)
if len(ret) != len(MODULES):
if 'cuda' in ret:
lines = [
'WARNING: Some modules could not be configured.',
'CuPy will be installed without these modules.',
]
else:
lines = [
'ERROR: CUDA could not be found on your system.',
]
summary += [
'',
] + lines + [
'Please refer to the Installation Guide for details:',
'https://docs.cupy.dev/en/stable/install.html',
'',
]
summary += [
'************************************************************',
'',
]
print('\n'.join(summary))
return ret, settings
|
def preconfigure_modules(compiler, settings):
"""Returns a list of modules buildable in given environment and settings.
For each module in MODULES list, this function checks if the module
can be built in the current environment and reports it.
Returns a list of module names available.
"""
nvcc_path = build.get_nvcc_path()
hipcc_path = build.get_hipcc_path()
summary = [
'',
'************************************************************',
'* CuPy Configuration Summary *',
'************************************************************',
'',
'Build Environment:',
' Include directories: {}'.format(str(settings['include_dirs'])),
' Library directories: {}'.format(str(settings['library_dirs'])),
' nvcc command : {}'.format(
nvcc_path if nvcc_path else '(not found)'),
' hipcc command : {}'.format(
hipcc_path if hipcc_path else '(not found)'),
'',
'Environment Variables:',
]
for key in ['CFLAGS', 'LDFLAGS', 'LIBRARY_PATH',
'CUDA_PATH', 'NVTOOLSEXT_PATH', 'NVCC', 'HIPCC',
'ROCM_HOME']:
summary += [' {:<16}: {}'.format(key, os.environ.get(key, '(none)'))]
summary += [
'',
'Modules:',
]
ret = []
for module in MODULES:
installed = False
status = 'No'
errmsg = []
if module['name'] == 'cutensor':
cutensor_path = os.environ.get('CUTENSOR_PATH', '')
inc_path = os.path.join(cutensor_path, 'include')
if os.path.exists(inc_path):
settings['include_dirs'].append(inc_path)
cuda_version = build.get_cuda_version()
cuda_major = str(cuda_version // 1000)
cuda_major_minor = f'{cuda_major}.{(cuda_version // 10) % 100}'
for cuda_ver in (cuda_major_minor, cuda_major):
lib_path = os.path.join(cutensor_path, 'lib', cuda_ver)
if os.path.exists(lib_path):
settings['library_dirs'].append(lib_path)
break
print('')
print('-------- Configuring Module: {} --------'.format(
module['name']))
sys.stdout.flush()
if not check_library(
compiler,
includes=module['include'],
include_dirs=settings['include_dirs'],
define_macros=settings['define_macros'],
extra_compile_args=settings['extra_compile_args']):
errmsg = ['Include files not found: %s' % module['include'],
'Check your CFLAGS environment variable.']
elif not check_library(
compiler,
libraries=module['libraries'],
library_dirs=settings['library_dirs'],
define_macros=settings['define_macros'],
extra_compile_args=settings['extra_compile_args']):
errmsg = ['Cannot link libraries: %s' % module['libraries'],
'Check your LDFLAGS environment variable.']
elif ('check_method' in module and
not module['check_method'](compiler, settings)):
# Fail on per-library condition check (version requirements etc.)
installed = True
errmsg = ['The library is installed but not supported.']
elif (module['name'] in ('thrust', 'cub', 'random')
and (nvcc_path is None and hipcc_path is None)):
installed = True
cmd = 'nvcc' if not use_hip else 'hipcc'
errmsg = ['{} command could not be found in PATH.'.format(cmd),
'Check your PATH environment variable.']
elif ('required_cuda_version' in module
and not module['check_cuda_version'](build.get_cuda_version())):
cuda_version = build.get_cuda_version()
cuda_major = cuda_version // 1000
cuda_major_minor = f'{cuda_major}.{(cuda_version // 10) % 100}'
installed = True
errmsg = [
f'Not supported in CUDA {cuda_major_minor}']
else:
installed = True
status = 'Yes'
ret.append(module['name'])
if installed and 'version_method' in module:
status += ' (version {})'.format(module['version_method'](True))
summary += [
' {:<10}: {}'.format(module['name'], status)
]
# If error message exists...
if len(errmsg) != 0:
summary += [' -> {}'.format(m) for m in errmsg]
# Skip checking other modules when CUDA is unavailable.
if module['name'] == 'cuda':
break
# Get a list of the CC of the devices connected to this node
if not use_hip:
build.check_compute_capabilities(compiler, settings)
if len(ret) != len(MODULES):
if 'cuda' in ret:
lines = [
'WARNING: Some modules could not be configured.',
'CuPy will be installed without these modules.',
]
else:
lines = [
'ERROR: CUDA could not be found on your system.',
]
summary += [
'',
] + lines + [
'Please refer to the Installation Guide for details:',
'https://docs.cupy.dev/en/stable/install.html',
'',
]
summary += [
'************************************************************',
'',
]
print('\n'.join(summary))
return ret, settings
|
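A worked example of the CUDA version arithmetic used in both versions above: the integer form major * 1000 + minor * 10 decomposes back into the major number and a major.minor string (11020 is used here as an illustrative value corresponding to CUDA 11.2).
cuda_version = 11020                          # hypothetical value for illustration
cuda_major = cuda_version // 1000             # -> 11
cuda_minor = (cuda_version // 10) % 100       # -> 2
cuda_major_minor = f"{cuda_major}.{cuda_minor}"
print(cuda_major, cuda_major_minor)           # 11 11.2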
42,118 |
def test_dominates_2d() -> None:
directions = [StudyDirection.MINIMIZE, StudyDirection.MAXIMIZE]
# Check all pairs of trials consisting of these values, i.e.,
# [-inf, -inf], [-inf, -1], [-inf, 1], [-inf, inf], [-1, -inf], ...
# These values should be specified in ascending order.
vals = [-float("inf"), -1, 1, float("inf")]
# The following table illustrates an example of dominance relations.
# "D" cells in the table dominates the "t" cell in (MINIMIZE, MAXIMIZE) setting.
#
# v[1]
# ╔═════╤═════╤═════╤═════╤═════╗
# ║ │ -∞ │ -1 │ 1 │ ∞ ║
# ╟─────┼─────┼─────┼─────┼─────╢
# ║ -∞ │ │ │ D │ D ║
# ╟─────┼─────┼─────┼─────┼─────╢
# ║ -1 │ │ │ D │ D ║
# v[0] ╟─────┼─────┼─────┼─────┼─────╢
# ║ 1 │ │ │ t │ D ║
# ╟─────┼─────┼─────┼─────┼─────╢
# ║ ∞ │ │ │ │ ║
# ╚═════╧═════╧═════╧═════╧═════╝
#
# In the following code, we check that for each position of "t" cell, the relation
# above holds.
# Generate the set of all possible indices.
all_indices = set((i, j) for i in range(len(vals)) for j in range(len(vals)))
for (t_i, t_j) in all_indices:
        # Generate the set of all indices that dominate the current index.
dominating_indices = set(
(D_i, D_j) for D_i in range(t_i + 1) for D_j in range(t_j, len(vals))
)
dominating_indices -= {(t_i, t_j)}
for (D_i, D_j) in dominating_indices:
trial1 = _create_trial([vals[t_i], vals[t_j]])
trial2 = _create_trial([vals[D_i], vals[D_j]])
assert _dominates(trial2, trial1, directions)
for (D_i, D_j) in all_indices - dominating_indices:
trial1 = _create_trial([vals[t_i], vals[t_j]])
trial2 = _create_trial([vals[D_i], vals[D_j]])
assert not _dominates(trial2, trial1, directions)
|
def test_dominates_2d() -> None:
directions = [StudyDirection.MINIMIZE, StudyDirection.MAXIMIZE]
# Check all pairs of trials consisting of these values, i.e.,
# [-inf, -inf], [-inf, -1], [-inf, 1], [-inf, inf], [-1, -inf], ...
# These values should be specified in ascending order.
vals = [-float("inf"), -1, 1, float("inf")]
# The following table illustrates an example of dominance relations.
# "D" cells in the table dominates the "t" cell in (MINIMIZE, MAXIMIZE) setting.
#
# v[1]
# ╔═════╤═════╤═════╤═════╤═════╗
# ║ │ -∞ │ -1 │ 1 │ ∞ ║
# ╟─────┼─────┼─────┼─────┼─────╢
# ║ -∞ │ │ │ D │ D ║
# ╟─────┼─────┼─────┼─────┼─────╢
# ║ -1 │ │ │ D │ D ║
# v[0] ╟─────┼─────┼─────┼─────┼─────╢
# ║ 1 │ │ │ t │ D ║
# ╟─────┼─────┼─────┼─────┼─────╢
# ║ ∞ │ │ │ │ ║
# ╚═════╧═════╧═════╧═════╧═════╝
#
# In the following code, we check that for each position of "t" cell, the relation
# above holds.
# Generate the set of all possible indices.
all_indices = set((i, j) for i in range(len(vals)) for j in range(len(vals)))
for (t_i, t_j) in all_indices:
        # Generate the set of all indices that dominate the current index.
dominating_indices = set(
(d_i, d_j) for d_i in range(t_i + 1) for d_j in range(t_j, len(vals))
)
dominating_indices -= {(t_i, t_j)}
for (D_i, D_j) in dominating_indices:
trial1 = _create_trial([vals[t_i], vals[t_j]])
trial2 = _create_trial([vals[D_i], vals[D_j]])
assert _dominates(trial2, trial1, directions)
for (D_i, D_j) in all_indices - dominating_indices:
trial1 = _create_trial([vals[t_i], vals[t_j]])
trial2 = _create_trial([vals[D_i], vals[D_j]])
assert not _dominates(trial2, trial1, directions)
|
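A minimal sketch of the dominance relation the test exercises for (MINIMIZE, MAXIMIZE) directions: one point dominates another when it is no worse in every objective and strictly better in at least one. The function below is a simplified stand-in, not Optuna's _dominates.
def dominates(a, b):
    # a, b are (v0, v1) pairs; v0 is minimized, v1 is maximized
    no_worse = a[0] <= b[0] and a[1] >= b[1]
    strictly_better = a[0] < b[0] or a[1] > b[1]
    return no_worse and strictly_better
t = (1, 1)
print(dominates((-1, float("inf")), t))   # True: better in both objectives
print(dominates((1, float("inf")), t))    # True: equal v0, strictly better v1
print(dominates((float("inf"), 1), t))    # False: worse in the minimized objective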
2,432 |
def spectral_clustering(
affinity,
*,
n_clusters=8,
n_components=None,
eigen_solver=None,
random_state=None,
n_init=10,
eigen_tol=0.0,
assign_labels="kmeans",
verbose=False,
):
"""Apply clustering to a projection of the normalized Laplacian.
In practice Spectral Clustering is very useful when the structure of
the individual clusters is highly non-convex or more generally when
a measure of the center and spread of the cluster is not a suitable
description of the complete cluster. For instance, when clusters are
nested circles on the 2D plane.
If affinity is the adjacency matrix of a graph, this method can be
used to find normalized graph cuts [1]_, [2]_.
Read more in the :ref:`User Guide <spectral_clustering>`.
Parameters
----------
affinity : {array-like, sparse matrix} of shape (n_samples, n_samples)
The affinity matrix describing the relationship of the samples to
embed. **Must be symmetric**.
Possible examples:
- adjacency matrix of a graph,
- heat kernel of the pairwise distance matrix of the samples,
- symmetric k-nearest neighbours connectivity matrix of the samples.
    n_clusters : int, default=8
Number of clusters to extract.
n_components : int, default=n_clusters
Number of eigenvectors to use for the spectral embedding
eigen_solver : {None, 'arpack', 'lobpcg', or 'amg'}
The eigenvalue decomposition strategy to use. AMG requires pyamg
to be installed. It can be faster on very large, sparse problems,
but may also lead to instabilities. If None, then ``'arpack'`` is
used. See [4]_ for more details regarding `'lobpcg'`.
random_state : int, RandomState instance, default=None
A pseudo random number generator used for the initialization
of the lobpcg eigenvectors decomposition when `eigen_solver ==
'amg'`, and for the K-Means initialization. Use an int to make
the results deterministic across calls (See
:term:`Glossary <random_state>`).
.. note::
When using `eigen_solver == 'amg'`,
it is necessary to also fix the global numpy seed with
`np.random.seed(int)` to get deterministic results. See
https://github.com/pyamg/pyamg/issues/139 for further
information.
n_init : int, default=10
        Number of times the k-means algorithm will be run with different
centroid seeds. The final results will be the best output of n_init
consecutive runs in terms of inertia. Only used if
``assign_labels='kmeans'``.
eigen_tol : float, default=0.0
Stopping criterion for eigendecomposition of the Laplacian matrix
when using arpack eigen_solver.
assign_labels : {'kmeans', 'discretize', 'cluster_qr'}, default='kmeans'
The strategy to use to assign labels in the embedding
space. There are three ways to assign labels after the Laplacian
embedding. k-means can be applied and is a popular choice. But it can
also be sensitive to initialization. Discretization is another
approach which is less sensitive to random initialization [3]_.
        The newest cluster_qr method [5]_ directly extracts clusters from eigenvectors
        in spectral clustering. In contrast to k-means and discretization, cluster_qr
        has no tuning parameters and runs no iterations, yet may outperform
k-means and discretization in terms of both quality and speed.
verbose : bool, default=False
Verbosity mode.
.. versionadded:: 0.24
Returns
-------
labels : array of integers, shape: n_samples
The labels of the clusters.
References
----------
.. [1] `Normalized cuts and image segmentation, 2000
Jianbo Shi, Jitendra Malik
<http://citeseer.ist.psu.edu/viewdoc/summary?doi=10.1.1.160.2324>`_
.. [2] `A Tutorial on Spectral Clustering, 2007
Ulrike von Luxburg
<http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.165.9323>`_
.. [3] `Multiclass spectral clustering, 2003
Stella X. Yu, Jianbo Shi
<https://www1.icsi.berkeley.edu/~stellayu/publication/doc/2003kwayICCV.pdf>`_
.. [4] `Toward the Optimal Preconditioned Eigensolver:
Locally Optimal Block Preconditioned Conjugate Gradient Method, 2001.
A. V. Knyazev
SIAM Journal on Scientific Computing 23, no. 2, pp. 517-541.
<https://epubs.siam.org/doi/pdf/10.1137/S1064827500366124>`_
.. [5] `Simple, direct, and efficient multi-way spectral clustering, 2019
Anil Damle, Victor Minden, Lexing Ying
<https://doi.org/10.1093/imaiai/iay008>`_
Notes
-----
    The graph should contain only one connected component; otherwise
the results make little sense.
This algorithm solves the normalized cut for k=2: it is a
normalized spectral clustering.
"""
if assign_labels not in ("kmeans", "discretize", "cluster_qr"):
raise ValueError(
"The 'assign_labels' parameter should be "
"'kmeans' or 'discretize', or 'cluster_qr', "
"but '%s' was given" % assign_labels
)
if isinstance(affinity, np.matrix):
raise TypeError(
"spectral_clustering does not support passing in affinity as an "
"np.matrix. Please convert to a numpy array with np.asarray. For "
"more information see: "
"https://numpy.org/doc/stable/reference/generated/numpy.matrix.html", # noqa
)
random_state = check_random_state(random_state)
n_components = n_clusters if n_components is None else n_components
# We now obtain the real valued solution matrix to the
# relaxed Ncut problem, solving the eigenvalue problem
# L_sym x = lambda x and recovering u = D^-1/2 x.
# The first eigenvector is constant only for fully connected graphs
# and should be kept for spectral clustering (drop_first = False)
# See spectral_embedding documentation.
maps = spectral_embedding(
affinity,
n_components=n_components,
eigen_solver=eigen_solver,
random_state=random_state,
eigen_tol=eigen_tol,
drop_first=False,
)
if verbose:
print(f"Computing label assignment using {assign_labels}")
if assign_labels == "kmeans":
_, labels, _ = k_means(
maps, n_clusters, random_state=random_state, n_init=n_init, verbose=verbose
)
elif assign_labels == "cluster_qr":
labels = cluster_qr(maps)
else:
labels = discretize(maps, random_state=random_state)
return labels
|
def spectral_clustering(
affinity,
*,
n_clusters=8,
n_components=None,
eigen_solver=None,
random_state=None,
n_init=10,
eigen_tol=0.0,
assign_labels="kmeans",
verbose=False,
):
"""Apply clustering to a projection of the normalized Laplacian.
In practice Spectral Clustering is very useful when the structure of
the individual clusters is highly non-convex or more generally when
a measure of the center and spread of the cluster is not a suitable
description of the complete cluster. For instance, when clusters are
nested circles on the 2D plane.
If affinity is the adjacency matrix of a graph, this method can be
used to find normalized graph cuts [1]_, [2]_.
Read more in the :ref:`User Guide <spectral_clustering>`.
Parameters
----------
affinity : {array-like, sparse matrix} of shape (n_samples, n_samples)
The affinity matrix describing the relationship of the samples to
embed. **Must be symmetric**.
Possible examples:
- adjacency matrix of a graph,
- heat kernel of the pairwise distance matrix of the samples,
- symmetric k-nearest neighbours connectivity matrix of the samples.
    n_clusters : int, default=8
Number of clusters to extract.
n_components : int, default=n_clusters
Number of eigenvectors to use for the spectral embedding
eigen_solver : {None, 'arpack', 'lobpcg', or 'amg'}
The eigenvalue decomposition strategy to use. AMG requires pyamg
to be installed. It can be faster on very large, sparse problems,
but may also lead to instabilities. If None, then ``'arpack'`` is
used. See [4]_ for more details regarding `'lobpcg'`.
random_state : int, RandomState instance, default=None
A pseudo random number generator used for the initialization
of the lobpcg eigenvectors decomposition when `eigen_solver ==
'amg'`, and for the K-Means initialization. Use an int to make
the results deterministic across calls (See
:term:`Glossary <random_state>`).
.. note::
When using `eigen_solver == 'amg'`,
it is necessary to also fix the global numpy seed with
`np.random.seed(int)` to get deterministic results. See
https://github.com/pyamg/pyamg/issues/139 for further
information.
n_init : int, default=10
        Number of times the k-means algorithm will be run with different
centroid seeds. The final results will be the best output of n_init
consecutive runs in terms of inertia. Only used if
``assign_labels='kmeans'``.
eigen_tol : float, default=0.0
Stopping criterion for eigendecomposition of the Laplacian matrix
when using arpack eigen_solver.
assign_labels : {'kmeans', 'discretize', 'cluster_qr'}, default='kmeans'
The strategy to use to assign labels in the embedding
space. There are three ways to assign labels after the Laplacian
embedding. k-means can be applied and is a popular choice. But it can
also be sensitive to initialization. Discretization is another
approach which is less sensitive to random initialization [3]_.
        The newest cluster_qr method [5]_ directly extracts clusters from eigenvectors
        in spectral clustering. In contrast to k-means and discretization, cluster_qr
        has no tuning parameters and runs no iterations, yet may outperform
k-means and discretization in terms of both quality and speed.
verbose : bool, default=False
Verbosity mode.
.. versionadded:: 0.24
Returns
-------
labels : array of integers, shape: n_samples
The labels of the clusters.
References
----------
.. [1] `Normalized cuts and image segmentation, 2000
Jianbo Shi, Jitendra Malik
<http://citeseer.ist.psu.edu/viewdoc/summary?doi=10.1.1.160.2324>`_
.. [2] `A Tutorial on Spectral Clustering, 2007
Ulrike von Luxburg
<http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.165.9323>`_
.. [3] `Multiclass spectral clustering, 2003
Stella X. Yu, Jianbo Shi
<https://www1.icsi.berkeley.edu/~stellayu/publication/doc/2003kwayICCV.pdf>`_
.. [4] `Toward the Optimal Preconditioned Eigensolver:
Locally Optimal Block Preconditioned Conjugate Gradient Method, 2001.
A. V. Knyazev
SIAM Journal on Scientific Computing 23, no. 2, pp. 517-541.
<https://epubs.siam.org/doi/pdf/10.1137/S1064827500366124>`_
.. [5] `Simple, direct, and efficient multi-way spectral clustering, 2019
Anil Damle, Victor Minden, Lexing Ying
<https://doi.org/10.1093/imaiai/iay008>`_
Notes
-----
    The graph should contain only one connected component; otherwise
the results make little sense.
This algorithm solves the normalized cut for k=2: it is a
normalized spectral clustering.
"""
if assign_labels not in ("kmeans", "discretize", "cluster_qr"):
raise ValueError(
"The 'assign_labels' parameter should be "
"'kmeans' or 'discretize', or 'cluster_qr', "
f"but {assign_labels!r} was given"
)
if isinstance(affinity, np.matrix):
raise TypeError(
"spectral_clustering does not support passing in affinity as an "
"np.matrix. Please convert to a numpy array with np.asarray. For "
"more information see: "
"https://numpy.org/doc/stable/reference/generated/numpy.matrix.html", # noqa
)
random_state = check_random_state(random_state)
n_components = n_clusters if n_components is None else n_components
# We now obtain the real valued solution matrix to the
# relaxed Ncut problem, solving the eigenvalue problem
# L_sym x = lambda x and recovering u = D^-1/2 x.
# The first eigenvector is constant only for fully connected graphs
# and should be kept for spectral clustering (drop_first = False)
# See spectral_embedding documentation.
maps = spectral_embedding(
affinity,
n_components=n_components,
eigen_solver=eigen_solver,
random_state=random_state,
eigen_tol=eigen_tol,
drop_first=False,
)
if verbose:
print(f"Computing label assignment using {assign_labels}")
if assign_labels == "kmeans":
_, labels, _ = k_means(
maps, n_clusters, random_state=random_state, n_init=n_init, verbose=verbose
)
elif assign_labels == "cluster_qr":
labels = cluster_qr(maps)
else:
labels = discretize(maps, random_state=random_state)
return labels
|
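A usage sketch for the function documented above, assuming scikit-learn is installed: build a symmetric affinity matrix over two nested circles and recover the rings with spectral_clustering. The gamma value is an arbitrary choice for this toy data.
import numpy as np
from sklearn.cluster import spectral_clustering
from sklearn.datasets import make_circles
from sklearn.metrics.pairwise import rbf_kernel
X, y = make_circles(n_samples=200, factor=0.4, noise=0.05, random_state=0)
affinity = rbf_kernel(X, gamma=20.0)       # symmetric (n_samples, n_samples) matrix
labels = spectral_clustering(affinity, n_clusters=2, random_state=0)
print(labels.shape, np.unique(labels))     # (200,) [0 1]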
58,073 |
def get_threat_data_command(client: Client, args=Dict[str, Any]) -> CommandResults:
"""
    get_threat_data: List threat data and allow query
"""
page = int(args.get("page", 1))
page_size = int(args.get("page_size", 1))
query = args.get("query", "type=indicator")
response = client.get_threat_data(page, page_size, query)
threat_data_list = response.get("data", {}).get("results", [])
results = [data for data in threat_data_list]
result = CommandResults(
readable_output=tableToMarkdown("Threat Data", results, removeNull=True),
outputs_prefix="CTIX.ThreatData",
outputs_key_field="id",
outputs=results,
raw_response=results,
)
return result
|
def get_threat_data_command(client: Client, args=Dict[str, Any]) -> CommandResults:
"""
    get_threat_data: List threat data and allow query
"""
page = arg_to_number(args.get("page", 1))
page_size = arg_to_number(args.get("page_size", 1))
query = args.get("query", "type=indicator")
response = client.get_threat_data(page, page_size, query)
threat_data_list = response.get("data", {}).get("results", [])
results = [data for data in threat_data_list]
result = CommandResults(
readable_output=tableToMarkdown("Threat Data", results, removeNull=True),
outputs_prefix="CTIX.ThreatData",
outputs_key_field="id",
outputs=results,
raw_response=results,
)
return result
|
48,304 |
def run_module():
# define the available arguments/parameters that a user can pass to
# the module
module_args = dict(
cpm_url=dict(type='str', required=True),
cpm_username=dict(type='str', required=True),
cpm_password=dict(type='str', required=True, no_log=True),
port=dict(type='str', required=True),
use_https=dict(type='bool', default=True),
validate_certs=dict(type='bool', default=True),
use_proxy=dict(type='bool', default=False)
)
result = dict(
changed=False,
data=''
)
module = AnsibleModule(argument_spec=module_args, supports_check_mode=True)
auth = to_text(base64.b64encode(to_bytes('{0}:{1}'.format(to_native(module.params['cpm_username']), to_native(module.params['cpm_password'])),
errors='surrogate_or_strict')))
if module.params['use_https'] is True:
protocol = "https://"
else:
protocol = "http://"
fullurl = ("%s%s/api/v2/config/serialports?ports=%s" % (protocol, to_native(module.params['cpm_url']), to_native(module.params['port'])))
try:
response = open_url(fullurl, data=None, method='GET', validate_certs=module.params['validate_certs'], use_proxy=module.params['use_proxy'],
headers={'Content-Type': 'application/json', 'Authorization': "Basic %s" % auth})
except HTTPError as e:
fail_json = dict(msg='GET: Received HTTP error for {0} : {1}'.format(fullurl, to_native(e)), changed=False)
module.fail_json(**fail_json)
except URLError as e:
fail_json = dict(msg='GET: Failed lookup url for {0} : {1}'.format(fullurl, to_native(e)), changed=False)
module.fail_json(**fail_json)
except SSLValidationError as e:
        fail_json = dict(msg="GET: Error validating the server's certificate for {0} : {1}".format(fullurl, to_native(e)), changed=False)
module.fail_json(**fail_json)
except ConnectionError as e:
fail_json = dict(msg='GET: Error connecting to {0} : {1}'.format(fullurl, to_native(e)), changed=False)
module.fail_json(**fail_json)
result['data'] = json.loads(response.read())
module.exit_json(**result)
|
def run_module():
# define the available arguments/parameters that a user can pass to
# the module
module_args = dict(
cpm_url=dict(type='str', required=True),
cpm_username=dict(type='str', required=True),
cpm_password=dict(type='str', required=True, no_log=True),
port=dict(type='str', required=True),
use_https=dict(type='bool', default=True),
validate_certs=dict(type='bool', default=True),
use_proxy=dict(type='bool', default=False)
)
result = dict(
changed=False,
data=''
)
module = AnsibleModule(argument_spec=module_args, supports_check_mode=True)
auth = to_text(base64.b64encode(to_bytes('{0}:{1}'.format(to_native(module.params['cpm_username']), to_native(module.params['cpm_password'])),
errors='surrogate_or_strict')))
if module.params['use_https'] is True:
protocol = "https://"
else:
protocol = "http://"
ports = module.params['port']
if isinstance(ports, list):
ports = ','.join(to_native(x) for x in ports)
fullurl = ("%s%s/api/v2/config/serialports?ports=%s" % (protocol, to_native(module.params['cpm_url']), ports))
try:
response = open_url(fullurl, data=None, method='GET', validate_certs=module.params['validate_certs'], use_proxy=module.params['use_proxy'],
headers={'Content-Type': 'application/json', 'Authorization': "Basic %s" % auth})
except HTTPError as e:
fail_json = dict(msg='GET: Received HTTP error for {0} : {1}'.format(fullurl, to_native(e)), changed=False)
module.fail_json(**fail_json)
except URLError as e:
fail_json = dict(msg='GET: Failed lookup url for {0} : {1}'.format(fullurl, to_native(e)), changed=False)
module.fail_json(**fail_json)
except SSLValidationError as e:
        fail_json = dict(msg="GET: Error validating the server's certificate for {0} : {1}".format(fullurl, to_native(e)), changed=False)
module.fail_json(**fail_json)
except ConnectionError as e:
fail_json = dict(msg='GET: Error connecting to {0} : {1}'.format(fullurl, to_native(e)), changed=False)
module.fail_json(**fail_json)
result['data'] = json.loads(response.read())
module.exit_json(**result)
|
35,790 |
def read_pfm(file_name: str, slice_channels: int = 2) -> np.ndarray:
"""Read file in .pfm format. Might contain
Args:
file_name (str): Path to the file.
slice_channels (int): Number of channels to slice out of the file.
Useful for reading different data formats stored in .pfm files: Optical Flows, Stereo Disparity Maps, etc.
"""
with open(file_name, "rb") as f:
header = f.readline().rstrip()
if header not in [b"PF", b"Pf"]:
raise ValueError("Invalid PFM file")
dim_match = re.match(rb"^(\d+)\s(\d+)\s$", f.readline())
if not dim_match:
raise Exception("Malformed PFM header.")
w, h = (int(dim) for dim in dim_match.groups())
scale = float(f.readline().rstrip())
if scale < 0: # little-endian
endian = "<"
scale = -scale
else:
endian = ">" # big-endian
data = np.fromfile(f, dtype=endian + "f")
pfm_channels = 3 if header == b"PF" else 1
data = data.reshape(h, w, pfm_channels).transpose(2, 0, 1)
data = np.flip(data, axis=1) # flip on h dimension
data = data[:slice_channels, :, :]
return data.astype(np.float32)
|
def read_pfm(file_name: str, slice_channels: int = 2) -> np.ndarray:
"""Read file in .pfm format. Might contain
Args:
file_name (str): Path to the file.
slice_channels (int): Number of channels to slice out of the file.
Useful for reading different data formats stored in .pfm files: Optical Flows, Stereo Disparity Maps, etc.
"""
with open(file_name, "rb") as f:
header = f.readline().rstrip()
if header not in [b"PF", b"Pf"]:
raise ValueError("Invalid PFM file")
dim_match = re.match(rb"^(\d+)\s(\d+)\s$", f.readline())
if not dim_match:
raise Exception("Malformed PFM header.")
w, h = (int(dim) for dim in dim_match.groups())
scale = float(f.readline().rstrip())
if scale < 0: # little-endian
endian = "<"
scale = -scale
else:
endian = ">" # big-endian
data = np.fromfile(f, dtype=endian + "f")
pfm_channels = 3 if header == b"PF" else 1
data = data.reshape(h, w, pfm_channels).transpose(2, 0, 1)
data = np.flip(data, axis=1) # flip on h dimension
data = data[:slice_channels, :, :]
return data.astype(np.float32)
|
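A small round-trip sketch of the format handled above, assuming read_pfm from the surrounding module is importable: a tiny single-channel PFM is written to a temporary file (rows stored bottom-up, a negative scale marking little-endian) and read back.
import tempfile
import numpy as np

data = np.arange(6, dtype=np.float32).reshape(2, 3)          # h=2, w=3
with tempfile.NamedTemporaryFile(suffix=".pfm", delete=False) as f:
    f.write(b"Pf\n3 2\n-1.0\n")                              # grayscale header, little-endian
    f.write(np.flip(data, axis=0).astype("<f4").tobytes())   # PFM stores rows bottom-up
out = read_pfm(f.name, slice_channels=1)                     # -> shape (1, 2, 3)
assert np.allclose(out[0], data)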
48,627 |
def rel_filter_out(
lhs: pgast.SelectStmt, rhs: pgast.SelectStmt,
path_id: irast.PathId, *,
ctx: context.CompilerContextLevel,
) -> pgast.SelectStmt:
"""Filter elements out of the LHS that appear on the RHS"""
# We unfortunately need to wrap up the LHS in another select so that
# this works when the LHS is a set op.
rvar = rvar_for_rel(lhs, ctx=ctx)
qry = pgast.SelectStmt(from_clause=[rvar])
# Plumb it all up...
pathctx.put_path_value_rvar(qry, path_id, rvar, env=ctx.env)
if path_id.is_objtype_path():
pathctx.put_path_source_rvar(
qry, path_id, rvar, env=ctx.env)
pathctx.put_path_bond(qry, path_id)
# The useful work: grab the identity from the LHS and do a negated
# semi join against the RHS.
src_ref = pathctx.get_path_identity_var(
qry, path_id=path_id, env=ctx.env)
pathctx.get_path_identity_output(
rhs, path_id=path_id, env=ctx.env)
cond_expr = astutils.new_binop(src_ref, rhs, 'NOT IN')
qry.where_clause = astutils.extend_binop(
qry.where_clause, cond_expr)
return qry
|
def rel_filter_out(
lhs: pgast.SelectStmt, rhs: pgast.SelectStmt,
path_id: irast.PathId, *,
ctx: context.CompilerContextLevel,
) -> pgast.SelectStmt:
"""Filter elements out of the LHS that appear on the RHS"""
# We unfortunately need to wrap up the LHS in another select so that
# this works when the LHS is a set op.
rvar = rvar_for_rel(lhs, ctx=ctx)
qry = pgast.SelectStmt(from_clause=[rvar])
# Plumb it all up...
pathctx.put_path_value_rvar(qry, path_id, rvar, env=ctx.env)
if path_id.is_objtype_path():
pathctx.put_path_source_rvar(
qry, path_id, rvar, env=ctx.env)
pathctx.put_path_bond(qry, path_id)
# The useful work: grab the identity from the LHS and do an
# anti-join against the RHS.
src_ref = pathctx.get_path_identity_var(
qry, path_id=path_id, env=ctx.env)
pathctx.get_path_identity_output(
rhs, path_id=path_id, env=ctx.env)
cond_expr = astutils.new_binop(src_ref, rhs, 'NOT IN')
qry.where_clause = astutils.extend_binop(
qry.where_clause, cond_expr)
return qry
|
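The comment change above ("negated semi join" vs "anti-join") names the same operation; in plain Python the NOT IN shape the compiler emits behaves like this:
lhs = [1, 2, 3, 4]
rhs = [2, 4]
print([x for x in lhs if x not in rhs])   # [1, 3] -- rows of lhs with no match in rhs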
43,806 |
def net_flow_constraint(graph: nx.DiGraph) -> qml.Hamiltonian:
r"""Calculates the `net flow constraint <https://doi.org/10.1080/0020739X.2010.526248>`__
Hamiltonian.
The net-zero flow constraint is, for all :math:`i`:
.. math:: \sum_{j, (i, j) \in E} x_{ij} = \sum_{j, (j, i) \in E} x_{ji},
where :math:`E` are the edges of the graph and :math:`x_{ij}` is a binary number that selects
whether to include the edge :math:`(i, j)`.
The corresponding qubit Hamiltonian is:
.. math::
\frac{1}{4}\sum_{i \in V} \left((d_{i}^{\rm out} - d_{i}^{\rm in})\mathbb{I} -
\sum_{j, (i, j) \in E} Z_{ij} + \sum_{j, (j, i) \in E} Z_{ji} \right)^{2},
where :math:`V` are the graph vertices, :math:`d_{i}^{\rm out}` and :math:`d_{i}^{\rm in}` are
the outdegree and indegree, respectively, and :math:`Z_{ij}` is a qubit Pauli-Z matrix acting
upon the qubit specified by the pair :math:`(i, j)`. Note that this function omits the
:math:`1/4` constant factor.
This Hamiltonian is minimized by selecting edges such that each node has a net zero flow.
Args:
graph (nx.DiGraph): the graph specifying possible edges
Returns:
qml.Hamiltonian: the net-flow constraint Hamiltonian
Raises:
ValueError: if the input graph is not directed
"""
if not hasattr(graph, "in_edges") or not hasattr(graph, "out_edges"):
raise ValueError("Input graph must be directed")
hamiltonian = qml.Hamiltonian([], [])
for node in graph.nodes:
hamiltonian += _inner_net_flow_constraint_hamiltonian(graph, node)
return hamiltonian
|
def net_flow_constraint(graph: nx.DiGraph) -> qml.Hamiltonian:
r"""Calculates the `net flow constraint <https://doi.org/10.1080/0020739X.2010.526248>`__
Hamiltonian.
The net-zero flow constraint is, for all :math:`i`:
.. math:: \sum_{j, (i, j) \in E} x_{ij} = \sum_{j, (j, i) \in E} x_{ji},
where :math:`E` are the edges of the graph and :math:`x_{ij}` is a binary number that selects
whether to include the edge :math:`(i, j)`.
The corresponding qubit Hamiltonian is:
.. math::
\sum_{i \in V} \left((d_{i}^{\rm out} - d_{i}^{\rm in})\mathbb{I} -
\sum_{j, (i, j) \in E} Z_{ij} + \sum_{j, (j, i) \in E} Z_{ji} \right)^{2},
where :math:`V` are the graph vertices, :math:`d_{i}^{\rm out}` and :math:`d_{i}^{\rm in}` are
the outdegree and indegree, respectively, and :math:`Z_{ij}` is a qubit Pauli-Z matrix acting
upon the qubit specified by the pair :math:`(i, j)`. Note that this function omits the
:math:`1/4` constant factor.
This Hamiltonian is minimized by selecting edges such that each node has a net zero flow.
Args:
graph (nx.DiGraph): the graph specifying possible edges
Returns:
qml.Hamiltonian: the net-flow constraint Hamiltonian
Raises:
ValueError: if the input graph is not directed
"""
if not hasattr(graph, "in_edges") or not hasattr(graph, "out_edges"):
raise ValueError("Input graph must be directed")
hamiltonian = qml.Hamiltonian([], [])
for node in graph.nodes:
hamiltonian += _inner_net_flow_constraint_hamiltonian(graph, node)
return hamiltonian
|
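A hypothetical usage sketch, assuming PennyLane and NetworkX are installed and the function above is importable: a directed triangle gives one wire per edge and a Hamiltonian whose lowest-energy states select edge sets with zero net flow at every node.
import networkx as nx

g = nx.DiGraph([(0, 1), (1, 2), (2, 0)])   # directed 3-cycle, one qubit per edge
h = net_flow_constraint(g)
print(h)                                    # inspect the Pauli-Z terms of the constraint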
34,907 |
def sequence_mask(data, valid_length=None, mask_value=0, axis=0):
"""Sets all elements outside the expected length of the sequence to a constant value.
This function takes an n-dimensional input array of the form [MAX_LENGTH, batch_size, ...] or
[batch_size, MAX_LENGTH, ...] and returns an array of the same shape.
`axis` means the axis of the length dimension and can only be 0 or 1. If `axis` is 0,
the data must have shape [MAX_LENGTH, batch_size, ...]. Otherwise (axis=1), the data must have
shape [batch_size, MAX_LENGTH, ...].
`valid_length` gives the length of each sequence. `valid_length` should be
a 1D int array with positive ints and has dimension [batch_size,].
Parameters
----------
data : tvm.Tensor
N-D with shape [MAX_LENGTH, batch_size, ...] or [batch_size, MAX_LENGTH, ...]
depending on the value of `axis`.
valid_length : tvm.Tensor or None
1-D with shape [batch_size,]
mask_value : float, default 0
The masking value, default 0
axis : int, default 0
axis of the length dimension, must be 0 or 1.
Returns
-------
output : tvm.Tensor
N-D with shape [MAX_LENGTH, batch_size, ...] or [batch_size, MAX_LENGTH, ...]
depending on the value of `axis`.
"""
assert len(data.shape) >= 2,\
"only support data.ndim >= 2, received data.shape = {}".format(data.shape)
assert axis == 0 or axis == 1, "only support axis = 0, 1, received axis = {}".format(axis)
return cpp.sequence_mask(data, valid_length, mask_value, axis)
|
def sequence_mask(data, valid_length=None, mask_value=0, axis=0):
"""Sets all elements outside the expected length of the sequence to a constant value.
This function takes an n-dimensional input array of the form [MAX_LENGTH, batch_size, ...] or
[batch_size, MAX_LENGTH, ...] and returns an array of the same shape.
`axis` means the axis of the length dimension and can only be 0 or 1. If `axis` is 0,
the data must have shape [MAX_LENGTH, batch_size, ...]. Otherwise (axis=1), the data must have
shape [batch_size, MAX_LENGTH, ...].
`valid_length` gives the length of each sequence. `valid_length` should be
a 1D int array with positive ints and has dimension [batch_size,].
Parameters
----------
data : tvm.Tensor
N-D with shape [MAX_LENGTH, batch_size, ...] or [batch_size, MAX_LENGTH, ...]
depending on the value of `axis`.
valid_length : tvm.Tensor or None
1-D with shape [batch_size,]
mask_value : float, optional
The masking value, default 0
axis : int, default 0
axis of the length dimension, must be 0 or 1.
Returns
-------
output : tvm.Tensor
N-D with shape [MAX_LENGTH, batch_size, ...] or [batch_size, MAX_LENGTH, ...]
depending on the value of `axis`.
"""
assert len(data.shape) >= 2,\
"only support data.ndim >= 2, received data.shape = {}".format(data.shape)
assert axis == 0 or axis == 1, "only support axis = 0, 1, received axis = {}".format(axis)
return cpp.sequence_mask(data, valid_length, mask_value, axis)
|
8,906 |
def parse_insta_json(json):
# Parse JSON content
needed = _get_json_data(json)
dimensions = needed.get('dimensions', {})
owner = needed.get('owner', {})
# Build bot response
parts = []
# Title
if needed.get('is_video'):
title = "Video by "
else:
title = "Photo by "
# Author
iuser = owner.get('username')
ifname = owner.get('full_name')
if ifname and iuser:
parts.append('%s %s (@%s)' % (title, ifname, iuser))
elif iuser:
parts.append('%s @%s' % (title, iuser))
elif ifname:
parts.append('%s %s' % (title, ifname))
else:
parts.append('%s unknown user' % title)
# Media caption
try:
icap = needed['edge_media_to_caption']['edges'][0]['node']['text']
# Strip newlines
icap = icap.replace('\n', ' ')
# Truncate caption
icap = (icap[:256] + '…') if len(icap) > 256 else icap
except (KeyError, IndexError):
icap = None
if icap:
parts.append(icap)
# Media width and height
iwidth = dimensions.get('width') or None
iheight = dimensions.get('height') or None
if iwidth and iheight:
parts.append('%sx%s' % (iwidth, iheight))
# Likes
ilikes = str(needed.get('edge_media_preview_like', {}).get('count'))
if ilikes and ilikes.isdigit():
parts.append(
_format_counter(int(ilikes), 'No ♥ yet', '1 ♥', '{number} ♥s'))
# Comments
icomms = str(needed.get('edge_media_to_parent_comment', {}).get('count'))
if icomms and icomms.isdigit():
parts.append(_format_counter(int(icomms),
'No comment',
'1 comment',
'{number} comments'))
# Publishing date
idate = needed.get('taken_at_timestamp')
if idate:
dateformat = '%Y-%m-%d %H:%M:%S'
pubdate = datetime.utcfromtimestamp(idate).strftime(dateformat)
parts.append('Uploaded: %s' % pubdate)
# Build the message
return ' | '.join(parts)
|
def parse_insta_json(json):
# Parse JSON content
needed = _get_json_data(json)
dimensions = needed.get('dimensions', {})
owner = needed.get('owner', {})
# Build bot response
parts = []
# Title
if needed.get('is_video'):
title = "Video by "
else:
title = "Photo by "
# Author
iuser = owner.get('username')
ifname = owner.get('full_name')
if ifname and iuser:
parts.append('%s %s (@%s)' % (title, ifname, iuser))
elif iuser:
parts.append('%s @%s' % (title, iuser))
elif ifname:
parts.append('%s %s' % (title, ifname))
else:
parts.append('%s unknown user' % title)
# Media caption
try:
icap = needed['edge_media_to_caption']['edges'][0]['node']['text']
# Strip newlines
icap = icap.replace('\n', ' ')
# Truncate caption
icap = (icap[:256] + '…') if len(icap) > 256 else icap
except (KeyError, IndexError):
icap = None
if icap:
parts.append(icap)
# Media width and height
iwidth = dimensions.get('width') or None
iheight = dimensions.get('height') or None
if iwidth and iheight:
parts.append('%sx%s' % (iwidth, iheight))
# Likes
ilikes = str(needed.get('edge_media_preview_like', {}).get('count'))
if ilikes and ilikes.isdigit():
parts.append(
_format_counter(int(ilikes), 'No ♥ yet', '1 ♥', '{number} ♥s'))
# Comments
icomms = str(needed.get('edge_media_to_parent_comment', {}).get('count'))
if icomms and icomms.isdigit():
parts.append(_format_counter(int(icomms),
'No comments',
'1 comment',
'{number} comments'))
# Publishing date
idate = needed.get('taken_at_timestamp')
if idate:
dateformat = '%Y-%m-%d %H:%M:%S'
pubdate = datetime.utcfromtimestamp(idate).strftime(dateformat)
parts.append('Uploaded: %s' % pubdate)
# Build the message
return ' | '.join(parts)
|
7,332 |
def _clean_email(email):
if not '@' in email:
return
name, domain = email.split('@')
name = name.split('+', 1)[0]
return f"{name.lower()}@{domain.lower()}"
|
def _clean_email(email):
if not '@' in email:
return
name, domain = email.split('@')
name = name.split('+', 1)[0]
return f"{name}@{domain}".lower()
|
31,787 |
def create_member(args):
try:
client = aws_session(
region=args.get('region'),
roleArn=args.get('roleArn'),
roleSessionName=args.get('roleSessionName'),
roleSessionDuration=args.get('roleSessionDuration'),
)
accountDetails = []
account = {'AccountId': args.get('accountId'), 'Email': args.get('email')}
accountDetails.append(account)
response = client.create_members(
DetectorId=args.get('detectorId'),
AccountDetails=accountDetails
)
unprocessed_accounts = response.get('UnprocessedAccounts', [])
ec = {"AWS.GuardDuty.CreateMember.UnprocessedAccounts": unprocessed_accounts} \
if unprocessed_accounts else None
return create_entry('AWS GuardDuty Create Member', unprocessed_accounts, ec)
except Exception as e:
return raise_error(e)
|
def create_member(args):
try:
client = aws_session(
region=args.get('region'),
roleArn=args.get('roleArn'),
roleSessionName=args.get('roleSessionName'),
roleSessionDuration=args.get('roleSessionDuration'),
)
account_details = []
account = {'AccountId': args.get('accountId'), 'Email': args.get('email')}
account_details.append(account)
response = client.create_members(
DetectorId=args.get('detectorId'),
AccountDetails=account_details
)
unprocessed_accounts = response.get('UnprocessedAccounts', [])
ec = {"AWS.GuardDuty.CreateMember.UnprocessedAccounts": unprocessed_accounts} \
if unprocessed_accounts else None
return create_entry('AWS GuardDuty Create Member', unprocessed_accounts, ec)
except Exception as e:
return raise_error(e)
|
26,125 |
def sdk_send_freeze_ledgers(looper, sdk_pool_handle, sdk_wallets, ledgers_ids: [int]):
req = build_freeze_ledgers_request(sdk_wallets[0][1], ledgers_ids) # json.dumps(params)
signed_reqs = sdk_multi_sign_request_objects(looper, sdk_wallets, [req])
reps = sdk_send_signed_requests(sdk_pool_handle, signed_reqs)
return sdk_get_and_check_replies(looper, reps)[0]
|
def sdk_send_freeze_ledgers(looper, sdk_pool_handle, sdk_wallets, ledgers_ids: [int]):
req = build_freeze_ledgers_request(sdk_wallets[0][1], ledgers_ids)
signed_reqs = sdk_multi_sign_request_objects(looper, sdk_wallets, [req])
reps = sdk_send_signed_requests(sdk_pool_handle, signed_reqs)
return sdk_get_and_check_replies(looper, reps)[0]
|
8,490 |
def find_binary_dependencies(binaries, binding_redirects, import_packages):
"""
Find dynamic dependencies (linked shared libraries) for the provided list of binaries.
Before scanning the binaries, the function imports the packages from provided list of packages to import, to ensure
that library search paths are properly set up (i.e., if a package sets up search paths when imported9. Therefore,
this function *must* always be called in an isolated subprocess to avoid import leaks!
binaries
List of binaries to scan for dynamic dependencies.
binding_redirects
List of assembly binding redirects.
import_packages
List of packages to import prior to scanning binaries.
:return: expanded list of binaries and then dependencies.
"""
from PyInstaller.depend import bindepend
# Import collected packages to set up environment
for package in import_packages:
try:
__import__(package)
except Exception:
pass
# Search for dependencies of the given binaries
return bindepend.Dependencies(binaries, redirects=binding_redirects)
|
def find_binary_dependencies(binaries, binding_redirects, import_packages):
"""
Find dynamic dependencies (linked shared libraries) for the provided list of binaries.
Before scanning the binaries, the function imports the packages from provided list of packages to import, to ensure
that library search paths are properly set up (i.e., if a package sets up search paths when imported). Therefore,
this function *must* always be called in an isolated subprocess to avoid import leaks!
binaries
List of binaries to scan for dynamic dependencies.
binding_redirects
List of assembly binding redirects.
import_packages
List of packages to import prior to scanning binaries.
:return: expanded list of binaries and then dependencies.
"""
from PyInstaller.depend import bindepend
# Import collected packages to set up environment
for package in import_packages:
try:
__import__(package)
except Exception:
pass
# Search for dependencies of the given binaries
return bindepend.Dependencies(binaries, redirects=binding_redirects)
|
40,992 |
def site_metas(request):
"""
Context processor to add all information required by Richie CMS templates and frontend.
If `CDN_DOMAIN` settings is defined we add it in the context. It allows
to load statics js on a CDN like cloudfront.
"""
site_current = Site.objects.get_current()
protocol = "https" if request.is_secure() else "http"
authentication_delegation = getattr(settings, "AUTHENTICATION_DELEGATION", False)
context = {
**{
f"GLIMPSE_PAGINATION_{k.upper()}": v
for k, v in {
**defaults.GLIMPSE_PAGINATION,
**getattr(settings, "RICHIE_GLIMPSE_PAGINATION", {}),
}.items()
},
"SITE": {
"name": site_current.name,
"domain": site_current.domain,
"web_url": f"{protocol:s}://{site_current.domain:s}",
},
"FRONTEND_CONTEXT": {
"context": {
"csrftoken": get_token(request),
"environment": getattr(settings, "ENVIRONMENT", ""),
"release": getattr(settings, "RELEASE", ""),
"sentry_dsn": getattr(settings, "SENTRY_DSN", ""),
}
},
}
if getattr(settings, "CDN_DOMAIN", False):
context["CDN_DOMAIN"] = settings.CDN_DOMAIN
if getattr(settings, "AUTHENTICATION_DELEGATION", False):
context.update(
{
"AUTHENTICATION": {
"PROFILE_URLS": json.dumps(
[
{
"label": str(url["label"]),
"action": str(
url["href"].format(
base_url=authentication_delegation["BASE_URL"]
)
),
}
for url in getattr(
authentication_delegation, "PROFILE_URLS", []
)
]
),
}
}
)
context["FRONTEND_CONTEXT"]["context"].update(
{
"authentication": {
"endpoint": authentication_delegation["BASE_URL"],
"backend": authentication_delegation["BACKEND"],
}
}
)
if getattr(settings, "LMS_BACKENDS", False):
context["FRONTEND_CONTEXT"]["context"].update(
{
"lms_backends": [
{
"endpoint": lms["BASE_URL"],
"backend": lms["BACKEND"],
"course_regexp": lms["JS_COURSE_REGEX"],
"selector_regexp": lms["JS_SELECTOR_REGEX"],
}
for lms in getattr(settings, "LMS_BACKENDS", [])
]
}
),
context["FRONTEND_CONTEXT"] = json.dumps(context["FRONTEND_CONTEXT"])
return context
|
def site_metas(request):
"""
Context processor to add all information required by Richie CMS templates and frontend.
If `CDN_DOMAIN` settings is defined we add it in the context. It allows
to load statics js on a CDN like cloudfront.
"""
site_current = Site.objects.get_current()
protocol = "https" if request.is_secure() else "http"
authentication_delegation = getattr(settings, "AUTHENTICATION_DELEGATION", False)
context = {
**{
f"GLIMPSE_PAGINATION_{k.upper()}": v
for k, v in {
**defaults.GLIMPSE_PAGINATION,
**getattr(settings, "RICHIE_GLIMPSE_PAGINATION", {}),
}.items()
},
"SITE": {
"name": site_current.name,
"domain": site_current.domain,
"web_url": f"{protocol:s}://{site_current.domain:s}",
},
"FRONTEND_CONTEXT": {
"context": {
"csrftoken": get_token(request),
"environment": getattr(settings, "ENVIRONMENT", ""),
"release": getattr(settings, "RELEASE", ""),
"sentry_dsn": getattr(settings, "SENTRY_DSN", ""),
}
},
}
if getattr(settings, "CDN_DOMAIN", False):
context["CDN_DOMAIN"] = settings.CDN_DOMAIN
if getattr(settings, "AUTHENTICATION_DELEGATION", None):
context.update(
{
"AUTHENTICATION": {
"PROFILE_URLS": json.dumps(
[
{
"label": str(url["label"]),
"action": str(
url["href"].format(
base_url=authentication_delegation["BASE_URL"]
)
),
}
for url in getattr(
authentication_delegation, "PROFILE_URLS", []
)
]
),
}
}
)
context["FRONTEND_CONTEXT"]["context"].update(
{
"authentication": {
"endpoint": authentication_delegation["BASE_URL"],
"backend": authentication_delegation["BACKEND"],
}
}
)
if getattr(settings, "LMS_BACKENDS", False):
context["FRONTEND_CONTEXT"]["context"].update(
{
"lms_backends": [
{
"endpoint": lms["BASE_URL"],
"backend": lms["BACKEND"],
"course_regexp": lms["JS_COURSE_REGEX"],
"selector_regexp": lms["JS_SELECTOR_REGEX"],
}
for lms in getattr(settings, "LMS_BACKENDS", [])
]
}
),
context["FRONTEND_CONTEXT"] = json.dumps(context["FRONTEND_CONTEXT"])
return context
|
46,553 |
def prepare_full_genesis_deposits(spec,
amount,
pubkey_max_range,
pubkey_min_range=0,
signed=False,
deposit_data_list=None):
if deposit_data_list is None:
deposit_data_list = []
genesis_deposits = []
for validator_index in range(pubkey_min_range, pubkey_max_range):
pubkey = pubkeys[validator_index]
privkey = privkeys[validator_index]
# insecurely use pubkey as withdrawal key if no credentials provided
withdrawal_credentials = spec.BLS_WITHDRAWAL_PREFIX + spec.hash(pubkey)[1:]
deposit, root, deposit_data_list = build_deposit(
spec,
deposit_data_list=deposit_data_list,
pubkey=pubkey,
privkey=privkey,
amount=amount,
withdrawal_credentials=withdrawal_credentials,
signed=signed,
)
genesis_deposits.append(deposit)
return genesis_deposits, root, deposit_data_list
|
def prepare_full_genesis_deposits(spec,
amount,
deposit_count,
min_validator_index=0,
signed=False,
deposit_data_list=None):
if deposit_data_list is None:
deposit_data_list = []
genesis_deposits = []
for validator_index in range(min_validator_index, deposit_count + min_validator_index):
pubkey = pubkeys[validator_index]
privkey = privkeys[validator_index]
# insecurely use pubkey as withdrawal key if no credentials provided
withdrawal_credentials = spec.BLS_WITHDRAWAL_PREFIX + spec.hash(pubkey)[1:]
deposit, root, deposit_data_list = build_deposit(
spec,
deposit_data_list=deposit_data_list,
pubkey=pubkey,
privkey=privkey,
amount=amount,
withdrawal_credentials=withdrawal_credentials,
signed=signed,
)
genesis_deposits.append(deposit)
return genesis_deposits, root, deposit_data_list
|
46,026 |
def find_homography_lines_dlt(ls1_: Tensor,
ls2_: Tensor,
weights: Optional[Tensor] = None) -> Tensor:
r"""Compute the homography matrix using the DLT formulation for line correspondences.
See :cite:`homolines2001` for details.
The linear system is solved by using the Weighted Least Squares Solution for the 4 Line correspondences algorithm.
Args:
ls1: A set of line segments in the first image with a tensor shape :math:`(B, N, 2, 2)`.
ls2: A set of line segments in the second image with a tensor shape :math:`(B, N, 2, 2)`.
weights: Tensor containing the weights per point correspondence with a shape of :math:`(B, N)`.
Returns:
the computed homography matrix with shape :math:`(B, 3, 3)`.
"""
if len(ls1_.shape) == 3:
ls1: Tensor = ls1_[None]
else:
ls1 = ls1_
if len(ls2_.shape) == 3:
ls2: Tensor = ls2_[None]
else:
ls2 = ls2_
KORNIA_CHECK_SHAPE(ls1, ["B", "N", "2", "2"])
KORNIA_CHECK_SHAPE(ls2, ["B", "N", "2", "2"])
B, N = ls1.shape[:2]
device, dtype = _extract_device_dtype([ls1, ls2])
points1 = ls1.reshape(B, 2 * N, 2)
points2 = ls2.reshape(B, 2 * N, 2)
points1_norm, transform1 = normalize_points(points1)
points2_norm, transform2 = normalize_points(points2)
lst1, le1 = torch.chunk(points1_norm, dim=1, chunks=2)
lst2, le2 = torch.chunk(points2_norm, dim=1, chunks=2)
xs1, ys1 = torch.chunk(lst1, dim=-1, chunks=2) # BxNx1
xs2, ys2 = torch.chunk(lst2, dim=-1, chunks=2) # BxNx1
xe1, ye1 = torch.chunk(le1, dim=-1, chunks=2) # BxNx1
xe2, ye2 = torch.chunk(le2, dim=-1, chunks=2) # BxNx1
A = ys2 - ye2
B = xe2 - xs2 # type: ignore
C = xs2 * ye2 - xe2 * ys2
eps: float = 1e-8
# http://diis.unizar.es/biblioteca/00/09/000902.pdf
ax = torch.cat([A * xs1, A * ys1, A, B * xs1, B * ys1, B, C * xs1, C * ys1, C], dim=-1) # type: ignore
ay = torch.cat([A * xe1, A * ye1, A, B * xe1, B * ye1, B, C * xe1, C * ye1, C], dim=-1) # type: ignore
A = torch.cat((ax, ay), dim=-1).reshape(ax.shape[0], -1, ax.shape[-1]) # type: ignore
if weights is None:
# All points are equally important
A = A.transpose(-2, -1) @ A
else:
# We should use provided weights
if not ((len(weights.shape) == 2) and (weights.shape == ls1.shape[:2])):
raise AssertionError(weights.shape)
w_diag = torch.diag_embed(weights.unsqueeze(dim=-1).repeat(1, 1, 2).reshape(weights.shape[0], -1))
A = A.transpose(-2, -1) @ w_diag @ A
try:
_, _, V = torch.svd(A)
except RuntimeError:
warnings.warn('SVD did not converge', RuntimeWarning)
return torch.empty((points1_norm.size(0), 3, 3), device=device, dtype=dtype)
H = V[..., -1].view(-1, 3, 3)
H = transform2.inverse() @ (H @ transform1)
H_norm = H / (H[..., -1:, -1:] + eps)
return H_norm
|
def find_homography_lines_dlt(ls1_: Tensor,
ls2_: Tensor,
weights: Optional[Tensor] = None) -> Tensor:
r"""Compute the homography matrix using the DLT formulation for line correspondences.
See :cite:`homolines2001` for details.
The linear system is solved by using the Weighted Least Squares Solution for the 4 Line correspondences algorithm.
Args:
ls1: A set of line segments in the first image with a tensor shape :math:`(B, N, 2, 2)`.
ls2: A set of line segments in the second image with a tensor shape :math:`(B, N, 2, 2)`.
weights: Tensor containing the weights per point correspondence with a shape of :math:`(B, N)`.
Returns:
the computed homography matrix with shape :math:`(B, 3, 3)`.
"""
if len(ls1_.shape) == 3:
ls1 = ls1_[None]
else:
ls1 = ls1_
if len(ls2_.shape) == 3:
ls2: Tensor = ls2_[None]
else:
ls2 = ls2_
KORNIA_CHECK_SHAPE(ls1, ["B", "N", "2", "2"])
KORNIA_CHECK_SHAPE(ls2, ["B", "N", "2", "2"])
B, N = ls1.shape[:2]
device, dtype = _extract_device_dtype([ls1, ls2])
points1 = ls1.reshape(B, 2 * N, 2)
points2 = ls2.reshape(B, 2 * N, 2)
points1_norm, transform1 = normalize_points(points1)
points2_norm, transform2 = normalize_points(points2)
lst1, le1 = torch.chunk(points1_norm, dim=1, chunks=2)
lst2, le2 = torch.chunk(points2_norm, dim=1, chunks=2)
xs1, ys1 = torch.chunk(lst1, dim=-1, chunks=2) # BxNx1
xs2, ys2 = torch.chunk(lst2, dim=-1, chunks=2) # BxNx1
xe1, ye1 = torch.chunk(le1, dim=-1, chunks=2) # BxNx1
xe2, ye2 = torch.chunk(le2, dim=-1, chunks=2) # BxNx1
A = ys2 - ye2
B = xe2 - xs2 # type: ignore
C = xs2 * ye2 - xe2 * ys2
eps: float = 1e-8
# http://diis.unizar.es/biblioteca/00/09/000902.pdf
ax = torch.cat([A * xs1, A * ys1, A, B * xs1, B * ys1, B, C * xs1, C * ys1, C], dim=-1) # type: ignore
ay = torch.cat([A * xe1, A * ye1, A, B * xe1, B * ye1, B, C * xe1, C * ye1, C], dim=-1) # type: ignore
A = torch.cat((ax, ay), dim=-1).reshape(ax.shape[0], -1, ax.shape[-1]) # type: ignore
if weights is None:
# All points are equally important
A = A.transpose(-2, -1) @ A
else:
# We should use provided weights
if not ((len(weights.shape) == 2) and (weights.shape == ls1.shape[:2])):
raise AssertionError(weights.shape)
w_diag = torch.diag_embed(weights.unsqueeze(dim=-1).repeat(1, 1, 2).reshape(weights.shape[0], -1))
A = A.transpose(-2, -1) @ w_diag @ A
try:
_, _, V = torch.svd(A)
except RuntimeError:
warnings.warn('SVD did not converge', RuntimeWarning)
return torch.empty((points1_norm.size(0), 3, 3), device=device, dtype=dtype)
H = V[..., -1].view(-1, 3, 3)
H = transform2.inverse() @ (H @ transform1)
H_norm = H / (H[..., -1:, -1:] + eps)
return H_norm
|
7,447 |
def imsave(fname, arr, **kwargs):
"""Load a tiff image to file.
Parameters
----------
fname : str or file
File name or file-like-object.
arr : ndarray
The array to write
kwargs : keyword pairs, optional
Additional keyword arguments to pass through (see ``tifffile``'s
``imwrite`` function).
Notes
-----
Provided by the tifffile library [1]_, and supports many
advanced image types including multi-page and floating point.
This implementation will set `photometric='RGB'` when writing if the first
or last axis of arr has shape 3 or 4. To override this, explicitly
specify the photometric kwarg.
This implementation will set `planarconfig='SEPARATE'` when writing if the
first axis of arr has shape 3 or 4. To override this, explicitly
specify the planarconfig kwarg.
References
----------
.. [1] https://pypi.org/project/tifffile/
"""
if arr.shape[0] in [3, 4]:
if 'planarconfig' not in kwargs:
kwargs['planarconfig'] = 'SEPARATE'
rgb = True
else:
rgb = arr.shape[-1] in [3, 4]
if rgb and 'photometric' not in kwargs:
kwargs['photometric'] = 'RGB'
return tifffile_imwrite(fname, arr, **kwargs)
|
def imsave(fname, arr, **kwargs):
"""Load a tiff image to file.
Parameters
----------
fname : str or file
File name or file-like-object.
arr : ndarray
The array to write
kwargs : keyword pairs, optional
Additional keyword arguments to pass through (see ``tifffile``'s
``imwrite`` function).
Notes
-----
Provided by the tifffile library [1]_, and supports many
advanced image types including multi-page and floating point.
This implementation will set `photometric='RGB'` when writing if the first
or last axis of arr has shape 3 or 4. To override this, explicitly
pass the photometric kwarg.
This implementation will set `planarconfig='SEPARATE'` when writing if the
first axis of arr has shape 3 or 4. To override this, explicitly
specify the planarconfig kwarg.
References
----------
.. [1] https://pypi.org/project/tifffile/
"""
if arr.shape[0] in [3, 4]:
if 'planarconfig' not in kwargs:
kwargs['planarconfig'] = 'SEPARATE'
rgb = True
else:
rgb = arr.shape[-1] in [3, 4]
if rgb and 'photometric' not in kwargs:
kwargs['photometric'] = 'RGB'
return tifffile_imwrite(fname, arr, **kwargs)
|
44,934 |
def get_flow_run_command(flow_run: GraphQLResult) -> str:
"""
Determine the flow run command to use based on a flow's version. This is due to a command
deprecation in `0.13.0` where `execute cloud-flow` was changed to `execute flow-run`
Args:
- flow_run (GraphQLResult): A GraphQLResult flow run object
Returns:
- str: a prefect CLI command to execute a flow run
"""
core_version = flow_run.flow.core_version
if LooseVersion(core_version) < LooseVersion("0.13.0"):
return "prefect execute cloud-flow"
return "prefect execute flow-run"
|
def get_flow_run_command(flow_run: GraphQLResult) -> str:
"""
Determine the flow run command to use based on a flow's version. This is due to a command
deprecation in `0.13.0` where `execute cloud-flow` was changed to `execute flow-run`
Args:
- flow_run (GraphQLResult): A GraphQLResult flow run object
Returns:
- str: a prefect CLI command to execute a flow run
"""
core_version = flow_run.flow.core_version or "0.0.0"
if LooseVersion(core_version) < LooseVersion("0.13.0"):
return "prefect execute cloud-flow"
return "prefect execute flow-run"
|
24,845 |
def my_func(self, doc_type): # [missing-return-type-doc, missing-return-doc]
"""This is a docstring.
:param doc_type: Sphinx
:type doc_type: str
"""
return False
|
def warn_missing_sphinx_returns(self, doc_type): # [missing-return-type-doc, missing-return-doc]
"""This is a docstring.
:param doc_type: Sphinx
:type doc_type: str
"""
return False
|
55,573 |
def test_app_receives_http_disconnect_while_sending_if_discarded(test_client_factory):
class DiscardingMiddleware(BaseHTTPMiddleware):
async def dispatch(self, request, call_next):
await call_next(request)
return PlainTextResponse("Custom")
async def downstream_app(scope, receive, send):
await send(
{
"type": "http.response.start",
"status": 200,
"headers": [
(b"content-type", b"text/plain"),
],
}
)
async with anyio.create_task_group() as task_group:
async def cancel_on_disconnect():
while True:
message = await receive()
if message["type"] == "http.disconnect":
task_group.cancel_scope.cancel()
break
task_group.start_soon(cancel_on_disconnect)
await send(
{
"type": "http.response.body",
"body": b"chunk",
"more_body": True,
}
)
pytest.fail(
"http.disconnect should have been received and canceled the scope"
)
app = DiscardingMiddleware(downstream_app)
client = test_client_factory(app)
response = client.get("/does_not_exist")
assert response.text == "Custom"
|
def test_app_receives_http_disconnect_while_sending_if_discarded(test_client_factory):
class DiscardingMiddleware(BaseHTTPMiddleware):
async def dispatch(self, request, call_next):
await call_next(request)
return PlainTextResponse("Custom")
async def downstream_app(scope, receive, send):
await send(
{
"type": "http.response.start",
"status": 200,
"headers": [
(b"content-type", b"text/plain"),
],
}
)
async with anyio.create_task_group() as task_group:
async def cancel_on_disconnect():
while True:
message = await receive()
if message["type"] == "http.disconnect":
task_group.cancel_scope.cancel()
break
task_group.start_soon(cancel_on_disconnect)
await send(
{
"type": "http.response.body",
"body": b"chunk",
"more_body": True,
}
)
pytest.fail(
"http.disconnect should have been received and canceled the scope"
) # pragma: no cover
app = DiscardingMiddleware(downstream_app)
client = test_client_factory(app)
response = client.get("/does_not_exist")
assert response.text == "Custom"
|
14,474 |
def _remove_prefix(text: str, prefix: str) -> str:
return re.sub(r'^{0}'.format(re.escape(prefix)), '', text)
|
def _remove_prefix(text: str, prefix: str) -> str:
return text[len(prefix):] if text.startswith(prefix) else text
|
44,918 |
def between_times(start: time, end: time) -> Callable[[datetime], bool]:
"""
Filter that allows events between a start time and end time
For example, `between_times(start=datetime.time(14), end=datetime.time(16))` would only
allow runs between the hours of 2 PM and 5 PM in the given timezone.
Args:
- start (time): the start time
- end (time): the end time
Returns:
- Callable[[datetime], bool]: a filter function
"""
def _filter_fn(dt: datetime) -> bool:
# if the start is before the end, these represent times in the same day
if start <= end:
return dt.time() >= start and dt.time() <= end
# otherwise they represent times across two days
else:
return dt.time() >= start or dt.time() <= end
return _filter_fn
|
def between_times(start: time, end: time) -> Callable[[datetime], bool]:
"""
Filter that allows events between a start time and end time
For example, `between_times(start=datetime.time(14), end=datetime.time(16))` would only
allow runs between the hours of 2 PM and 4 PM in the given timezone.
Args:
- start (time): the start time
- end (time): the end time
Returns:
- Callable[[datetime], bool]: a filter function
"""
def _filter_fn(dt: datetime) -> bool:
# if the start is before the end, these represent times in the same day
if start <= end:
return dt.time() >= start and dt.time() <= end
# otherwise they represent times across two days
else:
return dt.time() >= start or dt.time() <= end
return _filter_fn
|
47,505 |
def check_models_are_tested(module, test_file):
"""Check models defined in module are tested in test_file."""
# XxxPreTrainedModel are not tested
defined_models = get_models(module)
tested_models = find_tested_models(test_file)
if tested_models is None:
if test_file.replace(os.sep, "/") in TEST_FILES_WITH_NO_COMMON_TESTS:
return
return [
f"{test_file} should define `all_model_classes` to apply common tests to the models it tests. "
+ "If this intentional, add the test filename to `TEST_FILES_WITH_NO_COMMON_TESTS` in the file "
+ "`utils/check_repo.py`."
]
failures = []
for model_name, _ in defined_models:
if model_name not in tested_models and model_name not in IGNORE_NON_TESTED:
failures.append(
f"{model_name} is defined in {module.__name__} but is not tested in "
+ f"{os.path.join(PATH_TO_TESTS, test_file)}. Add it to the all_model_classes in that file."
+ "If common tests should not applied to that model, add its name to `IGNORE_NON_TESTED`"
+ "in the file `utils/check_repo.py`."
)
return failures
|
def check_models_are_tested(module, test_file):
"""Check models defined in module are tested in test_file."""
# XxxPreTrainedModel are not tested
defined_models = get_models(module)
tested_models = find_tested_models(test_file)
if tested_models is None:
if test_file.replace(os.path.sep, "/") in TEST_FILES_WITH_NO_COMMON_TESTS:
return
return [
f"{test_file} should define `all_model_classes` to apply common tests to the models it tests. "
+ "If this intentional, add the test filename to `TEST_FILES_WITH_NO_COMMON_TESTS` in the file "
+ "`utils/check_repo.py`."
]
failures = []
for model_name, _ in defined_models:
if model_name not in tested_models and model_name not in IGNORE_NON_TESTED:
failures.append(
f"{model_name} is defined in {module.__name__} but is not tested in "
+ f"{os.path.join(PATH_TO_TESTS, test_file)}. Add it to the all_model_classes in that file."
+ "If common tests should not applied to that model, add its name to `IGNORE_NON_TESTED`"
+ "in the file `utils/check_repo.py`."
)
return failures
|
34,160 |
def get_validated_path(
current: Optional[Text],
parameter: Text,
default: Optional[Text] = None,
none_is_valid: bool = False,
) -> Optional[Text]:
"""Check whether a file path or its default value is valid and returns it.
Args:
current: The parsed value.
parameter: The name of the parameter.
default: The default value of the parameter.
none_is_valid: `True` if `None` is valid value for the path,
else `False``
Returns:
The current value if it was valid, else the default value of the
argument if it is valid, else `None`.
"""
if current is None or current is not None and not os.path.exists(current):
if default is not None and os.path.exists(default):
reason_str = "'{}' not found.".format(current)
if current is None:
reason_str = "Parameter '{}' not set.".format(parameter)
logger.debug(
"{} Using default location '{}' instead." "".format(reason_str, default)
)
current = default
elif none_is_valid:
current = None
else:
cancel_cause_not_found(current, parameter, default)
return current
|
def get_validated_path(
current: Optional[Text],
parameter: Text,
default: Optional[Text] = None,
none_is_valid: bool = False,
) -> Optional[Text]:
"""Check whether a file path or its default value is valid and returns it.
Args:
current: The parsed value.
parameter: The name of the parameter.
default: The default value of the parameter.
none_is_valid: `True` if `None` is valid value for the path,
else `False``
Returns:
The current value if it was valid, else the default value of the
argument if it is valid, else `None`.
"""
if current is None or current is not None and not os.path.exists(current):
if default is not None and os.path.exists(default):
reason_str = "'{}' not found.".format(current)
if current is None:
reason_str = "Parameter '{}' not set.".format(parameter)
logger.debug(
"{} Using default location '{}' instead.".format(reason_str, default)
)
current = default
elif none_is_valid:
current = None
else:
cancel_cause_not_found(current, parameter, default)
return current
|
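A small sketch of the fallback behaviour above, assuming the surrounding module (with its logger and cancel_cause_not_found helper) is importable:
import tempfile

with tempfile.NamedTemporaryFile() as default_file:
    # current not set -> fall back to an existing default
    assert get_validated_path(None, "model", default=default_file.name) == default_file.name

# missing path, no usable default, but None is acceptable
assert get_validated_path("does/not/exist", "model", none_is_valid=True) is None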