Columns:
    id        int64     values 11 to 59.9k
    original  string    lengths 33 to 150k
    modified  string    lengths 37 to 150k
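Each record pairs an original snippet with its modified counterpart under a shared integer id. As a minimal illustration of how the two string columns relate, here is a short sketch using only the Python standard library; the example pair is copied verbatim from record 751 below.

import difflib

# Original/modified pair taken verbatim from record 751 below, used only to
# illustrate how the two string columns of each record relate.
original = "def covariance_from_precision(Q): return np.linalg.inv(Q)"
modified = "def covariance_from_precision(qq): return np.linalg.inv(Q)"

# Word-level unified diff: '-' lines exist only in `original`,
# '+' lines exist only in `modified`.
for line in difflib.unified_diff(original.split(), modified.split(), lineterm=""):
    print(line)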
40,063
def _concrete_call(
    call: Call, state: GlobalState, address: int, meminstart: Variable
) -> list:
    """
    :param call: The current call's information
    :param state: The current state
    :param address: The PC address
    :param meminstart: memory starting position
    :return: issues
    """
    if not re.search(r"calldata.*\[0", str(state.mstate.memory[meminstart.val])):
        return []

    issue = Issue(
        contract=call.node.contract_name,
        function_name=call.node.function_name,
        address=address,
        swc_id=DELEGATECALL_TO_UNTRUSTED_CONTRACT,
        bytecode=state.environment.code.bytecode,
        title="Call data forwarded with delegatecall()",
        _type="Informational",
        gas_used=(state.mstate.min_gas_used, state.mstate.max_gas_used),
    )
    issue.description = (
        "This contract forwards its call data via DELEGATECALL in its fallback function. "
        "This means that any function in the called contract can be executed. Note that the callee contract will have "
        "access to the storage of the calling contract.\n"
    )
    target = hex(call.to.val) if call.to.type == VarType.CONCRETE else str(call.to)
    issue.description += "DELEGATECALL target: {}".format(target)
    return [issue]
def _concrete_call(
    call: Call, state: GlobalState, address: int, meminstart: Variable
) -> List[Issue]:
    """
    :param call: The current call's information
    :param state: The current state
    :param address: The PC address
    :param meminstart: memory starting position
    :return: issues
    """
    if not re.search(r"calldata.*\[0", str(state.mstate.memory[meminstart.val])):
        return []

    issue = Issue(
        contract=call.node.contract_name,
        function_name=call.node.function_name,
        address=address,
        swc_id=DELEGATECALL_TO_UNTRUSTED_CONTRACT,
        bytecode=state.environment.code.bytecode,
        title="Call data forwarded with delegatecall()",
        _type="Informational",
        gas_used=(state.mstate.min_gas_used, state.mstate.max_gas_used),
    )
    issue.description = (
        "This contract forwards its call data via DELEGATECALL in its fallback function. "
        "This means that any function in the called contract can be executed. Note that the callee contract will have "
        "access to the storage of the calling contract.\n"
    )
    target = hex(call.to.val) if call.to.type == VarType.CONCRETE else str(call.to)
    issue.description += "DELEGATECALL target: {}".format(target)
    return [issue]
19,897
def _simplify_ast(raw_ast):
    """Simplify an AST that comes out of the parser

    As well as replacing pyparsing's ParseResults with bare lists, this
    merges adjacent non-condition words .

    For example, "a b" parses to ["a", "b"]. This function merges that to
    ["a b"]. The idea is that this will be much more efficient to match
    against tags for the vast majority of ASTs, which have many more raw
    words than they have conditions.

    A simplified AST is a list whose items are strings (representing bare
    words) or tuples of the form (negated, flag, ast), where negated is a
    bool, flag is a string and ast is another simplified ast.
    """
    children = []
    str_acc = []
    for expr in raw_ast:
        if isinstance(expr, str):
            str_acc.append(expr)
            continue

        # We have a term that isn't a string. This must be a conditional. Join
        # together any items in str_acc and add them to children then recurse
        # to simplify the conditional's sub-expression.
        if str_acc:
            children.append(" ".join(str_acc))
            str_acc = []
        negated, flag, exprs = expr
        children.append((negated, flag, _simplify_ast(exprs)))

    if str_acc:
        children.append(" ".join(str_acc))

    return children
def _simplify_ast(raw_ast):
    """Simplify an AST that comes out of the parser

    As well as replacing pyparsing's ParseResults with bare lists, this
    merges adjacent non-condition words.

    For example, "a b" parses to ["a", "b"]. This function merges that to
    ["a b"]. The idea is that this will be much more efficient to match
    against tags for the vast majority of ASTs, which have many more raw
    words than they have conditions.

    A simplified AST is a list whose items are strings (representing bare
    words) or tuples of the form (negated, flag, ast), where negated is a
    bool, flag is a string and ast is another simplified ast.
    """
    children = []
    str_acc = []
    for expr in raw_ast:
        if isinstance(expr, str):
            str_acc.append(expr)
            continue

        # We have a term that isn't a string. This must be a conditional. Join
        # together any items in str_acc and add them to children then recurse
        # to simplify the conditional's sub-expression.
        if str_acc:
            children.append(" ".join(str_acc))
            str_acc = []
        negated, flag, exprs = expr
        children.append((negated, flag, _simplify_ast(exprs)))

    if str_acc:
        children.append(" ".join(str_acc))

    return children
25,756
def iplot(network, fig=None, bus_colors='blue', bus_colorscale=None, bus_colorbar=None, bus_sizes=10, bus_text=None, line_colors='green', line_widths=2, line_text=None, layouter=None, title="", size=None, branch_components=['Line', 'Link'], iplot=True, jitter=None, mapbox=False, mapbox_style='open-street-map', mapbox_token="", mapbox_parameters={}): """ Plot the network buses and lines interactively using plotly. Parameters ---------- fig : dict, default None If not None, figure is built upon this fig. bus_colors : dict/pandas.Series Colors for the buses, defaults to "b" bus_colorscale : string Name of colorscale if bus_colors are floats, e.g. 'Jet', 'Viridis' bus_colorbar : dict Plotly colorbar, e.g. {'title' : 'my colorbar'} bus_sizes : dict/pandas.Series Sizes of bus points, defaults to 10 bus_text : dict/pandas.Series Text for each bus, defaults to bus names line_colors : dict/pandas.Series Colors for the lines, defaults to "g" for Lines and "cyan" for Links. Colors for branches other than Lines can be specified using a pandas Series with a MultiIndex. line_widths : dict/pandas.Series Widths of lines, defaults to 2. Widths for branches other than Lines can be specified using a pandas Series with a MultiIndex. line_text : dict/pandas.Series Text for lines, defaults to line names. Text for branches other than Lines can be specified using a pandas Series with a MultiIndex. layouter : networkx.drawing.layout function, default None Layouting function from `networkx <https://networkx.github.io/>`_ which overrules coordinates given in ``network.buses[['x','y']]``. See `list <https://networkx.github.io/documentation/stable/reference/drawing.html#module-networkx.drawing.layout>`_ of available options. title : string Graph title size : None|tuple Tuple specifying width and height of figure; e.g. (width, heigh). branch_components : list of str Branch components to be plotted, defaults to Line and Link. iplot : bool, default True Automatically do an interactive plot of the figure. jitter : None|float Amount of random noise to add to bus positions to distinguish overlapping buses mapbox : bool, default False Switch to use Mapbox. mapbox_style : str, defaul 'open-street-map' Define the mapbox layout style of the interactive plot. If this is set to a mapbox layout, the argument mapbox_token must be a valid Mapbox API access token. Valid open layouts are: open-street-map, white-bg, carto-positron, carto-darkmatter, stamen-terrain, stamen-toner, stamen-watercolor Valid mapbox layouts are: basic, streets, outdoors, light, dark, satellite, satellite-streets mapbox_token : string Mapbox API access token. Obtain from https://www.mapbox.com. Can also be included in mapbox_parameters as `accesstoken=mapbox_token`. mapbox_parameters : dict Configuration parameters of the Mapbox layout. E.g. {"bearing": 5, "pitch": 10, "zoom": 1, "style": 'dark'}. 
Returns ------- fig: dictionary for plotly figure """ defaults_for_branches = { 'Link': dict(color="cyan", width=2), 'Line': dict(color="blue", width=2), 'Transformer': dict(color='green', width=2) } if fig is None: fig = dict(data=[],layout={}) if bus_text is None: bus_text = 'Bus ' + network.buses.index x, y = _get_coordinates(network, layouter=layouter) if jitter is not None: x = x + np.random.uniform(low=-jitter, high=jitter, size=len(x)) y = y + np.random.uniform(low=-jitter, high=jitter, size=len(y)) bus_trace = dict(x=x, y=y, text=bus_text, type="scatter", mode="markers", hoverinfo="text", marker=dict(color=bus_colors, size=bus_sizes), ) if bus_colorscale is not None: bus_trace['marker']['colorscale'] = bus_colorscale if bus_colorbar is not None: bus_trace['marker']['colorbar'] = bus_colorbar def as_branch_series(ser): if isinstance(ser, dict) and set(ser).issubset(branch_components): return pd.Series(ser) elif isinstance(ser, pd.Series): if isinstance(ser.index, pd.MultiIndex): return ser index = ser.index ser = ser.values else: index = network.lines.index return pd.Series(ser, index=pd.MultiIndex(levels=(["Line"], index), labels=(np.zeros(len(index)), np.arange(len(index))))) line_colors = as_branch_series(line_colors) line_widths = as_branch_series(line_widths) if line_text is not None: line_text = as_branch_series(line_text) shapes = [] shape_traces = [] for c in network.iterate_components(branch_components): l_defaults = defaults_for_branches[c.name] l_widths = line_widths.get(c.name, l_defaults['width']) l_colors = line_colors.get(c.name, l_defaults['color']) if line_text is None: l_text = c.name + ' ' + c.df.index else: l_text = line_text.get(c.name) if isinstance(l_colors, pd.Series): if issubclass(l_colors.dtype.type, np.number): l_colors = None else: l_colors.fillna(l_defaults['color'], inplace=True) x0 = c.df.bus0.map(x) x1 = c.df.bus1.map(x) y0 = c.df.bus0.map(y) y1 = c.df.bus1.map(y) for line in c.df.index: color = l_colors if isinstance(l_colors, string_types) else l_colors[line] width = l_widths if isinstance(l_widths, (int, float)) else l_widths[line] shapes.append(dict(type='line', x0=x0[line], y0=y0[line], x1=x1[line], y1=y1[line], opacity=0.7, line=dict(color=color, width=width))) shape_traces.append(dict(x=0.5*(x0+x1), y=0.5*(y0+y1), text=l_text, type="scatter", mode="markers", hoverinfo="text", marker=dict(opacity=0.))) if mapbox: shape_traces_latlon = [] for st in shape_traces: st['lon'] = st.pop('x') st['lat'] = st.pop('y') shape_traces_latlon.append(go.Scattermapbox(st)) shape_traces = shape_traces_latlon shapes_mapbox = [] for s in shapes: s['lon'] = [s.pop('x0'), s.pop('x1')] s['lat'] = [s.pop('y0'), s.pop('y1')] shapes_mapbox.append(go.Scattermapbox(s, mode='lines')) shapes = shapes_mapbox bus_trace['lon'] = bus_trace.pop('x') bus_trace['lat'] = bus_trace.pop('y') bus_trace = go.Scattermapbox(bus_trace) fig['data'].extend(shapes + shape_traces + [bus_trace]) else: fig['data'].extend([bus_trace]+shape_traces) fig['layout'].update(dict(title=title, hovermode='closest', showlegend=False)) if size is not None: assert len(size) == 2, "Parameter size must specify a tuple (width, height)." 
fig['layout'].update(dict(width=size[0], height=size[1])) if mapbox: if mapbox_token != "": mapbox_parameters['accesstoken'] = mapbox_token mapbox_parameters.setdefault('style', mapbox_style) if mapbox_parameters['style'] in _token_required_mb_styles: assert 'accesstoken' in mapbox_parameters.keys(), ("Using Mapbox " "layout styles requires a valid access token from https://www.mapbox.com/, " f"style which do not require a token are:\n{', '.join(_open__mb_styles)}.") if 'center' not in mapbox_parameters.keys(): lon=(network.buses.x.min() + network.buses.x.max()) / 2 lat=(network.buses.y.min() + network.buses.y.max()) / 2 mapbox_parameters['center'] = dict(lat=lat, lon=lon) if 'zoom' not in mapbox_parameters.keys(): mapbox_parameters['zoom'] = 2 fig['layout']['mapbox'] = mapbox_parameters else: fig['layout']['shapes'] = shapes if iplot: if not pltly_present: logger.warning("Plotly is not present, so interactive plotting won't work.") else: pltly.iplot(fig) return fig
def iplot(network, fig=None, bus_colors='blue', bus_colorscale=None, bus_colorbar=None, bus_sizes=10, bus_text=None, line_colors='green', line_widths=2, line_text=None, layouter=None, title="", size=None, branch_components=['Line', 'Link'], iplot=True, jitter=None, mapbox=False, mapbox_style='open-street-map', mapbox_token="", mapbox_parameters={}): """ Plot the network buses and lines interactively using plotly. Parameters ---------- fig : dict, default None If not None, figure is built upon this fig. bus_colors : dict/pandas.Series Colors for the buses, defaults to "b" bus_colorscale : string Name of colorscale if bus_colors are floats, e.g. 'Jet', 'Viridis' bus_colorbar : dict Plotly colorbar, e.g. {'title' : 'my colorbar'} bus_sizes : dict/pandas.Series Sizes of bus points, defaults to 10 bus_text : dict/pandas.Series Text for each bus, defaults to bus names line_colors : dict/pandas.Series Colors for the lines, defaults to "g" for Lines and "cyan" for Links. Colors for branches other than Lines can be specified using a pandas Series with a MultiIndex. line_widths : dict/pandas.Series Widths of lines, defaults to 2. Widths for branches other than Lines can be specified using a pandas Series with a MultiIndex. line_text : dict/pandas.Series Text for lines, defaults to line names. Text for branches other than Lines can be specified using a pandas Series with a MultiIndex. layouter : networkx.drawing.layout function, default None Layouting function from `networkx <https://networkx.github.io/>`_ which overrules coordinates given in ``network.buses[['x','y']]``. See `list <https://networkx.github.io/documentation/stable/reference/drawing.html#module-networkx.drawing.layout>`_ of available options. title : string Graph title size : None|tuple Tuple specifying width and height of figure; e.g. (width, heigh). branch_components : list of str Branch components to be plotted, defaults to Line and Link. iplot : bool, default True Automatically do an interactive plot of the figure. jitter : None|float Amount of random noise to add to bus positions to distinguish overlapping buses mapbox : bool, default False Switch to use Mapbox. mapbox_style : str, default 'open-street-map' Define the mapbox layout style of the interactive plot. If this is set to a mapbox layout, the argument mapbox_token must be a valid Mapbox API access token. Valid open layouts are: open-street-map, white-bg, carto-positron, carto-darkmatter, stamen-terrain, stamen-toner, stamen-watercolor Valid mapbox layouts are: basic, streets, outdoors, light, dark, satellite, satellite-streets mapbox_token : string Mapbox API access token. Obtain from https://www.mapbox.com. Can also be included in mapbox_parameters as `accesstoken=mapbox_token`. mapbox_parameters : dict Configuration parameters of the Mapbox layout. E.g. {"bearing": 5, "pitch": 10, "zoom": 1, "style": 'dark'}. 
Returns ------- fig: dictionary for plotly figure """ defaults_for_branches = { 'Link': dict(color="cyan", width=2), 'Line': dict(color="blue", width=2), 'Transformer': dict(color='green', width=2) } if fig is None: fig = dict(data=[],layout={}) if bus_text is None: bus_text = 'Bus ' + network.buses.index x, y = _get_coordinates(network, layouter=layouter) if jitter is not None: x = x + np.random.uniform(low=-jitter, high=jitter, size=len(x)) y = y + np.random.uniform(low=-jitter, high=jitter, size=len(y)) bus_trace = dict(x=x, y=y, text=bus_text, type="scatter", mode="markers", hoverinfo="text", marker=dict(color=bus_colors, size=bus_sizes), ) if bus_colorscale is not None: bus_trace['marker']['colorscale'] = bus_colorscale if bus_colorbar is not None: bus_trace['marker']['colorbar'] = bus_colorbar def as_branch_series(ser): if isinstance(ser, dict) and set(ser).issubset(branch_components): return pd.Series(ser) elif isinstance(ser, pd.Series): if isinstance(ser.index, pd.MultiIndex): return ser index = ser.index ser = ser.values else: index = network.lines.index return pd.Series(ser, index=pd.MultiIndex(levels=(["Line"], index), labels=(np.zeros(len(index)), np.arange(len(index))))) line_colors = as_branch_series(line_colors) line_widths = as_branch_series(line_widths) if line_text is not None: line_text = as_branch_series(line_text) shapes = [] shape_traces = [] for c in network.iterate_components(branch_components): l_defaults = defaults_for_branches[c.name] l_widths = line_widths.get(c.name, l_defaults['width']) l_colors = line_colors.get(c.name, l_defaults['color']) if line_text is None: l_text = c.name + ' ' + c.df.index else: l_text = line_text.get(c.name) if isinstance(l_colors, pd.Series): if issubclass(l_colors.dtype.type, np.number): l_colors = None else: l_colors.fillna(l_defaults['color'], inplace=True) x0 = c.df.bus0.map(x) x1 = c.df.bus1.map(x) y0 = c.df.bus0.map(y) y1 = c.df.bus1.map(y) for line in c.df.index: color = l_colors if isinstance(l_colors, string_types) else l_colors[line] width = l_widths if isinstance(l_widths, (int, float)) else l_widths[line] shapes.append(dict(type='line', x0=x0[line], y0=y0[line], x1=x1[line], y1=y1[line], opacity=0.7, line=dict(color=color, width=width))) shape_traces.append(dict(x=0.5*(x0+x1), y=0.5*(y0+y1), text=l_text, type="scatter", mode="markers", hoverinfo="text", marker=dict(opacity=0.))) if mapbox: shape_traces_latlon = [] for st in shape_traces: st['lon'] = st.pop('x') st['lat'] = st.pop('y') shape_traces_latlon.append(go.Scattermapbox(st)) shape_traces = shape_traces_latlon shapes_mapbox = [] for s in shapes: s['lon'] = [s.pop('x0'), s.pop('x1')] s['lat'] = [s.pop('y0'), s.pop('y1')] shapes_mapbox.append(go.Scattermapbox(s, mode='lines')) shapes = shapes_mapbox bus_trace['lon'] = bus_trace.pop('x') bus_trace['lat'] = bus_trace.pop('y') bus_trace = go.Scattermapbox(bus_trace) fig['data'].extend(shapes + shape_traces + [bus_trace]) else: fig['data'].extend([bus_trace]+shape_traces) fig['layout'].update(dict(title=title, hovermode='closest', showlegend=False)) if size is not None: assert len(size) == 2, "Parameter size must specify a tuple (width, height)." 
fig['layout'].update(dict(width=size[0], height=size[1])) if mapbox: if mapbox_token != "": mapbox_parameters['accesstoken'] = mapbox_token mapbox_parameters.setdefault('style', mapbox_style) if mapbox_parameters['style'] in _token_required_mb_styles: assert 'accesstoken' in mapbox_parameters.keys(), ("Using Mapbox " "layout styles requires a valid access token from https://www.mapbox.com/, " f"style which do not require a token are:\n{', '.join(_open__mb_styles)}.") if 'center' not in mapbox_parameters.keys(): lon=(network.buses.x.min() + network.buses.x.max()) / 2 lat=(network.buses.y.min() + network.buses.y.max()) / 2 mapbox_parameters['center'] = dict(lat=lat, lon=lon) if 'zoom' not in mapbox_parameters.keys(): mapbox_parameters['zoom'] = 2 fig['layout']['mapbox'] = mapbox_parameters else: fig['layout']['shapes'] = shapes if iplot: if not pltly_present: logger.warning("Plotly is not present, so interactive plotting won't work.") else: pltly.iplot(fig) return fig
751
def covariance_from_precision(Q): return np.linalg.inv(Q)
def covariance_from_precision(qq): return np.linalg.inv(Q)
7,437
def _clip_warp_output(input_image, output_image, mode, cval, clip):
    """Clip output image to range of values of input image.

    Note that this function modifies the values of `output_image` in-place
    and it is only modified if ``clip=True``.

    Parameters
    ----------
    input_image : ndarray
        Input image.
    output_image : ndarray
        Output image, which is modified in-place.

    Other parameters
    ----------------
    mode : {'constant', 'edge', 'symmetric', 'reflect', 'wrap'}
        Points outside the boundaries of the input are filled according
        to the given mode.  Modes match the behaviour of `numpy.pad`.
    cval : float
        Used in conjunction with mode 'constant', the value outside
        the image boundaries.
    clip : bool
        Whether to clip the output to the range of values of the input image.
        This is enabled by default, since higher order interpolation may
        produce values outside the given input range.

    """
    if clip:
        min_val = np.nanmin(input_image)
        max_val = np.nanmax(input_image)

        # Check if cval has been used such that it expands the effective input range
        preserve_cval = (mode == 'constant'
                         and not np.isnan(cval)
                         and not min_val <= cval <= max_val
                         and np.nanmin(output_image) <= cval <= np.nanmax(output_image))

        # Otherwise, set cval to be within the input range for clipping purposes
        if not preserve_cval:
            cval = min_val

        np.clip(output_image, np.min([min_val, cval]), np.max([max_val, cval]),
                out=output_image)
def _clip_warp_output(input_image, output_image, mode, cval, clip):
    """Clip output image to range of values of input image.

    Note that this function modifies the values of `output_image` in-place
    and it is only modified if ``clip=True``.

    Parameters
    ----------
    input_image : ndarray
        Input image.
    output_image : ndarray
        Output image, which is modified in-place.

    Other parameters
    ----------------
    mode : {'constant', 'edge', 'symmetric', 'reflect', 'wrap'}
        Points outside the boundaries of the input are filled according
        to the given mode.  Modes match the behaviour of `numpy.pad`.
    cval : float
        Used in conjunction with mode 'constant', the value outside
        the image boundaries.
    clip : bool
        Whether to clip the output to the range of values of the input image.
        This is enabled by default, since higher order interpolation may
        produce values outside the given input range.

    """
    if clip:
        min_val = np.nanmin(input_image)
        max_val = np.nanmax(input_image)

        # Check if cval has been used such that it expands the effective input range
        preserve_cval = (mode == 'constant'
                         and not np.isnan(cval)
                         and not min_val <= cval <= max_val
                         and np.nanmin(output_image) <= cval <= np.nanmax(output_image))

        # Otherwise, set cval to be within the input range for clipping purposes
        if not preserve_cval:
            cval = min_val

        np.clip(output_image, min(min_val, cval), max(max_val, cval),
                out=output_image)
7,189
def test_3d_motion():
    # Generate synthetic data
    rnd = np.random.RandomState(0)
    image0 = rnd.normal(size=(128, 128, 128))
    gt_flow, image1 = _sin_flow_gen(image0)
    # Estimate the flow
    flow = optical_flow_ilk(image0, image1)
    # Assert that the average absolute error is less then half a pixel
    assert abs(flow - gt_flow) .mean() < 0.5
def test_3d_motion():
    # Generate synthetic data
    rnd = np.random.RandomState(0)
    image0 = rnd.normal(size=(128, 128, 128))
    gt_flow, image1 = _sin_flow_gen(image0)
    # Estimate the flow
    flow = optical_flow_ilk(image0, image1)
    # Assert that the average absolute error is less then half a pixel
    assert abs(flow - gt_flow).mean() < 0.5
4,415
def _plot_mri_contours(*, mri_fname, surfaces, src, orientation='coronal', slices=None, show=True, show_indices=False, show_orientation=False, width=512, slices_as_subplots=True, fig_kind='figure'): """Plot BEM contours on anatomical MRI slices. Parameters ---------- slices_as_subplots : bool Whether to add all slices as subplots to a single figure, or to create a new figure for each slice. fig_kind : 'figure' | 'array' Whether to return slices as Matlotlib figures or NumPy arrays. Returns ------- array | list of array | matplotlib.figure.Figure | list of matplotlib.figure.Figure The plotted slices. """ import matplotlib.pyplot as plt from matplotlib import patheffects # For ease of plotting, we will do everything in voxel coordinates. _validate_type(show_orientation, (bool, str), 'show_orientation') if isinstance(show_orientation, str): _check_option('show_orientation', show_orientation, ('always',), extra='when str') _check_option('orientation', orientation, ('coronal', 'axial', 'sagittal')) # Load the T1 data _, _, _, _, _, nim = _read_mri_info( mri_fname, units='mm', return_img=True) data, rasvox_mri_t = _reorient_image(nim) mri_rasvox_t = np.linalg.inv(rasvox_mri_t) axis, x, y = _mri_orientation(orientation) n_slices = data.shape[axis] # if no slices were specified, pick some equally-spaced ones automatically if slices is None: slices = np.round( np.linspace( start=0, stop=n_slices - 1, num=14 ) ).astype(int) # omit first and last one (not much brain visible there anyway…) slices = slices[1:-1] slices = np.atleast_1d(slices).copy() slices[slices < 0] += n_slices # allow negative indexing if not np.array_equal(np.sort(slices), slices) or slices.ndim != 1 or \ slices.size < 1 or slices[0] < 0 or slices[-1] >= n_slices or \ slices.dtype.kind not in 'iu': raise ValueError('slices must be a sorted 1D array of int with unique ' 'elements, at least one element, and no elements ' 'greater than %d, got %s' % (n_slices - 1, slices)) # create of list of surfaces surfs = list() for file_name, color in surfaces: surf = dict() surf['rr'], surf['tris'] = read_surface(file_name) # move surface to voxel coordinate system surf['rr'] = apply_trans(mri_rasvox_t, surf['rr']) surfs.append((surf, color)) sources = list() if src is not None: _ensure_src(src, extra=' or None') # Eventually we can relax this by allowing ``trans`` if need be if src[0]['coord_frame'] != FIFF.FIFFV_COORD_MRI: raise ValueError( 'Source space must be in MRI coordinates, got ' f'{_frame_to_str[src[0]["coord_frame"]]}') for src_ in src: points = src_['rr'][src_['inuse'].astype(bool)] sources.append(apply_trans(mri_rasvox_t, points * 1e3)) sources = np.concatenate(sources, axis=0) # get the figure dimensions right if slices_as_subplots: n_col = 4 fig, axs, _, _ = _prepare_trellis(len(slices), n_col) fig.set_facecolor('k') dpi = fig.get_dpi() n_axes = len(axs) else: n_col = n_axes = 1 dpi = 96 # 2x standard MRI resolution is probably good enough for the # traces w = width / dpi figsize = (w, w / data.shape[x] * data.shape[y]) fig, axs, _, _ = _prepare_trellis(len(slices), n_col) plt.close(fig) # we'll create a figure for each slice bounds = np.concatenate( [[-np.inf], slices[:-1] + np.diff(slices) / 2., [np.inf]] ) # float slicer = [slice(None)] * 3 ori_labels = dict(R='LR', A='PA', S='IS') xlabels, ylabels = ori_labels['RAS'[x]], ori_labels['RAS'[y]] path_effects = [patheffects.withStroke(linewidth=4, foreground="k", alpha=0.75)] figs = [] for ai, (sl, lower, upper) in enumerate( zip(slices, bounds[:-1], bounds[1:]) ): if 
slices_as_subplots: ax = axs[ai] else: fig = _figure_agg(figsize=figsize, dpi=dpi, facecolor='k') ax = fig.add_axes([0, 0, 1, 1], frame_on=False, facecolor='k') # adjust the orientations for good view slicer[axis] = sl dat = data[tuple(slicer)].T # First plot the anatomical data ax.imshow(dat, cmap=plt.cm.gray, origin='lower') ax.set_autoscale_on(False) ax.axis('off') ax.set_aspect('equal') # XXX eventually could deal with zooms # and then plot the contours on top for surf, color in surfs: with warnings.catch_warnings(record=True): # ignore contour warn warnings.simplefilter('ignore') ax.tricontour(surf['rr'][:, x], surf['rr'][:, y], surf['tris'], surf['rr'][:, axis], levels=[sl], colors=color, linewidths=1.0, zorder=1) if len(sources): in_slice = (sources[:, axis] >= lower) & (sources[:, axis] < upper) ax.scatter(sources[in_slice, x], sources[in_slice, y], marker='.', color='#FF00FF', s=1, zorder=2) if show_indices: ax.text(dat.shape[1] // 8 + 0.5, 0.5, str(sl), color='w', fontsize='x-small', va='bottom', ha='left') # label the axes kwargs = dict( color='#66CCEE', fontsize='medium', path_effects=path_effects, family='monospace', clip_on=False, zorder=5, weight='bold') always = (show_orientation == 'always') if show_orientation: if ai % n_col == 0 or always: # left ax.text(0, dat.shape[0] / 2., xlabels[0], va='center', ha='left', **kwargs) if ai % n_col == n_col - 1 or ai == n_axes - 1 or always: # right ax.text(dat.shape[1] - 1, dat.shape[0] / 2., xlabels[1], va='center', ha='right', **kwargs) if ai >= n_axes - n_col or always: # bottom ax.text(dat.shape[1] / 2., 0, ylabels[0], ha='center', va='bottom', **kwargs) if ai < n_col or n_col == 1 or always: # top ax.text(dat.shape[1] / 2., dat.shape[0] - 1, ylabels[1], ha='center', va='top', **kwargs) if fig_kind == 'array': with io.BytesIO() as buff: fig.savefig( buff, format='raw', bbox_inches='tight', pad_inches=0, dpi=dpi ) w_, h_ = fig.canvas.get_width_height() plt.close(fig) buff.seek(0) fig_array = np.frombuffer(buff.getvalue(), dtype=np.uint8) fig = fig_array.reshape((int(h_), int(w_), -1)) if not slices_as_subplots: figs.append(fig) if slices_as_subplots: fig.subplots_adjust(left=0., bottom=0., right=1., top=1., wspace=0., hspace=0.) plt_show(show, fig=fig) return fig else: return figs
def _plot_mri_contours(*, mri_fname, surfaces, src, orientation='coronal', slices=None, show=True, show_indices=False, show_orientation=False, width=512, slices_as_subplots=True, fig_kind='figure'): """Plot BEM contours on anatomical MRI slices. Parameters ---------- slices_as_subplots : bool Whether to add all slices as subplots to a single figure, or to create a new figure for each slice. fig_kind : 'figure' | 'array' Whether to return Matplotlib figures or NumPy arrays. Returns ------- array | list of array | matplotlib.figure.Figure | list of matplotlib.figure.Figure The plotted slices. """ import matplotlib.pyplot as plt from matplotlib import patheffects # For ease of plotting, we will do everything in voxel coordinates. _validate_type(show_orientation, (bool, str), 'show_orientation') if isinstance(show_orientation, str): _check_option('show_orientation', show_orientation, ('always',), extra='when str') _check_option('orientation', orientation, ('coronal', 'axial', 'sagittal')) # Load the T1 data _, _, _, _, _, nim = _read_mri_info( mri_fname, units='mm', return_img=True) data, rasvox_mri_t = _reorient_image(nim) mri_rasvox_t = np.linalg.inv(rasvox_mri_t) axis, x, y = _mri_orientation(orientation) n_slices = data.shape[axis] # if no slices were specified, pick some equally-spaced ones automatically if slices is None: slices = np.round( np.linspace( start=0, stop=n_slices - 1, num=14 ) ).astype(int) # omit first and last one (not much brain visible there anyway…) slices = slices[1:-1] slices = np.atleast_1d(slices).copy() slices[slices < 0] += n_slices # allow negative indexing if not np.array_equal(np.sort(slices), slices) or slices.ndim != 1 or \ slices.size < 1 or slices[0] < 0 or slices[-1] >= n_slices or \ slices.dtype.kind not in 'iu': raise ValueError('slices must be a sorted 1D array of int with unique ' 'elements, at least one element, and no elements ' 'greater than %d, got %s' % (n_slices - 1, slices)) # create of list of surfaces surfs = list() for file_name, color in surfaces: surf = dict() surf['rr'], surf['tris'] = read_surface(file_name) # move surface to voxel coordinate system surf['rr'] = apply_trans(mri_rasvox_t, surf['rr']) surfs.append((surf, color)) sources = list() if src is not None: _ensure_src(src, extra=' or None') # Eventually we can relax this by allowing ``trans`` if need be if src[0]['coord_frame'] != FIFF.FIFFV_COORD_MRI: raise ValueError( 'Source space must be in MRI coordinates, got ' f'{_frame_to_str[src[0]["coord_frame"]]}') for src_ in src: points = src_['rr'][src_['inuse'].astype(bool)] sources.append(apply_trans(mri_rasvox_t, points * 1e3)) sources = np.concatenate(sources, axis=0) # get the figure dimensions right if slices_as_subplots: n_col = 4 fig, axs, _, _ = _prepare_trellis(len(slices), n_col) fig.set_facecolor('k') dpi = fig.get_dpi() n_axes = len(axs) else: n_col = n_axes = 1 dpi = 96 # 2x standard MRI resolution is probably good enough for the # traces w = width / dpi figsize = (w, w / data.shape[x] * data.shape[y]) fig, axs, _, _ = _prepare_trellis(len(slices), n_col) plt.close(fig) # we'll create a figure for each slice bounds = np.concatenate( [[-np.inf], slices[:-1] + np.diff(slices) / 2., [np.inf]] ) # float slicer = [slice(None)] * 3 ori_labels = dict(R='LR', A='PA', S='IS') xlabels, ylabels = ori_labels['RAS'[x]], ori_labels['RAS'[y]] path_effects = [patheffects.withStroke(linewidth=4, foreground="k", alpha=0.75)] figs = [] for ai, (sl, lower, upper) in enumerate( zip(slices, bounds[:-1], bounds[1:]) ): if slices_as_subplots: 
ax = axs[ai] else: fig = _figure_agg(figsize=figsize, dpi=dpi, facecolor='k') ax = fig.add_axes([0, 0, 1, 1], frame_on=False, facecolor='k') # adjust the orientations for good view slicer[axis] = sl dat = data[tuple(slicer)].T # First plot the anatomical data ax.imshow(dat, cmap=plt.cm.gray, origin='lower') ax.set_autoscale_on(False) ax.axis('off') ax.set_aspect('equal') # XXX eventually could deal with zooms # and then plot the contours on top for surf, color in surfs: with warnings.catch_warnings(record=True): # ignore contour warn warnings.simplefilter('ignore') ax.tricontour(surf['rr'][:, x], surf['rr'][:, y], surf['tris'], surf['rr'][:, axis], levels=[sl], colors=color, linewidths=1.0, zorder=1) if len(sources): in_slice = (sources[:, axis] >= lower) & (sources[:, axis] < upper) ax.scatter(sources[in_slice, x], sources[in_slice, y], marker='.', color='#FF00FF', s=1, zorder=2) if show_indices: ax.text(dat.shape[1] // 8 + 0.5, 0.5, str(sl), color='w', fontsize='x-small', va='bottom', ha='left') # label the axes kwargs = dict( color='#66CCEE', fontsize='medium', path_effects=path_effects, family='monospace', clip_on=False, zorder=5, weight='bold') always = (show_orientation == 'always') if show_orientation: if ai % n_col == 0 or always: # left ax.text(0, dat.shape[0] / 2., xlabels[0], va='center', ha='left', **kwargs) if ai % n_col == n_col - 1 or ai == n_axes - 1 or always: # right ax.text(dat.shape[1] - 1, dat.shape[0] / 2., xlabels[1], va='center', ha='right', **kwargs) if ai >= n_axes - n_col or always: # bottom ax.text(dat.shape[1] / 2., 0, ylabels[0], ha='center', va='bottom', **kwargs) if ai < n_col or n_col == 1 or always: # top ax.text(dat.shape[1] / 2., dat.shape[0] - 1, ylabels[1], ha='center', va='top', **kwargs) if fig_kind == 'array': with io.BytesIO() as buff: fig.savefig( buff, format='raw', bbox_inches='tight', pad_inches=0, dpi=dpi ) w_, h_ = fig.canvas.get_width_height() plt.close(fig) buff.seek(0) fig_array = np.frombuffer(buff.getvalue(), dtype=np.uint8) fig = fig_array.reshape((int(h_), int(w_), -1)) if not slices_as_subplots: figs.append(fig) if slices_as_subplots: fig.subplots_adjust(left=0., bottom=0., right=1., top=1., wspace=0., hspace=0.) plt_show(show, fig=fig) return fig else: return figs
37,059
def pass_manager_drawer(pass_manager, filename, style=None): """ Draws the pass manager. This function needs `pydot <https://github.com/erocarrera/pydot>`, which in turn needs Graphviz <https://www.graphviz.org/>` to be installed. Args: pass_manager (PassManager): the pass manager to be drawn filename (str): file path to save image to style (dict or OrderedDict): keys are the pass classes and the values are the colors to make them. An example can be seen in the DEFAULT_STYLE. An ordered dict can be used to ensure a priority coloring when pass falls into multiple categories. Any values not included in the provided dict will be filled in from the default dict Raises: ImportError: when nxpd or pydot not installed. """ try: import pydot if not HAS_GRAPHVIZ: raise ImportError except ImportError: raise ImportError("pass_manager_drawer requires pydot and graphviz. " "Run 'pip install pydot'. " "Graphviz can be installed using 'brew install graphviz' on Mac" " or by downloading it from the website.") passes = pass_manager.passes() if not style: style = DEFAULT_STYLE # create the overall graph graph = pydot.Dot() # identifiers for nodes need to be unique, so assign an id # can't just use python's id in case the exact same pass was # appended more than once node_id = 0 prev_node = None for controller_group in passes: # label is the name of the flow controller (without the word controller) label = controller_group['type'].__name__.replace('Controller', '') # create the subgraph for this controller subgraph = pydot.Cluster(str(id(controller_group)), label=label) for pss in controller_group['passes']: # label is the name of the pass node = pydot.Node(str(node_id), label=str(type(pss).__name__), color=_get_node_color(pss, style), shape="rectangle") subgraph.add_node(node) node_id += 1 # the arguments that were provided to the pass when it was created arg_spec = inspect.getfullargspec(pss.__init__) # 0 is the args, 1: to remove the self arg args = arg_spec[0][1:] num_optional = len(arg_spec[3]) if arg_spec[3] else 0 # add in the inputs to the pass for arg_index, arg in enumerate(args): nd_style = 'solid' # any optional args are dashed # the num of optional counts from the end towards the start of the list if arg_index >= (len(args) - num_optional): nd_style = 'dashed' input_node = pydot.Node(node_id, label=arg, color="black", shape="ellipse", fontsize=10, style=nd_style) subgraph.add_node(input_node) node_id += 1 subgraph.add_edge(pydot.Edge(input_node, node)) # if there is a previous node, add an edge between them if prev_node: subgraph.add_edge(pydot.Edge(prev_node, node)) prev_node = node graph.add_subgraph(subgraph) if filename: # linter says this isn't a method - it is graph.write_png(filename) # pylint: disable=no-member
def pass_manager_drawer(pass_manager, filename: str, style: Optional[Union[dict, OrderedDict]] = None): """ Draws the pass manager. This function needs `pydot <https://github.com/erocarrera/pydot>`, which in turn needs Graphviz <https://www.graphviz.org/>` to be installed. Args: pass_manager (PassManager): the pass manager to be drawn filename (str): file path to save image to style (dict or OrderedDict): keys are the pass classes and the values are the colors to make them. An example can be seen in the DEFAULT_STYLE. An ordered dict can be used to ensure a priority coloring when pass falls into multiple categories. Any values not included in the provided dict will be filled in from the default dict Raises: ImportError: when nxpd or pydot not installed. """ try: import pydot if not HAS_GRAPHVIZ: raise ImportError except ImportError: raise ImportError("pass_manager_drawer requires pydot and graphviz. " "Run 'pip install pydot'. " "Graphviz can be installed using 'brew install graphviz' on Mac" " or by downloading it from the website.") passes = pass_manager.passes() if not style: style = DEFAULT_STYLE # create the overall graph graph = pydot.Dot() # identifiers for nodes need to be unique, so assign an id # can't just use python's id in case the exact same pass was # appended more than once node_id = 0 prev_node = None for controller_group in passes: # label is the name of the flow controller (without the word controller) label = controller_group['type'].__name__.replace('Controller', '') # create the subgraph for this controller subgraph = pydot.Cluster(str(id(controller_group)), label=label) for pss in controller_group['passes']: # label is the name of the pass node = pydot.Node(str(node_id), label=str(type(pss).__name__), color=_get_node_color(pss, style), shape="rectangle") subgraph.add_node(node) node_id += 1 # the arguments that were provided to the pass when it was created arg_spec = inspect.getfullargspec(pss.__init__) # 0 is the args, 1: to remove the self arg args = arg_spec[0][1:] num_optional = len(arg_spec[3]) if arg_spec[3] else 0 # add in the inputs to the pass for arg_index, arg in enumerate(args): nd_style = 'solid' # any optional args are dashed # the num of optional counts from the end towards the start of the list if arg_index >= (len(args) - num_optional): nd_style = 'dashed' input_node = pydot.Node(node_id, label=arg, color="black", shape="ellipse", fontsize=10, style=nd_style) subgraph.add_node(input_node) node_id += 1 subgraph.add_edge(pydot.Edge(input_node, node)) # if there is a previous node, add an edge between them if prev_node: subgraph.add_edge(pydot.Edge(prev_node, node)) prev_node = node graph.add_subgraph(subgraph) if filename: # linter says this isn't a method - it is graph.write_png(filename) # pylint: disable=no-member
210
def test_get_build_json(bodhi_container, db_container):
    """Test ``/builds/{nvr}`` path"""
    # Fetch build(of latest update) from DB
    query_updates = (
        "SELECT "
        " id "
        "FROM updates "
        "ORDER BY date_submitted DESC LIMIT 1"
    )
    query_builds = (
        "SELECT "
        " nvr, "
        " release_id, "
        " signed, "
        " type, "
        " epoch "
        "FROM builds "
        "WHERE update_id = %s LIMIT 1"
    )
    db_ip = db_container.get_IPv4s()[0]
    conn = psycopg2.connect("dbname=bodhi2 user=postgres host={}".format(db_ip))
    with conn:
        with conn.cursor() as curs:
            curs.execute(query_updates)
            update_id = curs.fetchone()[0]
            curs.execute(query_builds, (update_id, ))
            row = curs.fetchone()
            nvr = row[0]
            release_id = row[1]
            signed = row[2]
            type = row[3]
            epoch = row[4]
    conn.close()
    # GET on build
    with bodhi_container.http_client(port="8080") as c:
        http_response = c.get(f"/builds/{nvr}")
    build = {"nvr": nvr, "release_id": release_id, "signed": signed,
             "type": type, "epoch": epoch}
    try:
        assert http_response.ok
        assert build == http_response.json()
    except AssertionError:
        print(http_response)
        print(http_response.text)
        with read_file(bodhi_container, "/httpdir/errorlog") as log:
            print(log.read())
        raise
def test_get_build_json(bodhi_container, db_container):
    """Test ``/builds/{nvr}`` path"""
    # Fetch builds (of latest update) from DB
    query_updates = (
        "SELECT "
        " id "
        "FROM updates "
        "ORDER BY date_submitted DESC LIMIT 1"
    )
    query_builds = (
        "SELECT "
        " nvr, "
        " release_id, "
        " signed, "
        " type, "
        " epoch "
        "FROM builds "
        "WHERE update_id = %s LIMIT 1"
    )
    db_ip = db_container.get_IPv4s()[0]
    conn = psycopg2.connect("dbname=bodhi2 user=postgres host={}".format(db_ip))
    with conn:
        with conn.cursor() as curs:
            curs.execute(query_updates)
            update_id = curs.fetchone()[0]
            curs.execute(query_builds, (update_id, ))
            row = curs.fetchone()
            nvr = row[0]
            release_id = row[1]
            signed = row[2]
            type = row[3]
            epoch = row[4]
    conn.close()
    # GET on build
    with bodhi_container.http_client(port="8080") as c:
        http_response = c.get(f"/builds/{nvr}")
    build = {"nvr": nvr, "release_id": release_id, "signed": signed,
             "type": type, "epoch": epoch}
    try:
        assert http_response.ok
        assert build == http_response.json()
    except AssertionError:
        print(http_response)
        print(http_response.text)
        with read_file(bodhi_container, "/httpdir/errorlog") as log:
            print(log.read())
        raise
2,760
def calibration_curve( y_true, y_prob, *, pos_label=None, normalize="deprecated", n_bins=5, strategy="uniform", ): """Compute true and predicted probabilities for a calibration curve. The method assumes the inputs come from a binary classifier, and discretize the [0, 1] interval into bins. Calibration curves may also be referred to as reliability diagrams. Read more in the :ref:`User Guide <calibration>`. Parameters ---------- y_true : array-like of shape (n_samples,) True targets. y_prob : array-like of shape (n_samples,) Probabilities of the positive class. pos_label : int or str, default=None The label of the positive class. .. versionadded:: 1.1 normalize : bool, default="deprecated" Whether y_prob needs to be normalized into the [0, 1] interval, i.e. is not a proper probability. If True, the smallest value in y_prob is linearly mapped onto 0 and the largest one onto 1. .. deprecated:: 1.1 The normalize argument is deprecated in v1.1 and will be removed in v1.3. Explicitly normalizing `y_prob` will reproduce this behavior, but it is recommended that a proper probability is used (i.e. a classifier's `predict_proba` positive class). n_bins : int or sequence, default=5 Number of bins to discretize the [0, 1] interval. A bigger number requires more data. A sequence of bins can also be given. Bins with no samples (i.e. without corresponding values in `y_prob`) will not be returned, thus the returned arrays may have less than `n_bins` values. strategy : {'uniform', 'quantile'}, default='uniform' Strategy used to define the widths of the bins. uniform The bins have identical widths. quantile The bins have the same number of samples and depend on `y_prob`. Ignored if n_bins is an array of bins. Returns ------- prob_true : ndarray of shape (n_bins,) or smaller The proportion of samples whose class is the positive class, in each bin (fraction of positives). prob_pred : ndarray of shape (n_bins,) or smaller The mean predicted probability in each bin. References ---------- Alexandru Niculescu-Mizil and Rich Caruana (2005) Predicting Good Probabilities With Supervised Learning, in Proceedings of the 22nd International Conference on Machine Learning (ICML). See section 4 (Qualitative Analysis of Predictions). Examples -------- >>> import numpy as np >>> from sklearn.calibration import calibration_curve >>> y_true = np.array([0, 0, 0, 0, 1, 1, 1, 1, 1]) >>> y_pred = np.array([0.1, 0.2, 0.3, 0.4, 0.65, 0.7, 0.8, 0.9, 1.]) >>> prob_true, prob_pred = calibration_curve(y_true, y_pred, n_bins=3) >>> prob_true array([0. , 0.5, 1. ]) >>> prob_pred array([0.2 , 0.525, 0.85 ]) """ y_true = column_or_1d(y_true) y_prob = column_or_1d(y_prob) check_consistent_length(y_true, y_prob) pos_label = _check_pos_label_consistency(pos_label, y_true) # TODO(1.3): Remove normalize conditional block. if normalize != "deprecated": warnings.warn( "The normalize argument is deprecated in v1.1 and will be removed in v1.3." " Explicitly normalizing y_prob will reproduce this behavior, but it is" " recommended that a proper probability is used (i.e. a classifier's" " `predict_proba` positive class or `decision_function` output calibrated" " with `CalibratedClassifierCV`).", FutureWarning, ) if normalize: # Normalize predicted values into interval [0, 1] y_prob = (y_prob - y_prob.min()) / (y_prob.max() - y_prob.min()) if y_prob.min() < 0 or y_prob.max() > 1: raise ValueError("y_prob has values outside [0, 1].") labels = np.unique(y_true) if len(labels) > 2: raise ValueError( f"Only binary classification is supported. 
Provided labels {labels}." ) y_true = y_true == pos_label bins = bins_from_strategy(n_bins, strategy, y_prob) binids = np.searchsorted(bins[1:-1], y_prob) bin_sums = np.bincount(binids, weights=y_prob, minlength=len(bins)) bin_true = np.bincount(binids, weights=y_true, minlength=len(bins)) bin_total = np.bincount(binids, minlength=len(bins)) nonzero = bin_total != 0 prob_true = bin_true[nonzero] / bin_total[nonzero] prob_pred = bin_sums[nonzero] / bin_total[nonzero] return prob_true, prob_pred
def calibration_curve( y_true, y_prob, *, pos_label=None, normalize="deprecated", n_bins=5, strategy="uniform", ): """Compute true and predicted probabilities for a calibration curve. The method assumes the inputs come from a binary classifier, and discretize the [0, 1] interval into bins. Calibration curves may also be referred to as reliability diagrams. Read more in the :ref:`User Guide <calibration>`. Parameters ---------- y_true : array-like of shape (n_samples,) True targets. y_prob : array-like of shape (n_samples,) Probabilities of the positive class. pos_label : int or str, default=None The label of the positive class. .. versionadded:: 1.1 normalize : bool, default="deprecated" Whether y_prob needs to be normalized into the [0, 1] interval, i.e. is not a proper probability. If True, the smallest value in y_prob is linearly mapped onto 0 and the largest one onto 1. .. deprecated:: 1.1 The normalize argument is deprecated in v1.1 and will be removed in v1.3. Explicitly normalizing `y_prob` will reproduce this behavior, but it is recommended that a proper probability is used (i.e. a classifier's `predict_proba` positive class). n_bins : int or array-like, default=5 Number of bins to discretize the [0, 1] interval. A bigger number requires more data. A sequence of bins can also be given. Bins with no samples (i.e. without corresponding values in `y_prob`) will not be returned, thus the returned arrays may have less than `n_bins` values. strategy : {'uniform', 'quantile'}, default='uniform' Strategy used to define the widths of the bins. uniform The bins have identical widths. quantile The bins have the same number of samples and depend on `y_prob`. Ignored if n_bins is an array of bins. Returns ------- prob_true : ndarray of shape (n_bins,) or smaller The proportion of samples whose class is the positive class, in each bin (fraction of positives). prob_pred : ndarray of shape (n_bins,) or smaller The mean predicted probability in each bin. References ---------- Alexandru Niculescu-Mizil and Rich Caruana (2005) Predicting Good Probabilities With Supervised Learning, in Proceedings of the 22nd International Conference on Machine Learning (ICML). See section 4 (Qualitative Analysis of Predictions). Examples -------- >>> import numpy as np >>> from sklearn.calibration import calibration_curve >>> y_true = np.array([0, 0, 0, 0, 1, 1, 1, 1, 1]) >>> y_pred = np.array([0.1, 0.2, 0.3, 0.4, 0.65, 0.7, 0.8, 0.9, 1.]) >>> prob_true, prob_pred = calibration_curve(y_true, y_pred, n_bins=3) >>> prob_true array([0. , 0.5, 1. ]) >>> prob_pred array([0.2 , 0.525, 0.85 ]) """ y_true = column_or_1d(y_true) y_prob = column_or_1d(y_prob) check_consistent_length(y_true, y_prob) pos_label = _check_pos_label_consistency(pos_label, y_true) # TODO(1.3): Remove normalize conditional block. if normalize != "deprecated": warnings.warn( "The normalize argument is deprecated in v1.1 and will be removed in v1.3." " Explicitly normalizing y_prob will reproduce this behavior, but it is" " recommended that a proper probability is used (i.e. a classifier's" " `predict_proba` positive class or `decision_function` output calibrated" " with `CalibratedClassifierCV`).", FutureWarning, ) if normalize: # Normalize predicted values into interval [0, 1] y_prob = (y_prob - y_prob.min()) / (y_prob.max() - y_prob.min()) if y_prob.min() < 0 or y_prob.max() > 1: raise ValueError("y_prob has values outside [0, 1].") labels = np.unique(y_true) if len(labels) > 2: raise ValueError( f"Only binary classification is supported. 
Provided labels {labels}." ) y_true = y_true == pos_label bins = bins_from_strategy(n_bins, strategy, y_prob) binids = np.searchsorted(bins[1:-1], y_prob) bin_sums = np.bincount(binids, weights=y_prob, minlength=len(bins)) bin_true = np.bincount(binids, weights=y_true, minlength=len(bins)) bin_total = np.bincount(binids, minlength=len(bins)) nonzero = bin_total != 0 prob_true = bin_true[nonzero] / bin_total[nonzero] prob_pred = bin_sums[nonzero] / bin_total[nonzero] return prob_true, prob_pred
16,269
def wait_feed_is_done(hass):
    """Block till feed is done."""
    hass.block_till_done()
    hass.bus.fire(EVENT_HOMEASSISTANT_START)
    hass.block_till_done()
def wait_feed_is_done(hass):
    """Block till feed is done."""
    hass.block_till_done()
    hass.bus.async_fire(EVENT_HOMEASSISTANT_START)
    hass.block_till_done()
8,866
def subreddit_sorting(bot, trigger, s, sorting):
    if sorting == 'new':
        submissions = list(s.new())
    elif sorting == 'top':
        submissions = list(s.top())
    elif sorting == 'hot':
        submissions = list(s.hot())
    elif sorting == 'controversial':
        submissions = list(s.controversial())
    elif sorting == 'gilded':
        submissions = list(s.gilded())
    elif sorting == 'rising':
        submissions = list(s.rising())
    elif sorting == 'sticky':
        try:
            submissions = [s.sticky()]
        except prawcore.exceptions.NotFound:
            bot.say("r/" + s.display_name + " appears to not have a stickied post!")
            return
    elif sorting == 'random':
        submissions = [s.random()] or []
    else:
        return

    if not len(submissions):
        bot.say("r/" + s.display_name + ' ' + sorting + " appears to have no items!")
        return NOLIMIT

    if sorting != 'sticky':
        submissions_filter = []
        for submission in submissions:
            if not submission.stickied:
                submissions_filter.append(submission)
        submissions = submissions_filter

    submission = submissions[0]
    link = "https://reddit.com/r/" + s.display_name + "/comments/" + str(submission)
    say_post_info(
        bot, trigger, re.match(post_url, link).group(1), False, True)
def subreddit_sorting(bot, trigger, s, sorting):
    if sorting == 'new':
        submissions = list(s.new())
    elif sorting == 'top':
        submissions = list(s.top())
    elif sorting == 'hot':
        submissions = list(s.hot())
    elif sorting == 'controversial':
        submissions = list(s.controversial(limit=10))
    elif sorting == 'gilded':
        submissions = list(s.gilded())
    elif sorting == 'rising':
        submissions = list(s.rising())
    elif sorting == 'sticky':
        try:
            submissions = [s.sticky()]
        except prawcore.exceptions.NotFound:
            bot.say("r/" + s.display_name + " appears to not have a stickied post!")
            return
    elif sorting == 'random':
        submissions = [s.random()] or []
    else:
        return

    if not len(submissions):
        bot.say("r/" + s.display_name + ' ' + sorting + " appears to have no items!")
        return NOLIMIT

    if sorting != 'sticky':
        submissions_filter = []
        for submission in submissions:
            if not submission.stickied:
                submissions_filter.append(submission)
        submissions = submissions_filter

    submission = submissions[0]
    link = "https://reddit.com/r/" + s.display_name + "/comments/" + str(submission)
    say_post_info(
        bot, trigger, re.match(post_url, link).group(1), False, True)
59,930
def test_cancel_with_grace(minimal_job, scheduler, local_only): # This test emulates a spawned process that ignores the SIGTERM signal # and also spawns another process: # # reframe --- local job script --- sleep 5 # (TERM IGN) # # We expect the job not to be cancelled immediately, since it ignores # the gracious signal we are sending it. However, we expect it to be # killed immediately after the grace period of 2 seconds expires. # # We also check that the additional spawned process is also killed. minimal_job.time_limit = '1m' minimal_job.scheduler.CANCEL_GRACE_PERIOD = 2 prepare_job(minimal_job, command='sleep 5 &', pre_run=['trap -- "" TERM'], post_run=['echo $!', 'wait'], prepare_cmds=['']) minimal_job.submit() # Stall a bit here to let the the spawned process start and install its # signal handler for SIGTERM time.sleep(1) # Try reading the pid of spawned sleep, until a valid value is retrieved for i in range(3): try: with open(minimal_job.stdout) as fp: sleep_pid = int(fp.read()) except ValueError: time.sleep(1) continue else: break else: pytest.fail('failed to retrieve the spawned sleep process pid') t_grace = time.time() minimal_job.cancel() time.sleep(0.1) minimal_job.wait() t_grace = time.time() - t_grace assert t_grace >= 2 and t_grace < 5 assert minimal_job.state == 'FAILURE' assert minimal_job.signal == signal.SIGKILL # Verify that the spawned sleep is killed, too, but back off a bit in # order to allow the sleep process to wake up and get the signal time.sleep(0.1) assert_process_died(sleep_pid)
def test_cancel_with_grace(minimal_job, scheduler, local_only): # This test emulates a spawned process that ignores the SIGTERM signal # and also spawns another process: # # reframe --- local job script --- sleep 5 # (TERM IGN) # # We expect the job not to be cancelled immediately, since it ignores # the gracious signal we are sending it. However, we expect it to be # killed immediately after the grace period of 2 seconds expires. # # We also check that the additional spawned process is also killed. minimal_job.time_limit = '1m' minimal_job.scheduler.CANCEL_GRACE_PERIOD = 2 prepare_job(minimal_job, command='sleep 5 &', pre_run=['trap -- "" TERM'], post_run=['echo $!', 'wait'], prepare_cmds=['']) minimal_job.submit() # Stall a bit here to let the the spawned process start and install its # signal handler for SIGTERM time.sleep(1) # Try reading the pid of spawned sleep, until a valid value is retrieved for i in range(3): try: with open(minimal_job.stdout) as fp: sleep_pid = int(fp.read()) except ValueError: time.sleep(1) continue break else: pytest.fail('failed to retrieve the spawned sleep process pid') t_grace = time.time() minimal_job.cancel() time.sleep(0.1) minimal_job.wait() t_grace = time.time() - t_grace assert t_grace >= 2 and t_grace < 5 assert minimal_job.state == 'FAILURE' assert minimal_job.signal == signal.SIGKILL # Verify that the spawned sleep is killed, too, but back off a bit in # order to allow the sleep process to wake up and get the signal time.sleep(0.1) assert_process_died(sleep_pid)
5,811
def permutation_test(data, statistic, *, permutation_type='independent', vectorized=False, n_resamples=9999, batch=None, alternative="two-sided", axis=0, random_state=None): r""" Performs a permutation test of a given statistic on provided data. For independent sample statistics, the null hypothesis is that the data are randomly sampled from the same distribution. For paired sample statistics, two null hypothesis can be tested: that the data are paired at random or that the data are assigned to samples at random. Parameters ---------- data : iterable of array-like Contains the samples, each of which is an array of observations. Dimensions of sample arrays must be compatible for broadcasting except along `axis`. statistic : callable Statistic for which the p-value of the hypothesis test is to be calculated. `statistic` must be a callable that accepts samples as separate arguments (e.g. ``statistic(*data)``) and returns the resulting statistic. If `vectorized` is set ``True``, `statistic` must also accept a keyword argument `axis` and be vectorized to compute the statistic along the provided `axis` of the sample arrays. permutation_type : {'independent', 'samples', 'pairings'}, optional The type of permutations to be performed, in accordance with the null hypothesis. The first two permutation types are for paired sample statistics, in which all samples contain the same number of observations and observations with corresponding indices along `axis` are considered to be paired; the third is for independent sample statistics. - ``'samples'`` : observations are assigned to different samples but remain paired with the same observations from other samples. This permutation type is appropriate for paired sample hypothesis tests such as the Wilcoxon signed-rank test and the paired t-test. - ``'pairings'`` : observations are paired with different observations, but they remain within the same sample. This permutation type is appropriate for association/correlation tests with statistics such as Spearman's :math:`\rho`, Kendall's :math:`\tau`, and Pearson's :math:`r`. - ``'independent'`` (default) : observations are assigned to different samples. Samples may contain different numbers of observations. This permutation type is appropriate for independent sample hypothesis tests such as the Mann-Whitney :math:`U` test and the independent sample t-test. Please see the Notes section below for more detailed descriptions of the permutation types. vectorized : bool, default: ``False`` By default, `statistic` is assumed to calculate the statistic only for 1D arrays contained in `data`. If `vectorized` is set ``True``, `statistic` must also accept a keyword argument `axis` and be vectorized to compute the statistic along the provided `axis` of the ND arrays in `data`. Use of a vectorized statistic can reduce computation time. n_resamples : int or np.inf, default: 9999 Number of random permutations (resamples) used to approximate the null distribution. If greater than or equal to the number of distinct permutations, the exact null distribution will be computed. Note that the number of distinct permutations grows very rapidly with the sizes of samples, so exact tests are feasible only for very small data sets. batch : int, optional The number of permutations to process in each call to `statistic`. Memory usage is O(`batch`*``n``), where ``n`` is the total size of all samples, regardless of the value of `vectorized`. Default is ``None``, in which case ``batch`` is the number of permutations. 
alternative : {'two-sided', 'less', 'greater'}, optional The alternative hypothesis for which the p-value is calculated. For each alternative, the p-value is defined for exact tests as follows. - ``'greater'`` : the percentage of the null distribution that is greater than or equal to the observed value of the test statistic. - ``'less'`` : the percentage of the null distribution that is less than or equal to the observed value of the test statistic. - ``'two-sided'`` (default) : twice the smaller of the p-values above. Note that p-values for randomized tests are calculated according to the conservative (over-estimated) approximation suggested in [2]_ and [3]_ rather than the unbiased estimator suggested in [4]_. That is, when calculating the proportion of the randomized null distribution that is as extreme as the observed value of the test statistic, the values in the numerator and denominator are both increased by one. An interpretation of this adjustment is that the observed value of the test statistic is always included as an element of the randomized null distribution. The convention used for two-sided p-values is not universal; the observed test statistic and null distribution are returned in case a different definition is preferred. axis : int, default: 0 The axis of the (broadcasted) samples over which to calculate the statistic. If samples have a different number of dimensions, singleton dimensions are prepended to samples with fewer dimensions before `axis` is considered. random_state : {None, int, `numpy.random.Generator`, `numpy.random.RandomState`}, optional Pseudorandom number generator state used to generate permutations. If `random_state` is ``None`` (default), the `numpy.random.RandomState` singleton is used. If `random_state` is an int, a new ``RandomState`` instance is used, seeded with `random_state`. If `random_state` is already a ``Generator`` or ``RandomState`` instance then that instance is used. Returns ------- statistic : float or ndarray The observed test statistic of the data. pvalue : float or ndarray The p-value for the given alternative. null_distribution : ndarray The values of the test statistic generated under the null hypothesis. Notes ----- The three types of permutation tests supported by this function are described below. **Unpaired statistics** (``permutation_type='independent'``): The null hypothesis associated with this permutation type is that all observations are sampled from the same underlying distribution and that they have been assigned to one of the samples at random. Suppose ``data`` contains two samples; e.g. ``a, b = data``. When ``1 < n_resamples < binom(n, k)``, where * ``k`` is the number of observations in ``a``, * ``n`` is the total number of observations in ``a`` and ``b``, and * ``binom(n, k)`` is the binomial coefficient (``n`` choose ``k``), the data are pooled (concatenated), randomly assigned to either the first or second sample, and the statistic is calculated. This process is performed repeatedly, `permutation` times, generating a distribution of the statistic under the null hypothesis. The statistic of the original data is compared to this distribution to determine the p-value. When ``n_resamples >= binom(n, k)``, an exact test is performed: the data are *partitioned* between the samples in each distinct way exactly once, and the exact null distribution is formed. Note that for a given partitioning of the data between the samples, only one ordering/permutation of the data *within* each sample is considered. 
For statistics that do not depend on the order of the data within samples, this dramatically reduces computational cost without affecting the shape of the null distribution (because the frequency/count of each value is affected by the same factor). For ``a = [a1, a2, a3, a4]`` and ``b = [b1, b2, b3]``, an example of this permutation type is ``x = [b3, a1, a2, b2]`` and ``y = [a4, b1, a3]``. Because only one ordering/permutation of the data *within* each sample is considered in an exact test, a resampling like ``x = [b3, a1, b2, a2]`` and ``y = [a4, a3, b1]`` would *not* be considered distinct from the example above. ``permutation_type='independent'`` does not support one-sample statistics, but it can be applied to statistics with more than two samples. In this case, if ``n`` is an array of the number of observations within each sample, the number of distinct partitions is:: np.product([binom(sum(n[i:]), sum(n[i+1:])) for i in range(len(n)-1)]) **Paired statistics, permute pairings** (``permutation_type='pairings'``): The null hypothesis associated with this permutation type is that observations within each sample are drawn from the same underlying distribution and that pairings with elements of other samples are assigned at random. Suppose ``data`` contains only one sample; e.g. ``a, = data``, and we wish to consider all possible pairings of elements of ``a`` with elements of a second sample, ``b``. Let ``n`` be the number of observations in ``a``, which must also equal the number of observations in ``b``. When ``1 < n_resamples < factorial(n)``, the elements of ``a`` are randomly permuted. The user-supplied statistic accepts one data argument, say ``a_perm``, and calculates the statistic considering ``a_perm`` and ``b``. This process is performed repeatedly, `permutation` times, generating a distribution of the statistic under the null hypothesis. The statistic of the original data is compared to this distribution to determine the p-value. When ``n_resamples >= factorial(n)``, an exact test is performed: ``a`` is permuted in each distinct way exactly once. Therefore, the `statistic` is computed for each unique pairing of samples between ``a`` and ``b`` exactly once. For ``a = [a1, a2, a3]`` and ``b = [b1, b2, b3]``, an example of this permutation type is ``a_perm = [a3, a1, a2]`` while ``b`` is left in its original order. ``permutation_type='pairings'`` supports ``data`` containing any number of samples, each of which must contain the same number of observations. All samples provided in ``data`` are permuted *independently*. Therefore, if ``m`` is the number of samples and ``n`` is the number of observations within each sample, then the number of permutations in an exact test is:: factorial(n)**m Note that if a two-sample statistic, for example, does not inherently depend on the order in which observations are provided - only on the *pairings* of observations - then only one of the two samples should be provided in ``data``. This dramatically reduces computational cost without affecting the shape of the null distribution (because the frequency/count of each value is affected by the same factor). **Paired statistics, permute samples** (``permutation_type='samples'``): The null hypothesis associated with this permutation type is that observations within each pair are drawn from the same underlying distribution and that the sample to which they are assigned is random. Suppose ``data`` contains two samples; e.g. ``a, b = data``. 
Let ``n`` be the number of observations in ``a``, which must also equal the number of observations in ``b``. When ``1 < n_resamples < 2**n``, the elements of ``a`` are ``b`` are randomly swapped between samples (maintaining their pairings) and the statistic is calculated. This process is performed repeatedly, `permutation` times, generating a distribution of the statistic under the null hypothesis. The statistic of the original data is compared to this distribution to determine the p-value. When ``n_resamples >= 2**n``, an exact test is performed: the observations are assigned to the two samples in each distinct way (while maintaining pairings) exactly once. For ``a = [a1, a2, a3]`` and ``b = [b1, b2, b3]``, an example of this permutation type is ``x = [b1, a2, b3]`` and ``y = [a1, b2, a3]``. ``permutation_type='samples'`` supports ``data`` containing any number of samples, each of which must contain the same number of observations. If ``data`` contains more than one sample, paired observations within ``data`` are exchanged between samples *independently*. Therefore, if ``m`` is the number of samples and ``n`` is the number of observations within each sample, then the number of permutations in an exact test is:: factorial(m)**n Several paired-sample statistical tests, such as the Wilcoxon signed rank test and paired-sample t-test, can be performed considering only the *difference* between two paired elements. Accordingly, if ``data`` contains only one sample, then the null distribution is formed by independently changing the *sign* of each observation. .. warning:: The p-value is calculated by counting the elements of the null distribution that are as extreme or more extreme than the observed value of the statistic. Due to the use of finite precision arithmetic, some statistic functions return numerically distinct values when the theoretical values would be exactly equal. In some cases, this could lead to a large error in the calculated p-value. `permutation_test` guards against this by considering elements in the null distribution that are "close" (within a factor of ``1+1e-14``) to the observed value of the test statistic as equal to the observed value of the test statistic. However, the user is advised to inspect the null distribution to assess whether this method of comparison is appropriate, and if not, calculate the p-value manually. See example below. References ---------- .. [1] R. A. Fisher. The Design of Experiments, 6th Ed (1951). .. [2] B. Phipson and G. K. Smyth. "Permutation P-values Should Never Be Zero: Calculating Exact P-values When Permutations Are Randomly Drawn." Statistical Applications in Genetics and Molecular Biology 9.1 (2010). .. [3] M. D. Ernst. "Permutation Methods: A Basis for Exact Inference". Statistical Science (2004). .. [4] B. Efron and R. J. Tibshirani. An Introduction to the Bootstrap (1993). Examples -------- Suppose we wish to test whether two samples are drawn from the same distribution. Assume that the underlying distributions are unknown to us, and that before observing the data, we hypothesized that the mean of the first sample would be less than that of the second sample. We decide that we will use the difference between the sample means as a test statistic, and we will consider a p-value of 0.05 to be statistically significant. For efficiency, we write the function defining the test statistic in a vectorized fashion: the samples ``x`` and ``y`` can be ND arrays, and the statistic will be calculated for each axis-slice along `axis`. 
>>> def statistic(x, y, axis): ... return np.mean(x, axis=axis) - np.mean(y, axis=axis) After collecting our data, we calculate the observed value of the test statistic. >>> from scipy.stats import norm >>> rng = np.random.default_rng() >>> x = norm.rvs(size=5, random_state=rng) >>> y = norm.rvs(size=6, loc = 3, random_state=rng) >>> statistic(x, y, 0) -3.5411688580987266 Indeed, the test statistic is negative, suggesting that the true mean of the distribution underlying ``x`` is less than that of the distribution underlying ``y``. To determine the probability of this occuring by chance if the two samples were drawn from the same distribution, we perform a permutation test. >>> from scipy.stats import permutation_test >>> # because our statistic is vectorized, we pass `vectorized=True` >>> # `n_resamples=np.inf` indicates that an exact test is to be performed >>> res = permutation_test((x, y), statistic, vectorized=True, ... n_resamples=np.inf, alternative='less') >>> print(res.statistic) -3.5411688580987266 >>> print(res.pvalue) 0.004329004329004329 The probability of obtaining a test statistic less than or equal to the observed value under the null hypothesis is 0.4329%. This is less than our chosen threshold of 5%, so we consider this to to be significant evidence against the null hypothesis in favor of the alternative. Because the size of the samples above was small, `permutation_test` could perform an exact test. For larger samples, we resort to a randomized permutation test. >>> x = norm.rvs(size=100, random_state=rng) >>> y = norm.rvs(size=120, loc=0.3, random_state=rng) >>> res = permutation_test((x, y), statistic, n_resamples=100000, ... vectorized=True, alternative='less', ... random_state=rng) >>> print(res.statistic) -0.5230459671240913 >>> print(res.pvalue) 0.00016999830001699983 The approximate probability of obtaining a test statistic less than or equal to the observed value under the null hypothesis is 0.0225%. This is again less than our chosen threshold of 5%, so again we have significant evidence to reject the null hypothesis in favor of the alternative. For large samples and number of permutations, the result is comparable to that of the corresponding asymptotic test, the independent sample t-test. >>> from scipy.stats import ttest_ind >>> res_asymptotic = ttest_ind(x, y, alternative='less') >>> print(res_asymptotic.pvalue) 0.00012688101537979522 The permutation distribution of the test statistic is provided for further investigation. >>> import matplotlib.pyplot as plt >>> plt.hist(res.null_distribution, bins=50) >>> plt.title("Permutation distribution of test statistic") >>> plt.xlabel("Value of Statistic") >>> plt.ylabel("Frequency") Inspection of the null distribution is essential if the statistic suffers from inaccuracy due to limited machine precision. Consider the following case: >>> from scipy.stats import pearsonr >>> x = [1, 2, 4, 3] >>> y = [2, 4, 6, 8] >>> def statistic(x, y): ... return pearsonr(x, y).statistic >>> res = permutation_test((x, y), statistic, vectorized=False, ... permutation_type='pairings', ... alternative='greater') >>> r, pvalue, null = res.statistic, res.pvalue, res.null_distribution In this case, some elements of the null distribution differ from the observed value of the correlation coefficient ``r`` due to numerical noise. We manually inspect the elements of the null distribution that are nearly the same as the observed value of the test statistic. >>> r 0.8 >>> unique = np.unique(null) >>> unique array([-1. 
, -0.8, -0.8, -0.6, -0.4, -0.2, -0.2, 0. , 0.2, 0.2, 0.4, 0.6, 0.8, 0.8, 1. ]) >>> unique[np.isclose(r, unique)].tolist() [0.7999999999999999, 0.8] If `permutation_test` were to perform the comparison naively, the elements of the null distribution with value ``0.7999999999999999`` would not be considered as extreme or more extreme as the observed value of the statistic, so the calculated p-value would be too small. >>> incorrect_pvalue = np.count_nonzero(null >= r) / len(null) >>> incorrect_pvalue 0.1111111111111111 Instead, `permutation_test` treats elements of the null distribution that are within a factor of ``1+1e-14`` of the observed value of the statistic to be equal to the test statistic. >>> correct_pvalue = np.count_nonzero(null >= r / (1+1e-14)) / len(null) >>> correct_pvalue 0.16666666666666666 >>> res.pvalue == correct_pvalue True This method of comparison is expected to be accurate in most practical situations, but the user is advised to assess this by inspecting the elements of the null distribution that are close to the observed value of the statistic. Also, consider the use of statistics that can be calculated using exact arithmetic (e.g. integer statistics). """ args = _permutation_test_iv(data, statistic, permutation_type, vectorized, n_resamples, batch, alternative, axis, random_state) (data, statistic, permutation_type, vectorized, n_resamples, batch, alternative, axis, random_state) = args observed = statistic(*data, axis=-1) null_calculators = {"pairings": _calculate_null_pairings, "samples": _calculate_null_samples, "independent": _calculate_null_both} null_calculator_args = (data, statistic, n_resamples, batch, random_state) calculate_null = null_calculators[permutation_type] null_distribution, n_resamples, exact_test = ( calculate_null(*null_calculator_args)) # See References [2] and [3] adjustment = 0 if exact_test else 1 # relative tolerance for detecting numerically distinct but # theoretically equal values in the null distribution eps = 1e-14 def less(null_distribution, observed): cmps = null_distribution <= observed * (1+eps) pvalues = (cmps.sum(axis=0) + adjustment) / (n_resamples + adjustment) return pvalues def greater(null_distribution, observed): cmps = null_distribution >= observed / (1+eps) pvalues = (cmps.sum(axis=0) + adjustment) / (n_resamples + adjustment) return pvalues def two_sided(null_distribution, observed): pvalues_less = less(null_distribution, observed) pvalues_greater = greater(null_distribution, observed) pvalues = np.minimum(pvalues_less, pvalues_greater) * 2 return pvalues compare = {"less": less, "greater": greater, "two-sided": two_sided} pvalues = compare[alternative](null_distribution, observed) pvalues = np.clip(pvalues, 0, 1) return PermutationTestResult(observed, pvalues, null_distribution)
def permutation_test(data, statistic, *, permutation_type='independent', vectorized=False, n_resamples=9999, batch=None, alternative="two-sided", axis=0, random_state=None): r""" Performs a permutation test of a given statistic on provided data. For independent sample statistics, the null hypothesis is that the data are randomly sampled from the same distribution. For paired sample statistics, two null hypothesis can be tested: that the data are paired at random or that the data are assigned to samples at random. Parameters ---------- data : iterable of array-like Contains the samples, each of which is an array of observations. Dimensions of sample arrays must be compatible for broadcasting except along `axis`. statistic : callable Statistic for which the p-value of the hypothesis test is to be calculated. `statistic` must be a callable that accepts samples as separate arguments (e.g. ``statistic(*data)``) and returns the resulting statistic. If `vectorized` is set ``True``, `statistic` must also accept a keyword argument `axis` and be vectorized to compute the statistic along the provided `axis` of the sample arrays. permutation_type : {'independent', 'samples', 'pairings'}, optional The type of permutations to be performed, in accordance with the null hypothesis. The first two permutation types are for paired sample statistics, in which all samples contain the same number of observations and observations with corresponding indices along `axis` are considered to be paired; the third is for independent sample statistics. - ``'samples'`` : observations are assigned to different samples but remain paired with the same observations from other samples. This permutation type is appropriate for paired sample hypothesis tests such as the Wilcoxon signed-rank test and the paired t-test. - ``'pairings'`` : observations are paired with different observations, but they remain within the same sample. This permutation type is appropriate for association/correlation tests with statistics such as Spearman's :math:`\rho`, Kendall's :math:`\tau`, and Pearson's :math:`r`. - ``'independent'`` (default) : observations are assigned to different samples. Samples may contain different numbers of observations. This permutation type is appropriate for independent sample hypothesis tests such as the Mann-Whitney :math:`U` test and the independent sample t-test. Please see the Notes section below for more detailed descriptions of the permutation types. vectorized : bool, default: ``False`` By default, `statistic` is assumed to calculate the statistic only for 1D arrays contained in `data`. If `vectorized` is set ``True``, `statistic` must also accept a keyword argument `axis` and be vectorized to compute the statistic along the provided `axis` of the ND arrays in `data`. Use of a vectorized statistic can reduce computation time. n_resamples : int or np.inf, default: 9999 Number of random permutations (resamples) used to approximate the null distribution. If greater than or equal to the number of distinct permutations, the exact null distribution will be computed. Note that the number of distinct permutations grows very rapidly with the sizes of samples, so exact tests are feasible only for very small data sets. batch : int, optional The number of permutations to process in each call to `statistic`. Memory usage is O(`batch`*``n``), where ``n`` is the total size of all samples, regardless of the value of `vectorized`. Default is ``None``, in which case ``batch`` is the number of permutations. 
alternative : {'two-sided', 'less', 'greater'}, optional The alternative hypothesis for which the p-value is calculated. For each alternative, the p-value is defined for exact tests as follows. - ``'greater'`` : the percentage of the null distribution that is greater than or equal to the observed value of the test statistic. - ``'less'`` : the percentage of the null distribution that is less than or equal to the observed value of the test statistic. - ``'two-sided'`` (default) : twice the smaller of the p-values above. Note that p-values for randomized tests are calculated according to the conservative (over-estimated) approximation suggested in [2]_ and [3]_ rather than the unbiased estimator suggested in [4]_. That is, when calculating the proportion of the randomized null distribution that is as extreme as the observed value of the test statistic, the values in the numerator and denominator are both increased by one. An interpretation of this adjustment is that the observed value of the test statistic is always included as an element of the randomized null distribution. The convention used for two-sided p-values is not universal; the observed test statistic and null distribution are returned in case a different definition is preferred. axis : int, default: 0 The axis of the (broadcasted) samples over which to calculate the statistic. If samples have a different number of dimensions, singleton dimensions are prepended to samples with fewer dimensions before `axis` is considered. random_state : {None, int, `numpy.random.Generator`, `numpy.random.RandomState`}, optional Pseudorandom number generator state used to generate permutations. If `random_state` is ``None`` (default), the `numpy.random.RandomState` singleton is used. If `random_state` is an int, a new ``RandomState`` instance is used, seeded with `random_state`. If `random_state` is already a ``Generator`` or ``RandomState`` instance then that instance is used. Returns ------- statistic : float or ndarray The observed test statistic of the data. pvalue : float or ndarray The p-value for the given alternative. null_distribution : ndarray The values of the test statistic generated under the null hypothesis. Notes ----- The three types of permutation tests supported by this function are described below. **Unpaired statistics** (``permutation_type='independent'``): The null hypothesis associated with this permutation type is that all observations are sampled from the same underlying distribution and that they have been assigned to one of the samples at random. Suppose ``data`` contains two samples; e.g. ``a, b = data``. When ``1 < n_resamples < binom(n, k)``, where * ``k`` is the number of observations in ``a``, * ``n`` is the total number of observations in ``a`` and ``b``, and * ``binom(n, k)`` is the binomial coefficient (``n`` choose ``k``), the data are pooled (concatenated), randomly assigned to either the first or second sample, and the statistic is calculated. This process is performed repeatedly, `permutation` times, generating a distribution of the statistic under the null hypothesis. The statistic of the original data is compared to this distribution to determine the p-value. When ``n_resamples >= binom(n, k)``, an exact test is performed: the data are *partitioned* between the samples in each distinct way exactly once, and the exact null distribution is formed. Note that for a given partitioning of the data between the samples, only one ordering/permutation of the data *within* each sample is considered. 
For statistics that do not depend on the order of the data within samples, this dramatically reduces computational cost without affecting the shape of the null distribution (because the frequency/count of each value is affected by the same factor). For ``a = [a1, a2, a3, a4]`` and ``b = [b1, b2, b3]``, an example of this permutation type is ``x = [b3, a1, a2, b2]`` and ``y = [a4, b1, a3]``. Because only one ordering/permutation of the data *within* each sample is considered in an exact test, a resampling like ``x = [b3, a1, b2, a2]`` and ``y = [a4, a3, b1]`` would *not* be considered distinct from the example above. ``permutation_type='independent'`` does not support one-sample statistics, but it can be applied to statistics with more than two samples. In this case, if ``n`` is an array of the number of observations within each sample, the number of distinct partitions is:: np.product([binom(sum(n[i:]), sum(n[i+1:])) for i in range(len(n)-1)]) **Paired statistics, permute pairings** (``permutation_type='pairings'``): The null hypothesis associated with this permutation type is that observations within each sample are drawn from the same underlying distribution and that pairings with elements of other samples are assigned at random. Suppose ``data`` contains only one sample; e.g. ``a, = data``, and we wish to consider all possible pairings of elements of ``a`` with elements of a second sample, ``b``. Let ``n`` be the number of observations in ``a``, which must also equal the number of observations in ``b``. When ``1 < n_resamples < factorial(n)``, the elements of ``a`` are randomly permuted. The user-supplied statistic accepts one data argument, say ``a_perm``, and calculates the statistic considering ``a_perm`` and ``b``. This process is performed repeatedly, `permutation` times, generating a distribution of the statistic under the null hypothesis. The statistic of the original data is compared to this distribution to determine the p-value. When ``n_resamples >= factorial(n)``, an exact test is performed: ``a`` is permuted in each distinct way exactly once. Therefore, the `statistic` is computed for each unique pairing of samples between ``a`` and ``b`` exactly once. For ``a = [a1, a2, a3]`` and ``b = [b1, b2, b3]``, an example of this permutation type is ``a_perm = [a3, a1, a2]`` while ``b`` is left in its original order. ``permutation_type='pairings'`` supports ``data`` containing any number of samples, each of which must contain the same number of observations. All samples provided in ``data`` are permuted *independently*. Therefore, if ``m`` is the number of samples and ``n`` is the number of observations within each sample, then the number of permutations in an exact test is:: factorial(n)**m Note that if a two-sample statistic, for example, does not inherently depend on the order in which observations are provided - only on the *pairings* of observations - then only one of the two samples should be provided in ``data``. This dramatically reduces computational cost without affecting the shape of the null distribution (because the frequency/count of each value is affected by the same factor). **Paired statistics, permute samples** (``permutation_type='samples'``): The null hypothesis associated with this permutation type is that observations within each pair are drawn from the same underlying distribution and that the sample to which they are assigned is random. Suppose ``data`` contains two samples; e.g. ``a, b = data``. 
Let ``n`` be the number of observations in ``a``, which must also equal the number of observations in ``b``. When ``1 < n_resamples < 2**n``, the elements of ``a`` are ``b`` are randomly swapped between samples (maintaining their pairings) and the statistic is calculated. This process is performed repeatedly, `permutation` times, generating a distribution of the statistic under the null hypothesis. The statistic of the original data is compared to this distribution to determine the p-value. When ``n_resamples >= 2**n``, an exact test is performed: the observations are assigned to the two samples in each distinct way (while maintaining pairings) exactly once. For ``a = [a1, a2, a3]`` and ``b = [b1, b2, b3]``, an example of this permutation type is ``x = [b1, a2, b3]`` and ``y = [a1, b2, a3]``. ``permutation_type='samples'`` supports ``data`` containing any number of samples, each of which must contain the same number of observations. If ``data`` contains more than one sample, paired observations within ``data`` are exchanged between samples *independently*. Therefore, if ``m`` is the number of samples and ``n`` is the number of observations within each sample, then the number of permutations in an exact test is:: factorial(m)**n Several paired-sample statistical tests, such as the Wilcoxon signed rank test and paired-sample t-test, can be performed considering only the *difference* between two paired elements. Accordingly, if ``data`` contains only one sample, then the null distribution is formed by independently changing the *sign* of each observation. .. warning:: The p-value is calculated by counting the elements of the null distribution that are as extreme or more extreme than the observed value of the statistic. Due to the use of finite precision arithmetic, some statistic functions return numerically distinct values when the theoretical values would be exactly equal. In some cases, this could lead to a large error in the calculated p-value. `permutation_test` guards against this by considering elements in the null distribution that are "close" (within a factor of ``1+1e-14``) to the observed value of the test statistic as equal to the observed value of the test statistic. However, the user is advised to inspect the null distribution to assess whether this method of comparison is appropriate, and if not, calculate the p-value manually. See example below. References ---------- .. [1] R. A. Fisher. The Design of Experiments, 6th Ed (1951). .. [2] B. Phipson and G. K. Smyth. "Permutation P-values Should Never Be Zero: Calculating Exact P-values When Permutations Are Randomly Drawn." Statistical Applications in Genetics and Molecular Biology 9.1 (2010). .. [3] M. D. Ernst. "Permutation Methods: A Basis for Exact Inference". Statistical Science (2004). .. [4] B. Efron and R. J. Tibshirani. An Introduction to the Bootstrap (1993). Examples -------- Suppose we wish to test whether two samples are drawn from the same distribution. Assume that the underlying distributions are unknown to us, and that before observing the data, we hypothesized that the mean of the first sample would be less than that of the second sample. We decide that we will use the difference between the sample means as a test statistic, and we will consider a p-value of 0.05 to be statistically significant. For efficiency, we write the function defining the test statistic in a vectorized fashion: the samples ``x`` and ``y`` can be ND arrays, and the statistic will be calculated for each axis-slice along `axis`. 
>>> def statistic(x, y, axis): ... return np.mean(x, axis=axis) - np.mean(y, axis=axis) After collecting our data, we calculate the observed value of the test statistic. >>> from scipy.stats import norm >>> rng = np.random.default_rng() >>> x = norm.rvs(size=5, random_state=rng) >>> y = norm.rvs(size=6, loc = 3, random_state=rng) >>> statistic(x, y, 0) -3.5411688580987266 Indeed, the test statistic is negative, suggesting that the true mean of the distribution underlying ``x`` is less than that of the distribution underlying ``y``. To determine the probability of this occuring by chance if the two samples were drawn from the same distribution, we perform a permutation test. >>> from scipy.stats import permutation_test >>> # because our statistic is vectorized, we pass `vectorized=True` >>> # `n_resamples=np.inf` indicates that an exact test is to be performed >>> res = permutation_test((x, y), statistic, vectorized=True, ... n_resamples=np.inf, alternative='less') >>> print(res.statistic) -3.5411688580987266 >>> print(res.pvalue) 0.004329004329004329 The probability of obtaining a test statistic less than or equal to the observed value under the null hypothesis is 0.4329%. This is less than our chosen threshold of 5%, so we consider this to to be significant evidence against the null hypothesis in favor of the alternative. Because the size of the samples above was small, `permutation_test` could perform an exact test. For larger samples, we resort to a randomized permutation test. >>> x = norm.rvs(size=100, random_state=rng) >>> y = norm.rvs(size=120, loc=0.3, random_state=rng) >>> res = permutation_test((x, y), statistic, n_resamples=100000, ... vectorized=True, alternative='less', ... random_state=rng) >>> print(res.statistic) -0.5230459671240913 >>> print(res.pvalue) 0.00016999830001699983 The approximate probability of obtaining a test statistic less than or equal to the observed value under the null hypothesis is 0.0225%. This is again less than our chosen threshold of 5%, so again we have significant evidence to reject the null hypothesis in favor of the alternative. For large samples and number of permutations, the result is comparable to that of the corresponding asymptotic test, the independent sample t-test. >>> from scipy.stats import ttest_ind >>> res_asymptotic = ttest_ind(x, y, alternative='less') >>> print(res_asymptotic.pvalue) 0.00012688101537979522 The permutation distribution of the test statistic is provided for further investigation. >>> import matplotlib.pyplot as plt >>> plt.hist(res.null_distribution, bins=50) >>> plt.title("Permutation distribution of test statistic") >>> plt.xlabel("Value of Statistic") >>> plt.ylabel("Frequency") Inspection of the null distribution is essential if the statistic suffers from inaccuracy due to limited machine precision. Consider the following case: >>> from scipy.stats import pearsonr >>> x = [1, 2, 4, 3] >>> y = [2, 4, 6, 8] >>> def statistic(x, y): ... return pearsonr(x, y).statistic >>> res = permutation_test((x, y), statistic, vectorized=False, ... permutation_type='pairings', ... alternative='greater') >>> r, pvalue, null = res.statistic, res.pvalue, res.null_distribution In this case, some elements of the null distribution differ from the observed value of the correlation coefficient ``r`` due to numerical noise. We manually inspect the elements of the null distribution that are nearly the same as the observed value of the test statistic. >>> r 0.8 >>> unique = np.unique(null) >>> unique array([-1. 
, -0.8, -0.8, -0.6, -0.4, -0.2, -0.2, 0. , 0.2, 0.2, 0.4, 0.6, 0.8, 0.8, 1. ]) >>> unique[np.isclose(r, unique)].tolist() [0.7999999999999999, 0.8] If `permutation_test` were to perform the comparison naively, the elements of the null distribution with value ``0.7999999999999999`` would not be considered as extreme or more extreme as the observed value of the statistic, so the calculated p-value would be too small. >>> incorrect_pvalue = np.count_nonzero(null >= r) / len(null) >>> incorrect_pvalue 0.1111111111111111 # may vary Instead, `permutation_test` treats elements of the null distribution that are within a factor of ``1+1e-14`` of the observed value of the statistic to be equal to the test statistic. >>> correct_pvalue = np.count_nonzero(null >= r / (1+1e-14)) / len(null) >>> correct_pvalue 0.16666666666666666 >>> res.pvalue == correct_pvalue True This method of comparison is expected to be accurate in most practical situations, but the user is advised to assess this by inspecting the elements of the null distribution that are close to the observed value of the statistic. Also, consider the use of statistics that can be calculated using exact arithmetic (e.g. integer statistics). """ args = _permutation_test_iv(data, statistic, permutation_type, vectorized, n_resamples, batch, alternative, axis, random_state) (data, statistic, permutation_type, vectorized, n_resamples, batch, alternative, axis, random_state) = args observed = statistic(*data, axis=-1) null_calculators = {"pairings": _calculate_null_pairings, "samples": _calculate_null_samples, "independent": _calculate_null_both} null_calculator_args = (data, statistic, n_resamples, batch, random_state) calculate_null = null_calculators[permutation_type] null_distribution, n_resamples, exact_test = ( calculate_null(*null_calculator_args)) # See References [2] and [3] adjustment = 0 if exact_test else 1 # relative tolerance for detecting numerically distinct but # theoretically equal values in the null distribution eps = 1e-14 def less(null_distribution, observed): cmps = null_distribution <= observed * (1+eps) pvalues = (cmps.sum(axis=0) + adjustment) / (n_resamples + adjustment) return pvalues def greater(null_distribution, observed): cmps = null_distribution >= observed / (1+eps) pvalues = (cmps.sum(axis=0) + adjustment) / (n_resamples + adjustment) return pvalues def two_sided(null_distribution, observed): pvalues_less = less(null_distribution, observed) pvalues_greater = greater(null_distribution, observed) pvalues = np.minimum(pvalues_less, pvalues_greater) * 2 return pvalues compare = {"less": less, "greater": greater, "two-sided": two_sided} pvalues = compare[alternative](null_distribution, observed) pvalues = np.clip(pvalues, 0, 1) return PermutationTestResult(observed, pvalues, null_distribution)
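The only textual difference between the two versions above is the ``# may vary`` doctest directive on the naive p-value, but the surrounding code describes a comparison rule worth isolating: null-distribution values within a relative factor of ``1+1e-14`` of the observed statistic are counted as equal to it, and randomized tests add one to both numerator and denominator. The snippet below is a self-contained sketch of that rule, mirroring the ``greater`` helper in the function body; it is not SciPy's internal implementation.

import numpy as np


def greater_pvalue(null_distribution, observed, n_resamples, exact_test, eps=1e-14):
    # +1 in numerator and denominator for randomized tests, per [2]/[3].
    adjustment = 0 if exact_test else 1
    cmps = null_distribution >= observed / (1 + eps)
    return (cmps.sum(axis=0) + adjustment) / (n_resamples + adjustment)


# Toy null distribution containing a value that differs from the observed
# statistic 0.8 only through floating-point noise:
null = np.array([0.7999999999999999, 0.8, -0.2, 0.4, 1.0, -1.0])
print(greater_pvalue(null, 0.8, n_resamples=len(null), exact_test=True))  # 0.5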
30,034
def check_reset_seed(env: gym.Env):
    """Check that the environment can be reset with a seed.

    Args:
        env: The environment to check

    Raises:
        AssertionError: The environment cannot be reset with a random seed,
            even though `seed` or `kwargs` appear in the signature.
    """
    signature = inspect.signature(env.reset)
    if "seed" in signature.parameters or "kwargs" in signature.parameters:
        try:
            obs_1 = env.reset(seed=123)
            assert (
                obs_1 in env.observation_space
            ), "The observation returns by `env.reset(seed=123)` is not within the observation space"
            seed_123_rng = deepcopy(env.unwrapped.np_random)

            obs_2 = env.reset(seed=123)
            assert (
                obs_2 in env.observation_space
            ), "The observation returns by `env.reset(seed=123)` is not within the observation space"
            if env.spec is not None and env.spec.nondeterministic is False:
                assert data_equivalence(
                    obs_1, obs_2
                ), "`env.reset(seed=123)` is not deterministic as the observations are not equivalent"
            assert (
                env.unwrapped.np_random.bit_generator.state
                == seed_123_rng.bit_generator.state
            ), (
                "Mostly likely the environment reset function does not call `super().reset(seed=seed)` "
                "as the random generates are not same when the same seeds are passed to `env.reset`."
            )

            obs_3 = env.reset(seed=456)
            assert (
                obs_3 in env.observation_space
            ), "The observation returns by `env.reset(seed=456)` is not within the observation space"
            assert (
                env.unwrapped.np_random.bit_generator.state
                != seed_123_rng.bit_generator.state
            ), (
                "Mostly likely the environment reset function does not call `super().reset(seed=seed)` "
                "as the random generates are not different when different seeds are passed to `env.reset`."
            )

        except TypeError as e:
            raise AssertionError(
                "The environment cannot be reset with a random seed, even though `seed` or `kwargs` appear in the signature. "
                "This should never happen, please report this issue. "
                f"The error was: {e}"
            )

        if env.unwrapped._np_random is None:
            logger.warn(
                "Resetting the environment did not result in seeding its random number generator. "
                "This is likely due to not calling `super().reset(seed=seed)` in the `reset` method. "
                "If you do not use the python-level random number generator, this is not a problem."
            )

        seed_param = signature.parameters.get("seed")
        # Check the default value is None
        if seed_param is not None and seed_param.default is not None:
            logger.warn(
                "The default seed argument in reset should be `None`, "
                "otherwise the environment will by default always be deterministic. "
                f"Actual default: {seed_param.default}"
            )
    else:
        raise gym.error.Error(
            "The `reset` method does not provide the `seed` keyword argument"
        )
def check_reset_seed(env: gym.Env):
    """Check that the environment can be reset with a seed.

    Args:
        env: The environment to check

    Raises:
        AssertionError: The environment cannot be reset with a random seed,
            even though `seed` or `kwargs` appear in the signature.
    """
    signature = inspect.signature(env.reset)
    if "seed" in signature.parameters or "kwargs" in signature.parameters:
        try:
            obs_1 = env.reset(seed=123)
            assert (
                obs_1 in env.observation_space
            ), "The observation returns by `env.reset(seed=123)` is not within the observation space"
            seed_123_rng = deepcopy(env.unwrapped.np_random)

            obs_2 = env.reset(seed=123)
            assert (
                obs_2 in env.observation_space
            ), "The observation returned by `env.reset(seed=123)` is not within the observation space"
            if env.spec is not None and env.spec.nondeterministic is False:
                assert data_equivalence(
                    obs_1, obs_2
                ), "`env.reset(seed=123)` is not deterministic as the observations are not equivalent"
            assert (
                env.unwrapped.np_random.bit_generator.state
                == seed_123_rng.bit_generator.state
            ), (
                "Mostly likely the environment reset function does not call `super().reset(seed=seed)` "
                "as the random generates are not same when the same seeds are passed to `env.reset`."
            )

            obs_3 = env.reset(seed=456)
            assert (
                obs_3 in env.observation_space
            ), "The observation returns by `env.reset(seed=456)` is not within the observation space"
            assert (
                env.unwrapped.np_random.bit_generator.state
                != seed_123_rng.bit_generator.state
            ), (
                "Mostly likely the environment reset function does not call `super().reset(seed=seed)` "
                "as the random generates are not different when different seeds are passed to `env.reset`."
            )

        except TypeError as e:
            raise AssertionError(
                "The environment cannot be reset with a random seed, even though `seed` or `kwargs` appear in the signature. "
                "This should never happen, please report this issue. "
                f"The error was: {e}"
            )

        if env.unwrapped._np_random is None:
            logger.warn(
                "Resetting the environment did not result in seeding its random number generator. "
                "This is likely due to not calling `super().reset(seed=seed)` in the `reset` method. "
                "If you do not use the python-level random number generator, this is not a problem."
            )

        seed_param = signature.parameters.get("seed")
        # Check the default value is None
        if seed_param is not None and seed_param.default is not None:
            logger.warn(
                "The default seed argument in reset should be `None`, "
                "otherwise the environment will by default always be deterministic. "
                f"Actual default: {seed_param.default}"
            )
    else:
        raise gym.error.Error(
            "The `reset` method does not provide the `seed` keyword argument"
        )
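Both versions of the checker warn when ``reset`` does not call ``super().reset(seed=seed)``. Purely as an illustration, here is a toy environment whose ``reset`` would satisfy those checks; it follows the calling convention the checker itself uses (``reset`` returns just the observation), and details such as the exact API signature vary across gym versions.

import gym
import numpy as np


class SeededToyEnv(gym.Env):
    # Hypothetical example environment, not part of gym itself.
    observation_space = gym.spaces.Box(low=-1.0, high=1.0, shape=(1,), dtype=np.float32)
    action_space = gym.spaces.Discrete(2)

    def reset(self, *, seed=None, options=None):
        # The important line: seeds self.np_random so that repeated resets
        # with the same seed produce the same random number stream.
        super().reset(seed=seed)
        return self.np_random.uniform(-1.0, 1.0, size=(1,)).astype(np.float32)

    def step(self, action):
        obs = self.np_random.uniform(-1.0, 1.0, size=(1,)).astype(np.float32)
        return obs, 0.0, True, {}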
9,027
def test_get_nick_id_migration(db: SopelDB):
    """Test nick with wrong casemapping are properly migrated."""
    nick = 'Test[User]'
    old_nick = Identifier._lower_swapped(nick)

    # sanity check
    assert Identifier(nick).lower() != old_nick, (
        'Previous casemapping should be different from the new one')

    # insert old version
    with db.session() as session:
        nickname = Nicknames(
            nick_id=42,
            slug=Identifier._lower_swapped(nick),
            canonical=nick,
        )
        session.add(nickname)
        session.commit()

    assert db.get_nick_id(nick) == 42, 'Old nick must be converted.'

    with db.session() as session:
        nicknames = session.execute(
            select(Nicknames)
        ).scalars().fetchall()

        assert len(nicknames) == 1, (
            'There should be only one instance of Nicknames.')

        nickname_found = nicknames[0]
        assert nickname_found.nick_id == 42
        assert nickname_found.slug == Identifier(nick).lower()
        assert nickname_found.canonical == nick
def test_get_nick_id_migration(db: SopelDB):
    """Test nicks with wrong casemapping are properly migrated."""
    nick = 'Test[User]'
    old_nick = Identifier._lower_swapped(nick)

    # sanity check
    assert Identifier(nick).lower() != old_nick, (
        'Previous casemapping should be different from the new one')

    # insert old version
    with db.session() as session:
        nickname = Nicknames(
            nick_id=42,
            slug=Identifier._lower_swapped(nick),
            canonical=nick,
        )
        session.add(nickname)
        session.commit()

    assert db.get_nick_id(nick) == 42, 'Old nick must be converted.'

    with db.session() as session:
        nicknames = session.execute(
            select(Nicknames)
        ).scalars().fetchall()

        assert len(nicknames) == 1, (
            'There should be only one instance of Nicknames.')

        nickname_found = nicknames[0]
        assert nickname_found.nick_id == 42
        assert nickname_found.slug == Identifier(nick).lower()
        assert nickname_found.canonical == nick
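The test relies on 'Test[User]' producing different slugs under two IRC casemappings. As a rough illustration of why that happens, here is a simplified RFC 1459-style lowering, where ``[]\^`` are treated as the upper-case forms of ``{}|~``; Sopel's ``Identifier`` and its ``_lower_swapped`` helper are more involved, and the direction of the old versus new mapping is not claimed here.

def rfc1459_lower(nick):
    # Rough sketch: ASCII lowering plus the IRC-specific bracket mapping.
    return nick.lower().translate(str.maketrans('[]\\^', '{}|~'))


print(rfc1459_lower('Test[User]'))  # 'test{user}'
print('Test[User]'.lower())         # 'test[user]' -- plain ASCII lowering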
40,749
def test_wrong_input_shapes():
    m = MeanAbsoluteRelativeError()

    with pytest.raises(ValueError, match=r"Input data shapes should be the same, but given"):
        m.update((torch.rand(4), torch.rand(4, 1)))

    with pytest.raises(ValueError, match=r"Input data shapes should be the same, but given"):
        m.update((torch.rand(4, 1), torch.rand(4,)))
def test_wrong_input_shapes():
    m = MeanAbsoluteRelativeError()

    with pytest.raises(ValueError, match=r"Input data shapes should be the same, but given"):
        m.update((torch.rand(4), torch.rand(4, 1)))

    with raises(ValueError, match=r"Input data shapes should be the same, but given"):
        m.update((torch.rand(4, 1), torch.rand(4,)))
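The modified version spells the second context manager as ``raises(...)`` rather than ``pytest.raises(...)``; that form only resolves if the name has been imported with ``from pytest import raises``. A minimal self-contained example of the two equivalent spellings:

import pytest
from pytest import raises


def test_equivalent_spellings():
    with pytest.raises(ValueError, match="boom"):
        raise ValueError("boom")
    with raises(ValueError, match="boom"):
        raise ValueError("boom")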
33,647
def connect(node, mode=WORKER_MODE, log_to_driver=False, worker=global_worker, driver_object_store_memory=None, job_id=None, internal_config=None): """Connect this worker to the raylet, to Plasma, and to Redis. Args: node (ray.node.Node): The node to connect. mode: The mode of the worker. One of SCRIPT_MODE, WORKER_MODE, and LOCAL_MODE. log_to_driver (bool): If true, then output from all of the worker processes on all nodes will be directed to the driver. worker: The ray.Worker instance. driver_object_store_memory: Limit the amount of memory the driver can use in the object store when creating objects. job_id: The ID of job. If it's None, then we will generate one. internal_config: Dictionary of (str,str) containing internal config options to override the defaults. """ # Do some basic checking to make sure we didn't call ray.init twice. error_message = "Perhaps you called ray.init twice by accident?" assert not worker.connected, error_message assert worker.cached_functions_to_run is not None, error_message # Enable nice stack traces on SIGSEGV etc. try: if not faulthandler.is_enabled(): faulthandler.enable(all_threads=False) except io.UnsupportedOperation: pass # ignore ray._raylet.set_internal_config(internal_config) # Create a Redis client to primary. # The Redis client can safely be shared between threads. However, # that is not true of Redis pubsub clients. See the documentation at # https://github.com/andymccurdy/redis-py#thread-safety. worker.redis_client = node.create_redis_client() # Initialize some fields. if mode is SCRIPT_MODE: # This is the code path of driver mode. if job_id is None: # TODO(qwang): use `GcsClient::GenerateJobId()` here. job_id = JobID.from_int( int(worker.redis_client.incr("JobCounter"))) # When tasks are executed on remote workers in the context of multiple # drivers, the current job ID is used to keep track of which job is # responsible for the task so that error messages will be propagated to # the correct driver. worker.worker_id = ray.utils.compute_driver_id_from_job( job_id).binary() else: # We should not specify the job_id if it's `WORKER_MODE` or `LOCAL_MODE`. assert job_id is None job_id = JobID.nil() # TODO(qwang): Rename this to `worker_id_str` or type to `WorkerID` worker.worker_id = _random_string() if setproctitle: setproctitle.setproctitle("ray::IDLE") if not isinstance(job_id, JobID): raise TypeError("The type of given job id must be JobID.") # All workers start out as non-actors. A worker can be turned into an actor # after it is created. worker.node = node worker.set_mode(mode) # For driver's check that the version information matches the version # information that the Ray cluster was started with. try: ray.services.check_version_info(worker.redis_client) except Exception as e: if mode == SCRIPT_MODE: raise e elif mode == WORKER_MODE: traceback_str = traceback.format_exc() ray.utils.push_error_to_driver_through_redis( worker.redis_client, ray_constants.VERSION_MISMATCH_PUSH_ERROR, traceback_str, job_id=None) worker.lock = threading.RLock() # Create an object for interfacing with the global state. ray.state.state._initialize_global_state( node.redis_address, redis_password=node.redis_password) # Register the worker with Redis. if mode == SCRIPT_MODE: # The concept of a driver is the same as the concept of a "job". # Register the driver/job with Redis here. 
import __main__ as main driver_info = { "node_ip_address": node.node_ip_address, "driver_id": worker.worker_id, "start_time": time.time(), "plasma_store_socket": node.plasma_store_socket_name, "raylet_socket": node.raylet_socket_name, "name": (main.__file__ if hasattr(main, "__file__") else "INTERACTIVE MODE") } worker.redis_client.hmset(b"Drivers:" + worker.worker_id, driver_info) elif mode == WORKER_MODE: # Register the worker with Redis. worker_dict = { "node_ip_address": node.node_ip_address, "plasma_store_socket": node.plasma_store_socket_name, } # Check the RedirectOutput key in Redis and based on its value redirect # worker output and error to their own files. # This key is set in services.py when Redis is started. redirect_worker_output_val = worker.redis_client.get("RedirectOutput") if (redirect_worker_output_val is not None and int(redirect_worker_output_val) == 1): log_stdout_file, log_stderr_file = ( node.new_worker_redirected_log_file(worker.worker_id)) # Redirect stdout/stderr at the file descriptor level. If we simply # set sys.stdout and sys.stderr, then logging from C++ can fail to # be redirected. os.dup2(log_stdout_file.fileno(), sys.stdout.fileno()) os.dup2(log_stderr_file.fileno(), sys.stderr.fileno()) # We also manually set sys.stdout and sys.stderr because that seems # to have an affect on the output buffering. Without doing this, # stdout and stderr are heavily buffered resulting in seemingly # lost logging statements. sys.stdout = log_stdout_file sys.stderr = log_stderr_file # This should always be the first message to appear in the worker's # stdout and stderr log files. The string "Ray worker pid:" is # parsed in the log monitor process. print("Ray worker pid: {}".format(os.getpid())) print("Ray worker pid: {}".format(os.getpid()), file=sys.stderr) sys.stdout.flush() sys.stderr.flush() worker_dict["stdout_file"] = os.path.abspath(log_stdout_file.name) worker_dict["stderr_file"] = os.path.abspath(log_stderr_file.name) worker.redis_client.hmset(b"Workers:" + worker.worker_id, worker_dict) elif LOCAL_MODE: pass else: raise ValueError( "Invalid worker mode. Expected DRIVER, WORKER or LOCAL.") redis_address, redis_port = node.redis_address.split(":") gcs_options = ray._raylet.GcsClientOptions( redis_address, int(redis_port), node.redis_password, ) worker.core_worker = ray._raylet.CoreWorker( (mode == SCRIPT_MODE), node.plasma_store_socket_name, node.raylet_socket_name, job_id, gcs_options, node.get_logs_dir_path(), node.node_ip_address, node.node_manager_port, mode == LOCAL_MODE) if driver_object_store_memory is not None: worker.core_worker.set_object_store_client_options( "ray_driver_{}".format(os.getpid()), driver_object_store_memory) # Put something in the plasma store so that subsequent plasma store # accesses will be faster. Currently the first access is always slow, and # we don't want the user to experience this. if mode != LOCAL_MODE: temporary_object_id = ray.ObjectID.from_random() worker.put_object(1, object_id=temporary_object_id) ray.internal.free([temporary_object_id]) # Start the import thread worker.import_thread = import_thread.ImportThread(worker, mode, worker.threads_stopped) worker.import_thread.start() # If this is a driver running in SCRIPT_MODE, start a thread to print error # messages asynchronously in the background. 
Ideally the scheduler would # push messages to the driver's worker service, but we ran into bugs when # trying to properly shutdown the driver's worker service, so we are # temporarily using this implementation which constantly queries the # scheduler for new error messages. if mode == SCRIPT_MODE: q = queue.Queue() worker.listener_thread = threading.Thread( target=listen_error_messages_raylet, name="ray_listen_error_messages", args=(worker, q, worker.threads_stopped)) worker.printer_thread = threading.Thread( target=print_error_messages_raylet, name="ray_print_error_messages", args=(q, worker.threads_stopped)) worker.listener_thread.daemon = True worker.listener_thread.start() worker.printer_thread.daemon = True worker.printer_thread.start() if log_to_driver: worker.logger_thread = threading.Thread( target=print_logs, name="ray_print_logs", args=(worker.redis_client, worker.threads_stopped)) worker.logger_thread.daemon = True worker.logger_thread.start() if mode == SCRIPT_MODE: # Add the directory containing the script that is running to the Python # paths of the workers. Also add the current directory. Note that this # assumes that the directory structures on the machines in the clusters # are the same. script_directory = os.path.abspath(os.path.dirname(sys.argv[0])) current_directory = os.path.abspath(os.path.curdir) worker.run_function_on_all_workers( lambda worker_info: sys.path.insert(1, script_directory)) worker.run_function_on_all_workers( lambda worker_info: sys.path.insert(1, current_directory)) # TODO(rkn): Here we first export functions to run, then remote # functions. The order matters. For example, one of the functions to # run may set the Python path, which is needed to import a module used # to define a remote function. We may want to change the order to # simply be the order in which the exports were defined on the driver. # In addition, we will need to retain the ability to decide what the # first few exports are (mostly to set the Python path). Additionally, # note that the first exports to be defined on the driver will be the # ones defined in separate modules that are imported by the driver. # Export cached functions_to_run. for function in worker.cached_functions_to_run: worker.run_function_on_all_workers(function) worker.cached_functions_to_run = None
def connect(node, mode=WORKER_MODE, log_to_driver=False, worker=global_worker, driver_object_store_memory=None, job_id=None, internal_config=None): """Connect this worker to the raylet, to Plasma, and to Redis. Args: node (ray.node.Node): The node to connect. mode: The mode of the worker. One of SCRIPT_MODE, WORKER_MODE, and LOCAL_MODE. log_to_driver (bool): If true, then output from all of the worker processes on all nodes will be directed to the driver. worker: The ray.Worker instance. driver_object_store_memory: Limit the amount of memory the driver can use in the object store when creating objects. job_id: The ID of job. If it's None, then we will generate one. internal_config: Dictionary of (str,str) containing internal config options to override the defaults. """ # Do some basic checking to make sure we didn't call ray.init twice. error_message = "Perhaps you called ray.init twice by accident?" assert not worker.connected, error_message assert worker.cached_functions_to_run is not None, error_message # Enable nice stack traces on SIGSEGV etc. try: if not faulthandler.is_enabled(): faulthandler.enable(all_threads=False) except io.UnsupportedOperation: pass # ignore ray._raylet.set_internal_config(internal_config) # Create a Redis client to primary. # The Redis client can safely be shared between threads. However, # that is not true of Redis pubsub clients. See the documentation at # https://github.com/andymccurdy/redis-py#thread-safety. worker.redis_client = node.create_redis_client() # Initialize some fields. if mode is SCRIPT_MODE: # This is the code path of driver mode. if job_id is None: # TODO(qwang): use `GcsClient::GenerateJobId()` here. job_id = JobID.from_int( int(worker.redis_client.incr("JobCounter"))) # When tasks are executed on remote workers in the context of multiple # drivers, the current job ID is used to keep track of which job is # responsible for the task so that error messages will be propagated to # the correct driver. worker.worker_id = ray.utils.compute_driver_id_from_job( job_id).binary() else: # We should not specify the job_id if it's `WORKER_MODE` or `LOCAL_MODE`. assert job_id is None job_id = JobID.nil() # TODO(qwang): Rename this to `worker_id_str` or type to `WorkerID` worker.worker_id = _random_string() if setproctitle: setproctitle.setproctitle("ray::IDLE") if not isinstance(job_id, JobID): raise TypeError("The type of given job id must be JobID.") # All workers start out as non-actors. A worker can be turned into an actor # after it is created. worker.node = node worker.set_mode(mode) # For driver's check that the version information matches the version # information that the Ray cluster was started with. try: ray.services.check_version_info(worker.redis_client) except Exception as e: if mode == SCRIPT_MODE: raise e elif mode == WORKER_MODE: traceback_str = traceback.format_exc() ray.utils.push_error_to_driver_through_redis( worker.redis_client, ray_constants.VERSION_MISMATCH_PUSH_ERROR, traceback_str, job_id=None) worker.lock = threading.RLock() # Create an object for interfacing with the global state. ray.state.state._initialize_global_state( node.redis_address, redis_password=node.redis_password) # Register the worker with Redis. if mode == SCRIPT_MODE: # The concept of a driver is the same as the concept of a "job". # Register the driver/job with Redis here. 
import __main__ as main driver_info = { "node_ip_address": node.node_ip_address, "driver_id": worker.worker_id, "start_time": time.time(), "plasma_store_socket": node.plasma_store_socket_name, "raylet_socket": node.raylet_socket_name, "name": (main.__file__ if hasattr(main, "__file__") else "INTERACTIVE MODE") } worker.redis_client.hmset(b"Drivers:" + worker.worker_id, driver_info) elif mode == WORKER_MODE: # Register the worker with Redis. worker_dict = { "node_ip_address": node.node_ip_address, "plasma_store_socket": node.plasma_store_socket_name, } # Check the RedirectOutput key in Redis and based on its value redirect # worker output and error to their own files. # This key is set in services.py when Redis is started. redirect_worker_output_val = worker.redis_client.get("RedirectOutput") if (redirect_worker_output_val is not None and int(redirect_worker_output_val) == 1): log_stdout_file, log_stderr_file = ( node.new_worker_redirected_log_file(worker.worker_id)) # Redirect stdout/stderr at the file descriptor level. If we simply # set sys.stdout and sys.stderr, then logging from C++ can fail to # be redirected. os.dup2(log_stdout_file.fileno(), sys.stdout.fileno()) os.dup2(log_stderr_file.fileno(), sys.stderr.fileno()) # We also manually set sys.stdout and sys.stderr because that seems # to have an affect on the output buffering. Without doing this, # stdout and stderr are heavily buffered resulting in seemingly # lost logging statements. sys.stdout = log_stdout_file sys.stderr = log_stderr_file # This should always be the first message to appear in the worker's # stdout and stderr log files. The string "Ray worker pid:" is # parsed in the log monitor process. print("Ray worker pid: {}".format(os.getpid())) print("Ray worker pid: {}".format(os.getpid()), file=sys.stderr) sys.stdout.flush() sys.stderr.flush() worker_dict["stdout_file"] = os.path.abspath(log_stdout_file.name) worker_dict["stderr_file"] = os.path.abspath(log_stderr_file.name) worker.redis_client.hmset(b"Workers:" + worker.worker_id, worker_dict) elif not LOCAL_MODE: raise ValueError( "Invalid worker mode. Expected DRIVER, WORKER or LOCAL.") redis_address, redis_port = node.redis_address.split(":") gcs_options = ray._raylet.GcsClientOptions( redis_address, int(redis_port), node.redis_password, ) worker.core_worker = ray._raylet.CoreWorker( (mode == SCRIPT_MODE), node.plasma_store_socket_name, node.raylet_socket_name, job_id, gcs_options, node.get_logs_dir_path(), node.node_ip_address, node.node_manager_port, mode == LOCAL_MODE) if driver_object_store_memory is not None: worker.core_worker.set_object_store_client_options( "ray_driver_{}".format(os.getpid()), driver_object_store_memory) # Put something in the plasma store so that subsequent plasma store # accesses will be faster. Currently the first access is always slow, and # we don't want the user to experience this. if mode != LOCAL_MODE: temporary_object_id = ray.ObjectID.from_random() worker.put_object(1, object_id=temporary_object_id) ray.internal.free([temporary_object_id]) # Start the import thread worker.import_thread = import_thread.ImportThread(worker, mode, worker.threads_stopped) worker.import_thread.start() # If this is a driver running in SCRIPT_MODE, start a thread to print error # messages asynchronously in the background. 
Ideally the scheduler would # push messages to the driver's worker service, but we ran into bugs when # trying to properly shutdown the driver's worker service, so we are # temporarily using this implementation which constantly queries the # scheduler for new error messages. if mode == SCRIPT_MODE: q = queue.Queue() worker.listener_thread = threading.Thread( target=listen_error_messages_raylet, name="ray_listen_error_messages", args=(worker, q, worker.threads_stopped)) worker.printer_thread = threading.Thread( target=print_error_messages_raylet, name="ray_print_error_messages", args=(q, worker.threads_stopped)) worker.listener_thread.daemon = True worker.listener_thread.start() worker.printer_thread.daemon = True worker.printer_thread.start() if log_to_driver: worker.logger_thread = threading.Thread( target=print_logs, name="ray_print_logs", args=(worker.redis_client, worker.threads_stopped)) worker.logger_thread.daemon = True worker.logger_thread.start() if mode == SCRIPT_MODE: # Add the directory containing the script that is running to the Python # paths of the workers. Also add the current directory. Note that this # assumes that the directory structures on the machines in the clusters # are the same. script_directory = os.path.abspath(os.path.dirname(sys.argv[0])) current_directory = os.path.abspath(os.path.curdir) worker.run_function_on_all_workers( lambda worker_info: sys.path.insert(1, script_directory)) worker.run_function_on_all_workers( lambda worker_info: sys.path.insert(1, current_directory)) # TODO(rkn): Here we first export functions to run, then remote # functions. The order matters. For example, one of the functions to # run may set the Python path, which is needed to import a module used # to define a remote function. We may want to change the order to # simply be the order in which the exports were defined on the driver. # In addition, we will need to retain the ability to decide what the # first few exports are (mostly to set the Python path). Additionally, # note that the first exports to be defined on the driver will be the # ones defined in separate modules that are imported by the driver. # Export cached functions_to_run. for function in worker.cached_functions_to_run: worker.run_function_on_all_workers(function) worker.cached_functions_to_run = None
7,187
def regionprops(label_image, intensity_image=None, cache=True, coordinates=None): r"""Measure properties of labeled image regions. Parameters ---------- label_image : (N, M) ndarray Labeled input image. Labels with value 0 are ignored. .. versionchanged:: 0.14.1 Previously, ``label_image`` was processed by ``numpy.squeeze`` and so any number of singleton dimensions was allowed. This resulted in inconsistent handling of images with singleton dimensions. To recover the old behaviour, use ``regionprops(np.squeeze(label_image), ...)``. intensity_image : (N, M) ndarray, optional Intensity (i.e., input) image with same size as labeled image. Default is None. cache : bool, optional Determine whether to cache calculated properties. The computation is much faster for cached properties, whereas the memory consumption increases. coordinates : DEPRECATED This argument is deprecated and will be removed in a future version of scikit-image. See :ref:`Coordinate conventions <numpy-images-coordinate-conventions>` for more details. .. deprecated:: 0.16.0 Use "rc" coordinates everywhere. It may be sufficient to call ``numpy.transpose`` on your label image to get the same values as 0.15 and earlier. However, for some properties, the transformation will be less trivial. For example, the new orientation is :math:`\frac{\pi}{2}` plus the old orientation. Returns ------- properties : list of RegionProperties Each item describes one labeled region, and can be accessed using the attributes listed below. Notes ----- The following properties can be accessed as attributes or keys: **area** : int Number of pixels of the region. **bbox** : tuple Bounding box ``(min_row, min_col, max_row, max_col)``. Pixels belonging to the bounding box are in the half-open interval ``[min_row; max_row)`` and ``[min_col; max_col)``. **bbox_area** : int Number of pixels of bounding box. **centroid** : array Centroid coordinate tuple ``(row, col)``. **convex_area** : int Number of pixels of convex hull image, which is the smallest convex polygon that encloses the region. **convex_image** : (H, J) ndarray Binary convex hull image which has the same size as bounding box. **coords** : (N, 2) ndarray Coordinate list ``(row, col)`` of the region. **eccentricity** : float Eccentricity of the ellipse that has the same second-moments as the region. The eccentricity is the ratio of the focal distance (distance between focal points) over the major axis length. The value is in the interval [0, 1). When it is 0, the ellipse becomes a circle. **equivalent_diameter** : float The diameter of a circle with the same area as the region. **euler_number** : int Euler characteristic of region. Computed as number of objects (= 1) subtracted by number of holes (8-connectivity). **extent** : float Ratio of pixels in the region to pixels in the total bounding box. Computed as ``area / (rows * cols)`` **filled_area** : int Number of pixels of the region will all the holes filled in. Describes the area of the filled_image. **filled_image** : (H, J) ndarray Binary region image with filled holes which has the same size as bounding box. **image** : (H, J) ndarray Sliced binary region image which has the same size as bounding box. **inertia_tensor** : ndarray Inertia tensor of the region for the rotation around its mass. **inertia_tensor_eigvals** : tuple The eigenvalues of the inertia tensor in decreasing order. **intensity_image** : ndarray Image inside region bounding box. **label** : int The label in the labeled input image. 
**local_centroid** : array Centroid coordinate tuple ``(row, col)``, relative to region bounding box. **major_axis_length** : float The length of the major axis of the ellipse that has the same normalized second central moments as the region. **max_intensity** : float Value with the greatest intensity in the region. **mean_intensity** : float Value with the mean intensity in the region. **min_intensity** : float Value with the least intensity in the region. **minor_axis_length** : float The length of the minor axis of the ellipse that has the same normalized second central moments as the region. **moments** : (3, 3) ndarray Spatial moments up to 3rd order:: m_ij = sum{ array(row, col) * row^i * col^j } where the sum is over the `row`, `col` coordinates of the region. **moments_central** : (3, 3) ndarray Central moments (translation invariant) up to 3rd order:: mu_ij = sum{ array(row, col) * (row - row_c)^i * (col - col_c)^j } where the sum is over the `row`, `col` coordinates of the region, and `row_c` and `col_c` are the coordinates of the region's centroid. **moments_hu** : tuple Hu moments (translation, scale and rotation invariant). **moments_normalized** : (3, 3) ndarray Normalized moments (translation and scale invariant) up to 3rd order:: nu_ij = mu_ij / m_00^[(i+j)/2 + 1] where `m_00` is the zeroth spatial moment. **orientation** : float Angle between the 0th axis (rows) and the major axis of the ellipse that has the same second moments as the region, ranging from `-pi/2` to `pi/2` counter-clockwise. **perimeter** : float Perimeter of object which approximates the contour as a line through the centers of border pixels using a 4-connectivity. **slice** : tuple of slices A slice to extract the object from the source image. **solidity** : float Ratio of pixels in the region to pixels of the convex hull image. **weighted_centroid** : array Centroid coordinate tuple ``(row, col)`` weighted with intensity image. **weighted_local_centroid** : array Centroid coordinate tuple ``(row, col)``, relative to region bounding box, weighted with intensity image. **weighted_moments** : (3, 3) ndarray Spatial moments of intensity image up to 3rd order:: wm_ij = sum{ array(row, col) * row^i * col^j } where the sum is over the `row`, `col` coordinates of the region. **weighted_moments_central** : (3, 3) ndarray Central moments (translation invariant) of intensity image up to 3rd order:: wmu_ij = sum{ array(row, col) * (row - row_c)^i * (col - col_c)^j } where the sum is over the `row`, `col` coordinates of the region, and `row_c` and `col_c` are the coordinates of the region's weighted centroid. **weighted_moments_hu** : tuple Hu moments (translation, scale and rotation invariant) of intensity image. **weighted_moments_normalized** : (3, 3) ndarray Normalized moments (translation and scale invariant) of intensity image up to 3rd order:: wnu_ij = wmu_ij / wm_00^[(i+j)/2 + 1] where ``wm_00`` is the zeroth spatial moment (intensity-weighted area). Each region also supports iteration, so that you can do:: for prop in region: print(prop, region[prop]) See Also -------- label References ---------- .. [1] Wilhelm Burger, Mark Burge. Principles of Digital Image Processing: Core Algorithms. Springer-Verlag, London, 2009. .. [2] B. Jähne. Digital Image Processing. Springer-Verlag, Berlin-Heidelberg, 6. edition, 2005. .. [3] T. H. Reiss. Recognizing Planar Objects Using Invariant Image Features, from Lecture notes in computer science, p. 676. Springer, Berlin, 1993. .. 
[4] https://en.wikipedia.org/wiki/Image_moment Examples -------- >>> from skimage import data, util >>> from skimage.measure import label >>> img = util.img_as_ubyte(data.coins()) > 110 >>> label_img = label(img, connectivity=img.ndim) >>> props = regionprops(label_img) >>> # centroid of first labeled object >>> props[0].centroid (22.729879860483141, 81.912285234465827) >>> # centroid of first labeled object >>> props[0]['centroid'] (22.729879860483141, 81.912285234465827) """ if label_image.ndim not in (2, 3): raise TypeError('Only 2-D and 3-D images supported.') if not np.issubdtype(label_image.dtype, np.integer): raise TypeError('Non-integer image types are ambiguous:' 'use ndimage.label to label the connected' 'components of the image,' 'or label_image.astype(np.uint8) to interpret' 'the True values as a single label') if coordinates is not None: if coordinates == 'rc': msg = ('The coordinates keyword argument to skimage.measure.' 'regionprops is deprecated. All features are now computed ' 'in rc (row-column) coordinates. Please remove ' '`coordinates="rc"` from all calls to regionprops before ' 'updating scikit-image.') warn(msg, stacklevel=2, category=FutureWarning) else: msg = ('Values other than "rc" for the "coordinates" argument ' 'to skimage.measure.regionprops are no longer supported. ' 'You should update your code to use "rc" coordinates and ' 'stop using the "coordinates" argument, or use skimage ' 'version 0.15.x or earlier.') raise ValueError(msg) regions = [] objects = ndi.find_objects(label_image) for i, sl in enumerate(objects): if sl is None: continue label = i + 1 props = RegionProperties(sl, label, label_image, intensity_image, cache) regions.append(props) return regions
def regionprops(label_image, intensity_image=None, cache=True, coordinates=None): r"""Measure properties of labeled image regions. Parameters ---------- label_image : (N, M) ndarray Labeled input image. Labels with value 0 are ignored. .. versionchanged:: 0.14.1 Previously, ``label_image`` was processed by ``numpy.squeeze`` and so any number of singleton dimensions was allowed. This resulted in inconsistent handling of images with singleton dimensions. To recover the old behaviour, use ``regionprops(np.squeeze(label_image), ...)``. intensity_image : (N, M) ndarray, optional Intensity (i.e., input) image with same size as labeled image. Default is None. cache : bool, optional Determine whether to cache calculated properties. The computation is much faster for cached properties, whereas the memory consumption increases. coordinates : DEPRECATED This argument is deprecated and will be removed in a future version of scikit-image. See :ref:`Coordinate conventions <numpy-images-coordinate-conventions>` for more details. .. deprecated:: 0.16.0 Use "rc" coordinates everywhere. It may be sufficient to call ``numpy.transpose`` on your label image to get the same values as 0.15 and earlier. However, for some properties, the transformation will be less trivial. For example, the new orientation is :math:`\frac{\pi}{2}` plus the old orientation. Returns ------- properties : list of RegionProperties Each item describes one labeled region, and can be accessed using the attributes listed below. Notes ----- The following properties can be accessed as attributes or keys: **area** : int Number of pixels of the region. **bbox** : tuple Bounding box ``(min_row, min_col, max_row, max_col)``. Pixels belonging to the bounding box are in the half-open interval ``[min_row; max_row)`` and ``[min_col; max_col)``. **bbox_area** : int Number of pixels of bounding box. **centroid** : array Centroid coordinate tuple ``(row, col)``. **convex_area** : int Number of pixels of convex hull image, which is the smallest convex polygon that encloses the region. **convex_image** : (H, J) ndarray Binary convex hull image which has the same size as bounding box. **coords** : (N, 2) ndarray Coordinate list ``(row, col)`` of the region. **eccentricity** : float Eccentricity of the ellipse that has the same second-moments as the region. The eccentricity is the ratio of the focal distance (distance between focal points) over the major axis length. The value is in the interval [0, 1). When it is 0, the ellipse becomes a circle. **equivalent_diameter** : float The diameter of a circle with the same area as the region. **euler_number** : int Euler characteristic of region. Computed as number of objects (= 1) subtracted by number of holes (8-connectivity). **extent** : float Ratio of pixels in the region to pixels in the total bounding box. Computed as ``area / (rows * cols)`` **filled_area** : int Number of pixels of the region will all the holes filled in. Describes the area of the filled_image. **filled_image** : (H, J) ndarray Binary region image with filled holes which has the same size as bounding box. **image** : (H, J) ndarray Sliced binary region image which has the same size as bounding box. **inertia_tensor** : ndarray Inertia tensor of the region for the rotation around its mass. **inertia_tensor_eigvals** : tuple The eigenvalues of the inertia tensor in decreasing order. **intensity_image** : ndarray Image inside region bounding box. **label** : int The label in the labeled input image. 
**local_centroid** : array Centroid coordinate tuple ``(row, col)``, relative to region bounding box. **major_axis_length** : float The length of the major axis of the ellipse that has the same normalized second central moments as the region. **max_intensity** : float Value with the greatest intensity in the region. **mean_intensity** : float Value with the mean intensity in the region. **min_intensity** : float Value with the least intensity in the region. **minor_axis_length** : float The length of the minor axis of the ellipse that has the same normalized second central moments as the region. **moments** : (3, 3) ndarray Spatial moments up to 3rd order:: m_ij = sum{ array(row, col) * row^i * col^j } where the sum is over the `row`, `col` coordinates of the region. **moments_central** : (3, 3) ndarray Central moments (translation invariant) up to 3rd order:: mu_ij = sum{ array(row, col) * (row - row_c)^i * (col - col_c)^j } where the sum is over the `row`, `col` coordinates of the region, and `row_c` and `col_c` are the coordinates of the region's centroid. **moments_hu** : tuple Hu moments (translation, scale and rotation invariant). **moments_normalized** : (3, 3) ndarray Normalized moments (translation and scale invariant) up to 3rd order:: nu_ij = mu_ij / m_00^[(i+j)/2 + 1] where `m_00` is the zeroth spatial moment. **orientation** : float Angle between the 0th axis (rows) and the major axis of the ellipse that has the same second moments as the region, ranging from `-pi/2` to `pi/2` counter-clockwise. **perimeter** : float Perimeter of object which approximates the contour as a line through the centers of border pixels using a 4-connectivity. **slice** : tuple of slices A slice to extract the object from the source image. **solidity** : float Ratio of pixels in the region to pixels of the convex hull image. **weighted_centroid** : array Centroid coordinate tuple ``(row, col)`` weighted with intensity image. **weighted_local_centroid** : array Centroid coordinate tuple ``(row, col)``, relative to region bounding box, weighted with intensity image. **weighted_moments** : (3, 3) ndarray Spatial moments of intensity image up to 3rd order:: wm_ij = sum{ array(row, col) * row^i * col^j } where the sum is over the `row`, `col` coordinates of the region. **weighted_moments_central** : (3, 3) ndarray Central moments (translation invariant) of intensity image up to 3rd order:: wmu_ij = sum{ array(row, col) * (row - row_c)^i * (col - col_c)^j } where the sum is over the `row`, `col` coordinates of the region, and `row_c` and `col_c` are the coordinates of the region's weighted centroid. **weighted_moments_hu** : tuple Hu moments (translation, scale and rotation invariant) of intensity image. **weighted_moments_normalized** : (3, 3) ndarray Normalized moments (translation and scale invariant) of intensity image up to 3rd order:: wnu_ij = wmu_ij / wm_00^[(i+j)/2 + 1] where ``wm_00`` is the zeroth spatial moment (intensity-weighted area). Each region also supports iteration, so that you can do:: for prop in region: print(prop, region[prop]) See Also -------- label References ---------- .. [1] Wilhelm Burger, Mark Burge. Principles of Digital Image Processing: Core Algorithms. Springer-Verlag, London, 2009. .. [2] B. Jähne. Digital Image Processing. Springer-Verlag, Berlin-Heidelberg, 6. edition, 2005. .. [3] T. H. Reiss. Recognizing Planar Objects Using Invariant Image Features, from Lecture notes in computer science, p. 676. Springer, Berlin, 1993. .. 
[4] https://en.wikipedia.org/wiki/Image_moment Examples -------- >>> from skimage import data, util >>> from skimage.measure import label >>> img = util.img_as_ubyte(data.coins()) > 110 >>> label_img = label(img, connectivity=img.ndim) >>> props = regionprops(label_img) >>> # centroid of first labeled object >>> props[0].centroid (22.729879860483141, 81.912285234465827) >>> # centroid of first labeled object >>> props[0]['centroid'] (22.729879860483141, 81.912285234465827) """ if label_image.ndim not in (2, 3): raise TypeError('Only 2-D and 3-D images supported.') if not np.issubdtype(label_image.dtype, np.integer): raise TypeError('Non-integer image types are ambiguous:' 'use ndimage.label to label the connected' 'components of the image,' 'or label_image.astype(np.uint8) to interpret ' 'the True values as a single label') if coordinates is not None: if coordinates == 'rc': msg = ('The coordinates keyword argument to skimage.measure.' 'regionprops is deprecated. All features are now computed ' 'in rc (row-column) coordinates. Please remove ' '`coordinates="rc"` from all calls to regionprops before ' 'updating scikit-image.') warn(msg, stacklevel=2, category=FutureWarning) else: msg = ('Values other than "rc" for the "coordinates" argument ' 'to skimage.measure.regionprops are no longer supported. ' 'You should update your code to use "rc" coordinates and ' 'stop using the "coordinates" argument, or use skimage ' 'version 0.15.x or earlier.') raise ValueError(msg) regions = [] objects = ndi.find_objects(label_image) for i, sl in enumerate(objects): if sl is None: continue label = i + 1 props = RegionProperties(sl, label, label_image, intensity_image, cache) regions.append(props) return regions
6,493
def execute(filters=None):
    if not filters:
        filters = {}

    if filters.hide_year_field == 1:
        filters.year = get_datetime().year

    conditions, filters = get_conditions(filters)
    columns, days = get_columns(filters)
    att_map = get_attendance_list(conditions, filters)

    if filters.group_by:
        emp_map, group_by_parameters = get_employee_details(filters.group_by, filters.company)
        holiday_list = []
        for parameter in group_by_parameters:
            h_list = [emp_map[parameter][d]["holiday_list"] for d in emp_map[parameter] if emp_map[parameter][d]["holiday_list"]]
            holiday_list += h_list
    else:
        emp_map = get_employee_details(filters.group_by, filters.company)
        holiday_list = [emp_map[d]["holiday_list"] for d in emp_map if emp_map[d]["holiday_list"]]

    default_holiday_list = frappe.get_cached_value('Company', filters.get("company"), "default_holiday_list")
    holiday_list.append(default_holiday_list)
    holiday_list = list(set(holiday_list))

    holiday_map = get_holiday(holiday_list, filters["from_date"], filters['to_date'])

    data = []

    leave_list = None
    if filters.summarized_view:
        leave_types = frappe.db.sql("""select name from `tabLeave Type`""", as_list=True)
        leave_list = [d[0] + ":Float:120" for d in leave_types]
        columns.extend(leave_list)
        columns.extend([_("Total Late Entries") + ":Float:120", _("Total Early Exits") + ":Float:120"])

    if filters.group_by:
        emp_att_map = {}
        for parameter in group_by_parameters:
            data.append([ "<b>"+ parameter + "</b>"])
            record, aaa = add_data(emp_map[parameter], att_map, holiday_map, filters, default_holiday_list, leave_list=leave_list)
            emp_att_map.update(aaa)
            data += record
    else:
        record, emp_att_map = add_data(emp_map, att_map, holiday_map, filters, default_holiday_list, leave_list=leave_list)
        data += record

    chart_data = get_chart_data(emp_att_map, days)

    return columns, data, None, chart_data
def execute(filters=None):
    if not filters:
        filters = {}

    if filters.hide_year_field == 1:
        filters.year = get_datetime().year

    conditions, filters = get_conditions(filters)
    columns, days = get_columns(filters)
    att_map = get_attendance_list(conditions, filters)

    if filters.group_by:
        emp_map, group_by_parameters = get_employee_details(filters.group_by, filters.company)
        holiday_list = []
        for parameter in group_by_parameters:
            h_list = [emp_map[parameter][d]["holiday_list"] for d in emp_map[parameter] if emp_map[parameter][d]["holiday_list"]]
            holiday_list += h_list
    else:
        emp_map = get_employee_details(filters.group_by, filters.company)
        holiday_list = [emp_map[d]["holiday_list"] for d in emp_map if emp_map[d]["holiday_list"]]

    default_holiday_list = frappe.get_cached_value('Company', filters.get("company"), "default_holiday_list")
    holiday_list.append(default_holiday_list)
    holiday_list = list(set(holiday_list))

    holiday_map = get_holiday(holiday_list, filters["from_date"], filters['to_date'])

    data = []

    leave_list = None
    if filters.summarized_view:
        leave_types = frappe.db.sql("""select name from `tabLeave Type`""", as_list=True)
        leave_list = [d[0] + ":Float:120" for d in leave_types]
        columns.extend(leave_list)
        columns.extend([_("Total Late Entries") + ":Float:120", _("Total Early Exits") + ":Float:120"])

    if filters.group_by:
        emp_att_map = {}
        for parameter in group_by_parameters:
            data.append([ "<b>"+ parameter + "</b>"])
            record, aaa = add_data(emp_map[parameter], att_map, holiday_map, filters, default_holiday_list, leave_list=leave_list)
            emp_att_map.update(aaa)
            data += record
    else:
        record, attendance = add_data(emp_map, att_map, holiday_map, filters, default_holiday_list, leave_list=leave_list)
        data += record

    chart_data = get_chart_data(emp_att_map, days)

    return columns, data, None, chart_data
20,314
def status(options: 'argparse.Namespace') -> None:
    print('Subproject status')
    for w in glob('subprojects/*.wrap'):
        name = os.path.basename(w)[:-5]
        try:
            (latest_branch, latest_revision) = get_latest_version(name)
        except Exception:
            print('', name, 'not available in wrapdb.', file=sys.stderr)
            continue
        try:
            (current_branch, current_revision, _, _, _) = get_current_version(w)
        except Exception:
            print('Wrap file "{}" not from wrapdb.'.format(name), file=sys.stderr)
            continue
        if current_branch == latest_branch and current_revision == latest_revision:
            print('', name, f'up to date. Branch {current_branch}, revision {current_revision}.')
        else:
            print('', name, f'not up to date. Have {current_branch} {current_revision}, but {latest_branch} {latest_revision} is available.')
def status(options: 'argparse.Namespace') -> None:
    print('Subproject status')
    for w in glob('subprojects/*.wrap'):
        name = os.path.basename(w)[:-5]
        try:
            (latest_branch, latest_revision) = get_latest_version(name)
        except Exception:
            print('', name, 'not available in wrapdb.', file=sys.stderr)
            continue
        try:
            (current_branch, current_revision, _, _, _) = get_current_version(w)
        except Exception:
            print(f'Wrap file {name!r} not from wrapdb.', file=sys.stderr)
            continue
        if current_branch == latest_branch and current_revision == latest_revision:
            print('', name, f'up to date. Branch {current_branch}, revision {current_revision}.')
        else:
            print('', name, f'not up to date. Have {current_branch} {current_revision}, but {latest_branch} {latest_revision} is available.')
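The only change in this pair is cosmetic: the '{}'.format(name) call with hand-written double quotes becomes an f-string using the !r conversion, which quotes the name via repr(). A minimal illustration, with a made-up wrap name:

name = 'zlib'

print('Wrap file "{}" not from wrapdb.'.format(name))  # Wrap file "zlib" not from wrapdb.
print(f'Wrap file {name!r} not from wrapdb.')          # Wrap file 'zlib' not from wrapdb.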
30,602
def get_current_table(grid_id: str, sort_by: Optional[str], columns: Optional[str]) -> \
        Tuple[List[Dict[Any, Any]], Any]:
    """ Get current grid data

    Date retreived:
        1. Column names.
        2. Current grid data.

    Validate:
        1. Correct number of context paths.
        2. Sort_by is a name of a column.
        3. Grid ID.
        4. Columns exists.

    Args:
        grid_id(str): Normalized Grid ID (Machine name in `Settings -> Advanced -> Fields -> Field property` or in
            Incident Context Data.
        sort_by(str): The static name of the column to sort the table rows by.
        columns(str): Comma separated list of columns names, Should be defined if grid is empty otherwise the
            automation detect it automatically.

    Returns:
        list: Current grid as dict in following structure - [{'col1': 'val1'},{'col2': 'val2'},{'col3': 'val3'}, {'col4': 'val4'}].
        list: Table columns name.
    """
    # Get current Grid data
    current_table: Optional[List[dict]] = demisto.incidents()[0].get("CustomFields", {}).get(grid_id)
    if not current_table:
        raise ValueError(f"The grid id isn't valid : {grid_id}")

    # Validate columns number the same as context paths - If no data initiated skip validation, but check if columns specified
    if not columns:
        raise ValueError("Columns not specified - Its a mandatory arg when grid is empty.")

    # Get columns
    columns = argToList(columns.lower())

    # Validate sort is valide col
    if sort_by and sort_by not in columns:
        raise ValueError(f'sort_by: {sort_by} is not columns: {columns}')

    return current_table, columns
def get_current_table(grid_id: str, sort_by: Optional[str], columns: Optional[str]) -> \
        Tuple[List[Dict[Any, Any]], Any]:
    """ Get current grid data

    Data retrieved:
        1. Column names.
        2. Current grid data.

    Validate:
        1. Correct number of context paths.
        2. Sort_by is a name of a column.
        3. Grid ID.
        4. Columns exists.

    Args:
        grid_id(str): Normalized Grid ID (Machine name in `Settings -> Advanced -> Fields -> Field property` or in
            Incident Context Data.
        sort_by(str): The static name of the column to sort the table rows by.
        columns(str): Comma separated list of columns names, Should be defined if grid is empty otherwise the
            automation detect it automatically.

    Returns:
        list: Current grid as dict in following structure - [{'col1': 'val1'},{'col2': 'val2'},{'col3': 'val3'}, {'col4': 'val4'}].
        list: Table columns name.
    """
    # Get current Grid data
    current_table: Optional[List[dict]] = demisto.incidents()[0].get("CustomFields", {}).get(grid_id)
    if not current_table:
        raise ValueError(f"The grid id isn't valid : {grid_id}")

    # Validate columns number the same as context paths - If no data initiated skip validation, but check if columns specified
    if not columns:
        raise ValueError("Columns not specified - Its a mandatory arg when grid is empty.")

    # Get columns
    columns = argToList(columns.lower())

    # Validate sort is valide col
    if sort_by and sort_by not in columns:
        raise ValueError(f'sort_by: {sort_by} is not columns: {columns}')

    return current_table, columns
57,781
def fetch_indicators(client: Client, tlp_color, include_deleted, type, malicious_confidence, filter, q, limit):
    """ fetch indicators from the Crowdstrike Intel

    Args:
        client: Client object
        tlp_color (str): Traffic Light Protocol color.
        include_deleted (bool): include deleted indicators. (send just as parameter)
        type (list): type indicator.
        malicious_confidence(str): medium, low, high
        filter (str): indicators filter.
        q (str): generic phrase match
        limit (int): max num of indicators to fetch

    Returns:
        list of indicators(list)
    """
    parsed_indicators = client.get_indicators(
        type=type,
        malicious_confidence=malicious_confidence,
        filter=filter,
        q=q,
        include_deleted=include_deleted,
        fetch_command=True,
        limit=limit,
        tlp_color=tlp_color
    )

    return parsed_indicators
def fetch_indicators(client: Client, tlp_color, include_deleted, type, malicious_confidence, filter, q, limit):
    """ fetch indicators from the Crowdstrike Intel

    Args:
        client: Client object
        tlp_color (str): Traffic Light Protocol color.
        include_deleted (bool): include deleted indicators. (send just as parameter)
        type (list): type indicator.
        malicious_confidence(str): medium, low, high
        filter (str): indicators filter.
        q (str): generic phrase match
        limit (int): max num of indicators to fetch

    Returns:
        list of indicators(list)
    """
    parsed_indicators = client.get_indicators(
        type=self.type,
        malicious_confidence=self.malicious_confidence,
        filter=filter,
        q=self.generic_phrase,
        include_deleted=self.include_deleted,
        fetch_command=True,
        tlp_color=self.tlp_color
    )

    return parsed_indicators
56,404
def run(build_command, output_file_name):
    """
    Run bazel aquaery and generate compile_commands.json:
    - get bazel info params
    - run bazel aquery and parse output as JSON data
    - create compile commands from JSON data:
        * take only arguments
        * add: file, directory, command
        * change to full paths
        * filter compiler options
    - save to output file (compile_commands.json)
    """
    if not build_command:
        logging.error("No bazel build command specified")
        return False

    if not output_file_name:
        logging.error("No output file is specified")
        return False

    bazel_info = split_to_list(build_command)[0] + " info "
    bazel_workspace = run_command(bazel_info + "workspace").rstrip()
    bazel_output_base = run_command(bazel_info + "output_base").rstrip()
    bazel_execution_root = run_command(bazel_info + "execution_root").rstrip()
    bazel_work_dir = bazel_execution_root.replace("/execroot/", "/external/")
    if not os.path.exists(bazel_work_dir):
        bazel_work_dir = bazel_workspace

    logging.info("Bazel workspace: %s", bazel_workspace)
    logging.info("Bazel output base: %s", bazel_output_base)
    logging.info("Bazel work dir: %s", bazel_work_dir)
    logging.info("Bazel build options: %s", build_command)

    data = bazel_aquery(build_command)
    if not data:
        logging.error("Command '%s'", build_command)
        logging.error("Produces no output")
        return False

    directories = [
        bazel_output_base,
        bazel_work_dir,
    ]
    compile_commands = get_compile_commands(data, directories, output_base=bazel_output_base)

    logging.info("Saving to: %s", output_file_name)
    with open(output_file_name, "w") as compile_commands_file:
        json.dump(compile_commands, compile_commands_file, indent=4)

    return True
def run(build_command, output_file_name):
    """
    Run bazel aquaery and generate compile_commands.json:
    - get bazel info params
    - run bazel aquery and parse output as JSON data
    - create compile commands from JSON data:
        * take only arguments
        * add: file, directory, command
        * change to full paths
        * filter compiler options
    - save to output file (compile_commands.json)
    """
    if not build_command:
        logging.error("No bazel build command specified")
        return False

    if not output_file_name:
        logging.error("No output file is specified")
        return False

    cmd = shlex.split(build_command)
    bazel_info = cmd[0] + " info "
    bazel_workspace = run_command(bazel_info + "workspace").rstrip()
    bazel_output_base = run_command(bazel_info + "output_base").rstrip()
    bazel_execution_root = run_command(bazel_info + "execution_root").rstrip()
    bazel_work_dir = bazel_execution_root.replace("/execroot/", "/external/")
    if not os.path.exists(bazel_work_dir):
        bazel_work_dir = bazel_workspace

    logging.info("Bazel workspace: %s", bazel_workspace)
    logging.info("Bazel output base: %s", bazel_output_base)
    logging.info("Bazel work dir: %s", bazel_work_dir)
    logging.info("Bazel build options: %s", build_command)

    data = bazel_aquery(build_command)
    if not data:
        logging.error("Command '%s'", build_command)
        logging.error("Produces no output")
        return False

    directories = [
        bazel_output_base,
        bazel_work_dir,
    ]
    compile_commands = get_compile_commands(data, directories, output_base=bazel_output_base)

    logging.info("Saving to: %s", output_file_name)
    with open(output_file_name, "w") as compile_commands_file:
        json.dump(compile_commands, compile_commands_file, indent=4)

    return True
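The functional change in this pair is how the bazel executable is extracted from the build command: the project-local split_to_list helper (whose exact behavior is not shown here) is replaced with the standard library's shlex.split, which honors shell-style quoting. A small sketch of why that matters, using a made-up command string and a plain str.split standing in for a naive whitespace split:

import shlex

build_command = '"/opt/my tools/bazel" build //foo:bar'

print(build_command.split()[0])       # '"/opt/my'             - naive split breaks the quoted path
print(shlex.split(build_command)[0])  # '/opt/my tools/bazel'  - quoting handled correctly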
27,540
def import_xml(): """Import final cut pro XML file""" app = get_app() _ = app._tr # Get FPS info fps_num = app.project.get("fps").get("num", 24) fps_den = app.project.get("fps").get("den", 1) fps_float = float(fps_num / fps_den) # Get XML path recommended_path = app.project.current_filepath or "" if not recommended_path: recommended_path = info.HOME_PATH else: recommended_path = os.path.dirname(recommended_path) file_path = QFileDialog.getOpenFileName(app.window, _("Import XML..."), recommended_path, _("Final Cut Pro (*.xml)"), _("Final Cut Pro (*.xml)"))[0] if not file_path or not os.path.exists(file_path): # User canceled dialog return # Parse XML file xmldoc = minidom.parse(file_path) # Get video tracks video_tracks = [] for video_element in xmldoc.getElementsByTagName("video"): for video_track in video_element.getElementsByTagName("track"): video_tracks.append(video_track) audio_tracks = [] for audio_element in xmldoc.getElementsByTagName("audio"): for audio_track in audio_element.getElementsByTagName("track"): audio_tracks.append(audio_track) # Loop through tracks track_index = 0 for tracks in [audio_tracks, video_tracks]: for track_element in tracks: # Get clipitems on this track (if any) clips_on_track = track_element.getElementsByTagName("clipitem") if not clips_on_track: continue # Get # of tracks track_index += 1 all_tracks = app.project.get("layers") track_number = list(reversed(sorted(all_tracks, key=itemgetter('number'))))[0].get("number") + 1000000 # Create new track above existing layer(s) track = Track() is_locked = False if track_element.getElementsByTagName("locked")[0].childNodes[0].nodeValue == "TRUE": is_locked = True track.data = {"number": track_number, "y": 0, "label": "XML Import %s" % track_index, "lock": is_locked} track.save() # Loop through clips for clip_element in clips_on_track: # Get clip path xml_file_id = clip_element.getElementsByTagName("file")[0].getAttribute("id") clip_path = "" if clip_element.getElementsByTagName("pathurl"): clip_path = clip_element.getElementsByTagName("pathurl")[0].childNodes[0].nodeValue else: # Skip clipitem if no clippath node found # This usually happens for linked audio clips (which OpenShot combines audio and thus ignores this) continue clip_path, is_modified, is_skipped = find_missing_file(clip_path) if is_skipped: continue # Check for this path in our existing project data file = File.get(path=clip_path) # Load filepath in libopenshot clip object (which will try multiple readers to open it) clip_obj = openshot.Clip(clip_path) if not file: # Get the JSON for the clip's internal reader try: reader = clip_obj.Reader() file_data = json.loads(reader.Json()) # Determine media type if file_data["has_video"] and not is_image(file_data): file_data["media_type"] = "video" elif file_data["has_video"] and is_image(file_data): file_data["media_type"] = "image" elif file_data["has_audio"] and not file_data["has_video"]: file_data["media_type"] = "audio" # Save new file to the project data file = File() file.data = file_data # Save file file.save() except Exception: log.warning('Failed to create File object for %s' % clip_path) if (file.data["media_type"] == "video" or file.data["media_type"] == "image"): # Determine thumb path thumb_path = os.path.join(info.THUMBNAIL_PATH, "%s.png" % file.data["id"]) else: # Audio file thumb_path = os.path.join(info.PATH, "images", "AudioThumbnail.png") # Create Clip object clip = Clip() clip.data = json.loads(clip_obj.Json()) clip.data["file_id"] = file.id clip.data["title"] = 
clip_element.getElementsByTagName("name")[0].childNodes[0].nodeValue clip.data["layer"] = track.data.get("number", 1000000) clip.data["image"] = thumb_path clip.data["position"] = float(clip_element.getElementsByTagName("start")[0].childNodes[0].nodeValue) / fps_float clip.data["start"] = float(clip_element.getElementsByTagName("in")[0].childNodes[0].nodeValue) / fps_float clip.data["end"] = float(clip_element.getElementsByTagName("out")[0].childNodes[0].nodeValue) / fps_float # Loop through clip's effects for effect_element in clip_element.getElementsByTagName("effect"): effectid = effect_element.getElementsByTagName("effectid")[0].childNodes[0].nodeValue keyframes = effect_element.getElementsByTagName("keyframe") if effectid == "opacity": clip.data["alpha"] = {"Points": []} for keyframe_element in keyframes: keyframe_time = float(keyframe_element.getElementsByTagName("when")[0].childNodes[0].nodeValue) keyframe_value = float(keyframe_element.getElementsByTagName("value")[0].childNodes[0].nodeValue) / 100.0 clip.data["alpha"]["Points"].append( { "co": { "X": round(keyframe_time), "Y": keyframe_value }, "interpolation": 1 # linear } ) elif effectid == "audiolevels": clip.data["volume"] = {"Points": []} for keyframe_element in keyframes: keyframe_time = float(keyframe_element.getElementsByTagName("when")[0].childNodes[0].nodeValue) keyframe_value = float(keyframe_element.getElementsByTagName("value")[0].childNodes[0].nodeValue) / 100.0 clip.data["volume"]["Points"].append( { "co": { "X": round(keyframe_time), "Y": keyframe_value }, "interpolation": 1 # linear } ) # Save clip clip.save() # Update the preview and reselect current frame in properties app.window.refreshFrameSignal.emit() app.window.propertyTableView.select_frame(app.window.preview_thread.player.Position()) # Free up DOM memory xmldoc.unlink()
def import_xml(): """Import final cut pro XML file""" app = get_app() _ = app._tr # Get FPS info fps_num = app.project.get("fps").get("num", 24) fps_den = app.project.get("fps").get("den", 1) fps_float = float(fps_num / fps_den) # Get XML path recommended_path = app.project.current_filepath or "" if not recommended_path: recommended_path = info.HOME_PATH else: recommended_path = os.path.dirname(recommended_path) file_path = QFileDialog.getOpenFileName(app.window, _("Import XML..."), recommended_path, _("Final Cut Pro (*.xml)"), _("Final Cut Pro (*.xml)"))[0] if not file_path or not os.path.exists(file_path): # User canceled dialog return # Parse XML file xmldoc = minidom.parse(file_path) # Get video tracks video_tracks = [] for video_element in xmldoc.getElementsByTagName("video"): for video_track in video_element.getElementsByTagName("track"): video_tracks.append(video_track) audio_tracks = [] for audio_element in xmldoc.getElementsByTagName("audio"): for audio_track in audio_element.getElementsByTagName("track"): audio_tracks.append(audio_track) # Loop through tracks track_index = 0 for tracks in [audio_tracks, video_tracks]: for track_element in tracks: # Get clipitems on this track (if any) clips_on_track = track_element.getElementsByTagName("clipitem") if not clips_on_track: continue # Get # of tracks track_index += 1 all_tracks = app.project.get("layers") track_number = list(reversed(sorted(all_tracks, key=itemgetter('number'))))[0].get("number") + 1000000 # Create new track above existing layer(s) track = Track() is_locked = False if track_element.getElementsByTagName("locked")[0].childNodes[0].nodeValue == "TRUE": is_locked = True track.data = {"number": track_number, "y": 0, "label": "XML Import %s" % track_index, "lock": is_locked} track.save() # Loop through clips for clip_element in clips_on_track: # Get clip path xml_file_id = clip_element.getElementsByTagName("file")[0].getAttribute("id") clip_path = "" if clip_element.getElementsByTagName("pathurl"): clip_path = clip_element.getElementsByTagName("pathurl")[0].childNodes[0].nodeValue else: # Skip clipitem if no clippath node found # This usually happens for linked audio clips (which OpenShot combines audio and thus ignores this) continue clip_path, is_modified, is_skipped = find_missing_file(clip_path) if is_skipped: continue # Check for this path in our existing project data file = File.get(path=clip_path) # Load filepath in libopenshot clip object (which will try multiple readers to open it) clip_obj = openshot.Clip(clip_path) if not file: # Get the JSON for the clip's internal reader try: reader = clip_obj.Reader() file_data = json.loads(reader.Json()) # Determine media type if file_data["has_video"] and not is_image(file_data): file_data["media_type"] = "video" elif file_data["has_video"] and is_image(file_data): file_data["media_type"] = "image" elif file_data["has_audio"] and not file_data["has_video"]: file_data["media_type"] = "audio" # Save new file to the project data file = File() file.data = file_data # Save file file.save() except Exception: log.warning('Error building File object for %s' % clip_path, exc_info=1) if (file.data["media_type"] == "video" or file.data["media_type"] == "image"): # Determine thumb path thumb_path = os.path.join(info.THUMBNAIL_PATH, "%s.png" % file.data["id"]) else: # Audio file thumb_path = os.path.join(info.PATH, "images", "AudioThumbnail.png") # Create Clip object clip = Clip() clip.data = json.loads(clip_obj.Json()) clip.data["file_id"] = file.id clip.data["title"] = 
clip_element.getElementsByTagName("name")[0].childNodes[0].nodeValue clip.data["layer"] = track.data.get("number", 1000000) clip.data["image"] = thumb_path clip.data["position"] = float(clip_element.getElementsByTagName("start")[0].childNodes[0].nodeValue) / fps_float clip.data["start"] = float(clip_element.getElementsByTagName("in")[0].childNodes[0].nodeValue) / fps_float clip.data["end"] = float(clip_element.getElementsByTagName("out")[0].childNodes[0].nodeValue) / fps_float # Loop through clip's effects for effect_element in clip_element.getElementsByTagName("effect"): effectid = effect_element.getElementsByTagName("effectid")[0].childNodes[0].nodeValue keyframes = effect_element.getElementsByTagName("keyframe") if effectid == "opacity": clip.data["alpha"] = {"Points": []} for keyframe_element in keyframes: keyframe_time = float(keyframe_element.getElementsByTagName("when")[0].childNodes[0].nodeValue) keyframe_value = float(keyframe_element.getElementsByTagName("value")[0].childNodes[0].nodeValue) / 100.0 clip.data["alpha"]["Points"].append( { "co": { "X": round(keyframe_time), "Y": keyframe_value }, "interpolation": 1 # linear } ) elif effectid == "audiolevels": clip.data["volume"] = {"Points": []} for keyframe_element in keyframes: keyframe_time = float(keyframe_element.getElementsByTagName("when")[0].childNodes[0].nodeValue) keyframe_value = float(keyframe_element.getElementsByTagName("value")[0].childNodes[0].nodeValue) / 100.0 clip.data["volume"]["Points"].append( { "co": { "X": round(keyframe_time), "Y": keyframe_value }, "interpolation": 1 # linear } ) # Save clip clip.save() # Update the preview and reselect current frame in properties app.window.refreshFrameSignal.emit() app.window.propertyTableView.select_frame(app.window.preview_thread.player.Position()) # Free up DOM memory xmldoc.unlink()
1,174
def ICC_rep_anova(Y, nocache=False):
    """
    the data Y are entered as a 'table' ie subjects are in rows and repeated measures in columns

    One Sample Repeated measure ANOVA

    Y = XB + E with X = [FaTor / Subjects]

    This is a hacked up (but fully compatible) version of ICC_rep_anova from nipype that caches
    some very expensive operations that depend only on the input array shape - if you're going to
    run the routine multiple times (like, on every voxel of an image), this gives you a HUGE speed
    boost for large input arrays. If you change the dimensions of Y, it will reinitialize
    automatially. Set nocache to True to get the original, much slower behavior. No, actually,
    don't do that.
    """
    global icc_inited
    global current_Y_shape
    global dfc, dfe, dfr
    global nb_subjects, nb_conditions
    global x, x0, X
    global centerbit

    try:
        current_Y_shape
        if nocache or (current_Y_shape != Y.shape):
            icc_inited = False
    except NameError:
        icc_inited = False

    if not icc_inited:
        [nb_subjects, nb_conditions] = Y.shape
        current_Y_shape = Y.shape
        dfc = nb_conditions - 1
        dfe = (nb_subjects - 1) * dfc
        dfr = nb_subjects - 1

    # Compute the repeated measure effect
    # ------------------------------------

    # Sum Square Total
    mean_Y = mean(Y)
    SST = ((Y - mean_Y) ** 2).sum()

    # create the design matrix for the different levels
    if not icc_inited:
        x = kron(eye(nb_conditions), ones((nb_subjects, 1)))  # sessions
        x0 = tile(eye(nb_subjects), (nb_conditions, 1))  # subjects
        X = hstack([x, x0])
        centerbit = dot(dot(X, pinv(dot(X.T, X))), X.T)

    # Sum Square Error
    predicted_Y = dot(centerbit, Y.flatten("F"))
    residuals = Y.flatten("F") - predicted_Y
    SSE = (residuals ** 2).sum()

    residuals.shape = Y.shape

    MSE = SSE / dfe

    # Sum square session effect - between columns/sessions
    SSC = ((mean(Y, 0) - mean_Y) ** 2).sum() * nb_subjects
    MSC = SSC / dfc / nb_subjects

    session_effect_F = MSC / MSE

    # Sum Square subject effect - between rows/subjects
    SSR = SST - SSC - SSE
    MSR = SSR / dfr

    # ICC(3,1) = (mean square subjeT - mean square error) /
    #            (mean square subjeT + (k-1)*-mean square error)
    ICC = nan_to_num((MSR - MSE) / (MSR + dfc * MSE))

    e_var = MSE  # variance of error
    r_var = (MSR - MSE) / nb_conditions  # variance between subjects

    icc_inited = True

    return ICC, r_var, e_var, session_effect_F, dfc, dfe
def ICC_rep_anova(Y, nocache=False):
    """
    the data Y are entered as a 'table' ie subjects are in rows and repeated measures in columns

    One Sample Repeated measure ANOVA

    Y = XB + E with X = [FaTor / Subjects]

    This is a hacked up (but fully compatible) version of ICC_rep_anova from nipype that caches
    some very expensive operations that depend only on the input array shape - if you're going to
    run the routine multiple times (like, on every voxel of an image), this gives you a HUGE speed
    boost for large input arrays. If you change the dimensions of Y, it will reinitialize
    automatially. Set nocache to True to get the original, much slower behavior. No, actually,
    don't do that.
    """
    global icc_inited
    global current_Y_shape
    global dfc, dfe, dfr
    global nb_subjects, nb_conditions
    global x, x0, X
    global centerbit

    try:
        current_Y_shape
        if nocache or (current_Y_shape != Y.shape):
            icc_inited = False
    except NameError:
        icc_inited = False

    if not icc_inited:
        [nb_subjects, nb_conditions] = Y.shape
        current_Y_shape = Y.shape
        dfc = nb_conditions - 1
        dfe = (nb_subjects - 1) * dfc
        dfr = nb_subjects - 1

    # Compute the repeated measure effect
    # ------------------------------------

    # Sum Square Total
    mean_Y = mean(Y)
    SST = ((Y - mean_Y) ** 2).sum()

    # create the design matrix for the different levels
    if not icc_inited:
        x = kron(eye(nb_conditions), ones((nb_subjects, 1)))  # sessions
        x0 = tile(eye(nb_subjects), (nb_conditions, 1))  # subjects
        X = hstack([x, x0])
        centerbit = X @ pinv(X.T @ X) @ X.T

    # Sum Square Error
    predicted_Y = dot(centerbit, Y.flatten("F"))
    residuals = Y.flatten("F") - predicted_Y
    SSE = (residuals ** 2).sum()

    residuals.shape = Y.shape

    MSE = SSE / dfe

    # Sum square session effect - between columns/sessions
    SSC = ((mean(Y, 0) - mean_Y) ** 2).sum() * nb_subjects
    MSC = SSC / dfc / nb_subjects

    session_effect_F = MSC / MSE

    # Sum Square subject effect - between rows/subjects
    SSR = SST - SSC - SSE
    MSR = SSR / dfr

    # ICC(3,1) = (mean square subjeT - mean square error) /
    #            (mean square subjeT + (k-1)*-mean square error)
    ICC = nan_to_num((MSR - MSE) / (MSR + dfc * MSE))

    e_var = MSE  # variance of error
    r_var = (MSR - MSE) / nb_conditions  # variance between subjects

    icc_inited = True

    return ICC, r_var, e_var, session_effect_F, dfc, dfe
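The only functional difference in this pair is how centerbit is built: nested dot() calls versus the @ matrix-multiplication operator (Python 3.5+). Both spellings compute the same projection ("hat") matrix. A minimal sketch of the equivalence, assuming NumPy and an arbitrary small design matrix chosen only for illustration:

import numpy as np
from numpy.linalg import pinv

# Any full-column-rank design matrix will do for the comparison.
X = np.array([[1.0, 0.0], [1.0, 1.0], [1.0, 2.0]])

old_form = np.dot(np.dot(X, pinv(np.dot(X.T, X))), X.T)  # nested dot() calls
new_form = X @ pinv(X.T @ X) @ X.T                        # @ operator

assert np.allclose(old_form, new_form)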
14,835
def setup_platform(hass, config, add_entities, discovery_info=None):
    """Set up the classifier."""
    if DATA_FACEBOX not in hass.data:
        hass.data[DATA_FACEBOX] = []

    ip_address = config[CONF_IP_ADDRESS]
    port = config[CONF_PORT]
    username = config.get(CONF_USERNAME)
    password = config.get(CONF_PASSWORD)
    protocol = config.get(CONF_PROTOCOL, "https")
    url_health = f"{protocol}://{ip_address}:{port}/healthz"
    hostname = check_box_health(url_health, username, password)
    if hostname is None:
        return

    entities = []
    for camera in config[CONF_SOURCE]:
        facebox = FaceClassifyEntity(
            ip_address,
            port,
            username,
            password,
            hostname,
            camera[CONF_ENTITY_ID],
            protocol,
            camera.get(CONF_NAME),
        )
        entities.append(facebox)
        hass.data[DATA_FACEBOX].append(facebox)
    add_entities(entities)

    def service_handle(service):
        """Handle for services."""
        entity_ids = service.data.get("entity_id")

        classifiers = hass.data[DATA_FACEBOX]
        if entity_ids:
            classifiers = [c for c in classifiers if c.entity_id in entity_ids]

        for classifier in classifiers:
            name = service.data.get(ATTR_NAME)
            file_path = service.data.get(FILE_PATH)
            classifier.teach(name, file_path)

    hass.services.register(
        DOMAIN, SERVICE_TEACH_FACE, service_handle, schema=SERVICE_TEACH_SCHEMA
    )
def setup_platform(hass, config, add_entities, discovery_info=None):
    """Set up the classifier."""
    if DATA_FACEBOX not in hass.data:
        hass.data[DATA_FACEBOX] = []

    ip_address = config[CONF_IP_ADDRESS]
    port = config[CONF_PORT]
    username = config.get(CONF_USERNAME)
    password = config.get(CONF_PASSWORD)
    protocol = config.get(CONF_PROTOCOL)
    url_health = f"{protocol}://{ip_address}:{port}/healthz"
    hostname = check_box_health(url_health, username, password)
    if hostname is None:
        return

    entities = []
    for camera in config[CONF_SOURCE]:
        facebox = FaceClassifyEntity(
            ip_address,
            port,
            username,
            password,
            hostname,
            camera[CONF_ENTITY_ID],
            protocol,
            camera.get(CONF_NAME),
        )
        entities.append(facebox)
        hass.data[DATA_FACEBOX].append(facebox)
    add_entities(entities)

    def service_handle(service):
        """Handle for services."""
        entity_ids = service.data.get("entity_id")

        classifiers = hass.data[DATA_FACEBOX]
        if entity_ids:
            classifiers = [c for c in classifiers if c.entity_id in entity_ids]

        for classifier in classifiers:
            name = service.data.get(ATTR_NAME)
            file_path = service.data.get(FILE_PATH)
            classifier.teach(name, file_path)

    hass.services.register(
        DOMAIN, SERVICE_TEACH_FACE, service_handle, schema=SERVICE_TEACH_SCHEMA
    )
17,752
def build_dataframe(args, constructor): """ Constructs a dataframe and modifies `args` in-place. The argument values in `args` can be either strings corresponding to existing columns of a dataframe, or data arrays (lists, numpy arrays, pandas columns, series). Parameters ---------- args : OrderedDict arguments passed to the px function and subsequently modified constructor : graph_object trace class the trace type selected for this figure """ # make copies of all the fields via dict() and list() for field in args: if field in array_attrables and args[field] is not None: args[field] = ( dict(args[field]) if isinstance(args[field], dict) else list(args[field]) ) # Cast data_frame argument to DataFrame (it could be a numpy array, dict etc.) df_provided = args["data_frame"] is not None if df_provided and not isinstance(args["data_frame"], pd.DataFrame): args["data_frame"] = pd.DataFrame(args["data_frame"]) df_input = args["data_frame"] # now we handle special cases like wide-mode or x-xor-y specification # by rearranging args to tee things up for process_args_into_dataframe to work no_x = args.get("x", None) is None no_y = args.get("y", None) is None wide_x = False if no_x else _is_col_list(df_input, args["x"]) wide_y = False if no_y else _is_col_list(df_input, args["y"]) wide_mode = False var_name = None # will likely be "variable" in wide_mode wide_cross_name = None # will likely be "index" in wide_mode value_name = None # will likely be "value" in wide_mode hist2d_types = [go.Histogram2d, go.Histogram2dContour] if constructor in cartesians: if wide_x and wide_y: raise ValueError( "Cannot accept list of column references or list of columns for both `x` and `y`." ) if df_provided and no_x and no_y: wide_mode = True if isinstance(df_input.columns, pd.MultiIndex): raise TypeError( "Data frame columns is a pandas MultiIndex. " "pandas MultiIndex is not supported by plotly express " "at the moment." 
) args["wide_variable"] = list(df_input.columns) var_name = df_input.columns.name if var_name in [None, "value", "index"] or var_name in df_input: var_name = "variable" if constructor == go.Funnel: wide_orientation = args.get("orientation", None) or "h" else: wide_orientation = args.get("orientation", None) or "v" args["orientation"] = wide_orientation args["wide_cross"] = None elif wide_x != wide_y: wide_mode = True args["wide_variable"] = args["y"] if wide_y else args["x"] if df_provided and args["wide_variable"] is df_input.columns: var_name = df_input.columns.name if isinstance(args["wide_variable"], pd.Index): args["wide_variable"] = list(args["wide_variable"]) if var_name in [None, "value", "index"] or ( df_provided and var_name in df_input ): var_name = "variable" if constructor == go.Histogram: wide_orientation = "v" if wide_x else "h" else: wide_orientation = "v" if wide_y else "h" args["y" if wide_y else "x"] = None args["wide_cross"] = None if not no_x and not no_y: wide_cross_name = "__x__" if wide_y else "__y__" if wide_mode: value_name = _escape_col_name(df_input, "value", []) var_name = _escape_col_name(df_input, var_name, []) missing_bar_dim = None if constructor in [go.Scatter, go.Bar, go.Funnel] + hist2d_types: if not wide_mode and (no_x != no_y): for ax in ["x", "y"]: if args.get(ax, None) is None: args[ax] = df_input.index if df_provided else Range() if constructor == go.Bar: missing_bar_dim = ax else: if args["orientation"] is None: args["orientation"] = "v" if ax == "x" else "h" if wide_mode and wide_cross_name is None: if no_x != no_y and args["orientation"] is None: args["orientation"] = "v" if no_x else "h" if df_provided: if isinstance(df_input.index, pd.MultiIndex): raise TypeError( "Data frame index is a pandas MultiIndex. " "pandas MultiIndex is not supported by plotly express " "at the moment." 
) args["wide_cross"] = df_input.index else: args["wide_cross"] = Range( label=_escape_col_name(df_input, "index", [var_name, value_name]) ) # now that things have been prepped, we do the systematic rewriting of `args` df_output, wide_id_vars = process_args_into_dataframe( args, wide_mode, var_name, value_name ) # now that `df_output` exists and `args` contains only references, we complete # the special-case and wide-mode handling by further rewriting args and/or mutating # df_output count_name = _escape_col_name(df_output, "count", [var_name, value_name]) if not wide_mode and missing_bar_dim and constructor == go.Bar: # now that we've populated df_output, we check to see if the non-missing # dimension is categorical: if so, then setting the missing dimension to a # constant 1 is a less-insane thing to do than setting it to the index by # default and we let the normal auto-orientation-code do its thing later other_dim = "x" if missing_bar_dim == "y" else "y" if not _is_continuous(df_output, args[other_dim]): args[missing_bar_dim] = count_name df_output[count_name] = 1 else: # on the other hand, if the non-missing dimension is continuous, then we # can use this information to override the normal auto-orientation code if args["orientation"] is None: args["orientation"] = "v" if missing_bar_dim == "x" else "h" if constructor in hist2d_types: del args["orientation"] if wide_mode: # at this point, `df_output` is semi-long/semi-wide, but we know which columns # are which, so we melt it and reassign `args` to refer to the newly-tidy # columns, keeping track of various names and manglings set up above wide_value_vars = [c for c in args["wide_variable"] if c not in wide_id_vars] del args["wide_variable"] if wide_cross_name == "__x__": wide_cross_name = args["x"] elif wide_cross_name == "__y__": wide_cross_name = args["y"] else: wide_cross_name = args["wide_cross"] del args["wide_cross"] dtype = None for v in wide_value_vars: v_dtype = df_output[v].dtype.kind v_dtype = "number" if v_dtype in ["i", "f"] else v_dtype if dtype is None: dtype = v_dtype elif dtype != v_dtype: raise ValueError( "Plotly Express cannot process wide-form data with columns of different type." ) df_output = df_output.melt( id_vars=wide_id_vars, value_vars=wide_value_vars, var_name=var_name, value_name=value_name, ) assert len(df_output.columns) == len(set(df_output.columns)), ( "Wide-mode name-inference failure, likely due to a internal bug. " "Please report this to " "https://github.com/plotly/plotly.py/issues/new and we will try to " "replicate and fix it." 
) df_output[var_name] = df_output[var_name].astype(str) orient_v = wide_orientation == "v" if constructor in [go.Scatter, go.Funnel] + hist2d_types: args["x" if orient_v else "y"] = wide_cross_name args["y" if orient_v else "x"] = value_name if constructor != go.Histogram2d: args["color"] = args["color"] or var_name if constructor == go.Bar: if _is_continuous(df_output, value_name): args["x" if orient_v else "y"] = wide_cross_name args["y" if orient_v else "x"] = value_name args["color"] = args["color"] or var_name else: args["x" if orient_v else "y"] = value_name args["y" if orient_v else "x"] = count_name df_output[count_name] = 1 args["color"] = args["color"] or var_name if constructor in [go.Violin, go.Box]: args["x" if orient_v else "y"] = wide_cross_name or var_name args["y" if orient_v else "x"] = value_name if constructor == go.Histogram: args["x" if orient_v else "y"] = value_name args["y" if orient_v else "x"] = wide_cross_name args["color"] = args["color"] or var_name args["data_frame"] = df_output return args
def build_dataframe(args, constructor): """ Constructs a dataframe and modifies `args` in-place. The argument values in `args` can be either strings corresponding to existing columns of a dataframe, or data arrays (lists, numpy arrays, pandas columns, series). Parameters ---------- args : OrderedDict arguments passed to the px function and subsequently modified constructor : graph_object trace class the trace type selected for this figure """ # make copies of all the fields via dict() and list() for field in args: if field in array_attrables and args[field] is not None: args[field] = ( dict(args[field]) if isinstance(args[field], dict) else list(args[field]) ) # Cast data_frame argument to DataFrame (it could be a numpy array, dict etc.) df_provided = args["data_frame"] is not None if df_provided and not isinstance(args["data_frame"], pd.DataFrame): args["data_frame"] = pd.DataFrame(args["data_frame"]) df_input = args["data_frame"] # now we handle special cases like wide-mode or x-xor-y specification # by rearranging args to tee things up for process_args_into_dataframe to work no_x = args.get("x", None) is None no_y = args.get("y", None) is None wide_x = False if no_x else _is_col_list(df_input, args["x"]) wide_y = False if no_y else _is_col_list(df_input, args["y"]) wide_mode = False var_name = None # will likely be "variable" in wide_mode wide_cross_name = None # will likely be "index" in wide_mode value_name = None # will likely be "value" in wide_mode hist2d_types = [go.Histogram2d, go.Histogram2dContour] if constructor in cartesians: if wide_x and wide_y: raise ValueError( "Cannot accept list of column references or list of columns for both `x` and `y`." ) if df_provided and no_x and no_y: wide_mode = True if isinstance(df_input.columns, pd.MultiIndex): raise TypeError( "Data frame columns is a pandas MultiIndex. " "pandas MultiIndex is not supported by plotly express " "at the moment." 
) args["wide_variable"] = list(df_input.columns) var_name = df_input.columns.name if var_name in [None, "value", "index"] or var_name in df_input: var_name = "variable" if constructor == go.Funnel: wide_orientation = args.get("orientation", None) or "h" else: wide_orientation = args.get("orientation", None) or "v" args["orientation"] = wide_orientation args["wide_cross"] = None elif wide_x != wide_y: wide_mode = True args["wide_variable"] = args["y"] if wide_y else args["x"] if df_provided and args["wide_variable"] is df_input.columns: var_name = df_input.columns.name if isinstance(args["wide_variable"], pd.Index): args["wide_variable"] = list(args["wide_variable"]) if var_name in [None, "value", "index"] or ( df_provided and var_name in df_input ): var_name = "variable" if constructor == go.Histogram: wide_orientation = "v" if wide_x else "h" else: wide_orientation = "v" if wide_y else "h" args["y" if wide_y else "x"] = None args["wide_cross"] = None if not no_x and not no_y: wide_cross_name = "__x__" if wide_y else "__y__" if wide_mode: value_name = _escape_col_name(df_input, "value", []) var_name = _escape_col_name(df_input, var_name, []) missing_bar_dim = None if constructor in [go.Scatter, go.Bar, go.Funnel] + hist2d_types: if not wide_mode and (no_x != no_y): for ax in ["x", "y"]: if args.get(ax, None) is None: args[ax] = df_input.index if df_provided else Range() if constructor == go.Bar: missing_bar_dim = ax else: if args["orientation"] is None: args["orientation"] = "v" if ax == "x" else "h" if wide_mode and wide_cross_name is None: if no_x != no_y and args["orientation"] is None: args["orientation"] = "v" if no_x else "h" if df_provided: if isinstance(df_input.index, pd.MultiIndex): raise TypeError( "Data frame index is a pandas MultiIndex. " "pandas MultiIndex is not supported by plotly express " "at the moment." 
) args["wide_cross"] = df_input.index else: args["wide_cross"] = Range( label=_escape_col_name(df_input, "index", [var_name, value_name]) ) # now that things have been prepped, we do the systematic rewriting of `args` df_output, wide_id_vars = process_args_into_dataframe( args, wide_mode, var_name, value_name ) # now that `df_output` exists and `args` contains only references, we complete # the special-case and wide-mode handling by further rewriting args and/or mutating # df_output count_name = _escape_col_name(df_output, "count", [var_name, value_name]) if not wide_mode and missing_bar_dim and constructor == go.Bar: # now that we've populated df_output, we check to see if the non-missing # dimension is categorical: if so, then setting the missing dimension to a # constant 1 is a less-insane thing to do than setting it to the index by # default and we let the normal auto-orientation-code do its thing later other_dim = "x" if missing_bar_dim == "y" else "y" if not _is_continuous(df_output, args[other_dim]): args[missing_bar_dim] = count_name df_output[count_name] = 1 else: # on the other hand, if the non-missing dimension is continuous, then we # can use this information to override the normal auto-orientation code if args["orientation"] is None: args["orientation"] = "v" if missing_bar_dim == "x" else "h" if constructor in hist2d_types: del args["orientation"] if wide_mode: # at this point, `df_output` is semi-long/semi-wide, but we know which columns # are which, so we melt it and reassign `args` to refer to the newly-tidy # columns, keeping track of various names and manglings set up above wide_value_vars = [c for c in args["wide_variable"] if c not in wide_id_vars] del args["wide_variable"] if wide_cross_name == "__x__": wide_cross_name = args["x"] elif wide_cross_name == "__y__": wide_cross_name = args["y"] else: wide_cross_name = args["wide_cross"] del args["wide_cross"] dtype = None for v in wide_value_vars: v_dtype = df_output[v].dtype.kind v_dtype = "number" if v_dtype in ["i", "f", "u"] else v_dtype if dtype is None: dtype = v_dtype elif dtype != v_dtype: raise ValueError( "Plotly Express cannot process wide-form data with columns of different type." ) df_output = df_output.melt( id_vars=wide_id_vars, value_vars=wide_value_vars, var_name=var_name, value_name=value_name, ) assert len(df_output.columns) == len(set(df_output.columns)), ( "Wide-mode name-inference failure, likely due to a internal bug. " "Please report this to " "https://github.com/plotly/plotly.py/issues/new and we will try to " "replicate and fix it." 
) df_output[var_name] = df_output[var_name].astype(str) orient_v = wide_orientation == "v" if constructor in [go.Scatter, go.Funnel] + hist2d_types: args["x" if orient_v else "y"] = wide_cross_name args["y" if orient_v else "x"] = value_name if constructor != go.Histogram2d: args["color"] = args["color"] or var_name if constructor == go.Bar: if _is_continuous(df_output, value_name): args["x" if orient_v else "y"] = wide_cross_name args["y" if orient_v else "x"] = value_name args["color"] = args["color"] or var_name else: args["x" if orient_v else "y"] = value_name args["y" if orient_v else "x"] = count_name df_output[count_name] = 1 args["color"] = args["color"] or var_name if constructor in [go.Violin, go.Box]: args["x" if orient_v else "y"] = wide_cross_name or var_name args["y" if orient_v else "x"] = value_name if constructor == go.Histogram: args["x" if orient_v else "y"] = value_name args["y" if orient_v else "x"] = wide_cross_name args["color"] = args["color"] or var_name args["data_frame"] = df_output return args
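The change between the two versions above appears to be limited to treating unsigned-integer columns (dtype kind "u") as numeric alongside signed integers and floats when validating that wide-form value columns share a type. A minimal pandas/NumPy sketch of the dtype kinds involved (illustrative, not part of the plotly code):

import numpy as np
import pandas as pd

# dtype.kind is "i" for signed integers, "u" for unsigned integers, "f" for floats;
# mapping all three to "number" lets mixed int/uint/float wide-form columns melt together.
df = pd.DataFrame({
    "a": np.array([1, 2], dtype=np.int64),
    "b": np.array([1, 2], dtype=np.uint8),
    "c": np.array([1.0, 2.0], dtype=np.float64),
})
print({col: df[col].dtype.kind for col in df.columns})  # {'a': 'i', 'b': 'u', 'c': 'f'}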
8,661
def test_enumerate_configs(tmpdir):
    """Assert function retrieve only .cfg files by default."""
    config_dir = tmpdir.mkdir("config")
    config_dir.join('config.cfg').write('')
    config_dir.join('extra.ini').write('')
    config_dir.join('module.cfg').write('')
    config_dir.join('README').write('')

    results = list(run_script.enumerate_configs(config_dir.strpath))

    assert 'config.cfg' in results
    assert 'module.cfg' in results
    assert 'extra.ini' not in results
    assert 'README' not in results
    assert len(results) == 2
def test_enumerate_configs(tmpdir):
    """Assert function retrieves only .cfg files by default"""
    config_dir = tmpdir.mkdir("config")
    config_dir.join('config.cfg').write('')
    config_dir.join('extra.ini').write('')
    config_dir.join('module.cfg').write('')
    config_dir.join('README').write('')

    results = list(run_script.enumerate_configs(config_dir.strpath))

    assert 'config.cfg' in results
    assert 'module.cfg' in results
    assert 'extra.ini' not in results
    assert 'README' not in results
    assert len(results) == 2
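The fix above is a docstring-only change ("retrieve" to "retrieves"). For context, a helper consistent with what the test asserts could look like the sketch below; enumerate_configs_sketch is hypothetical and is not the real run_script.enumerate_configs:

import os

def enumerate_configs_sketch(config_dir, extension='.cfg'):
    """Yield file names in config_dir that end with the given extension (default: .cfg)."""
    for name in os.listdir(config_dir):
        if name.endswith(extension):
            yield name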
39,154
def wav2vec2_model( extractor_mode: str, extractor_conv_layer_config: Optional[List[Tuple[int, int, int]]], extractor_conv_bias: bool, encoder_embed_dim: int, encoder_projection_dropout: float, encoder_pos_conv_kernel: int, encoder_pos_conv_groups: int, encoder_num_layers: int, encoder_num_heads: int, encoder_attention_dropout: float, encoder_ff_interm_features: int, encoder_ff_interm_dropout: float, encoder_dropout: float, encoder_layer_norm_first: bool, encoder_layer_drop: float, aux_num_out: Optional[int], ) -> Wav2Vec2Model: """Build a custom Wav2Vec2Model Note: The "feature extractor" below corresponds to `ConvFeatureExtractionModel <https://github.com/pytorch/fairseq/blob/dd3bd3c0497ae9a7ae7364404a6b0a4c501780b3/fairseq/models/wav2vec/wav2vec2.py#L736>`__ in the original ``fairseq`` implementation. This is referred as "(convolutional) feature encoder" in the *wav2vec 2.0* [:footcite:`baevski2020wav2vec`] paper. The "encoder" below corresponds to `TransformerEncoder <https://github.com/pytorch/fairseq/blob/dd3bd3c0497ae9a7ae7364404a6b0a4c501780b3/fairseq/models/wav2vec/wav2vec2.py#L817>`__, and this is referred as "Transformer" in the paper. Args: extractor_mode (str): Operation mode of feature extractor. Valid values are ``"group_norm"`` or ``"layer_norm"``. If ``"group_norm"``, then a single normalization is applied in the first convolution block. Otherwise, all the convolution blocks will have layer normalization. This option corresponds to ``extractor_mode`` from ``fairseq``. extractor_conv_layer_config (list of integer tuples or None, optional): Configuration of convolution layers in feature extractor. List of convolution configuration, i.e. ``[(output_channel, kernel_size, stride), ...]`` If ``None`` is provided, then the following default value is used. .. code-block:: python [ (512, 10, 5), (512, 3, 2), (512, 3, 2), (512, 3, 2), (512, 3, 2), (512, 2, 2), (512, 2, 2), ] This option corresponds to ``conv_feature_layers`` from ``fairseq``. extractor_conv_bias (bool): Whether to include bias term to each convolution operation. This option corresponds to ``conv_bias`` from ``fairseq``. encoder_embed_dim (int): The dimension of embedding in encoder. This option corresponds to ``encoder_embed_dim`` from ``fairseq``. encoder_projection_dropout (float): The dropout probability applied after the input feature is projected to ``encoder_embed_dim``. This option corresponds to ``dropout_input`` from ``fairseq``. encoder_pos_conv_kernel (int): The kernel size of convolutional positional embeddings. This option corresponds to ``conv_pos`` from ``fairseq``. encoder_pos_conv_groups (int): The number of groups of convolutional positional embeddings. This option corresponds to ``conv_pos_groups`` from ``fairseq``. encoder_num_layers (int): The number of self attention layers in transformer block. This option corresponds to ``encoder_layers`` from ``fairseq``. encoder_num_heads (int): The number of heads in self attention layers. This option corresponds to ``encoder_attention_heads`` from ``fairseq``. encoder_attention_dropout (float): The dropout probability applied after softmax in self-attention layer. This option corresponds to ``attention_dropout`` from ``fairseq``. encoder_ff_interm_features (int): The dimension of hidden features in feed forward layer. This option corresponds to ``encoder_ffn_embed_dim`` from ``fairseq``. encoder_ff_interm_dropout (float): The dropout probability applied in feedforward layer. This option correspinds to ``activation_dropout`` from ``fairseq``. 
encoder_dropout (float): The dropout probability applied at the end of feed forward layer. This option corresponds to ``dropout`` from ``fairseq``. encoder_layer_norm_first (bool): Control the order of layer norm in transformer layer and each encoder layer. If True, in transformer layer, layer norm is applied before features are fed to encoder layers. In encoder layer, two layer norms are applied before and after self attention. If False, in transformer layer, layer norm is applied after features are fed to encoder layers. In encoder layer, two layer norms are applied after self attention, before and after feed forward. This option corresponds to ``layer_norm_first`` from ``fairseq``. encoder_layer_drop (float): Probability to drop each encoder layer during training. This option corresponds to ``layerdrop`` from ``fairseq``. aux_num_out (int or None, optional): When provided, attach an extra liner layer on top of encoder, which can be used for fine-tuning. Returns: Wav2Vec2Model: The resulting model. """ # noqa: E501 if extractor_conv_layer_config is None: extractor_conv_layer_config = [(512, 10, 5)] + [(512, 3, 2)] * 4 + [(512, 2, 2)] * 2 feature_extractor = components._get_feature_extractor( extractor_mode, extractor_conv_layer_config, extractor_conv_bias) encoder = components._get_encoder( in_features=extractor_conv_layer_config[-1][0], embed_dim=encoder_embed_dim, dropout_input=encoder_projection_dropout, pos_conv_kernel=encoder_pos_conv_kernel, pos_conv_groups=encoder_pos_conv_groups, num_layers=encoder_num_layers, num_heads=encoder_num_heads, attention_dropout=encoder_attention_dropout, ff_interm_features=encoder_ff_interm_features, ff_interm_dropout=encoder_ff_interm_dropout, dropout=encoder_dropout, layer_norm_first=encoder_layer_norm_first, layer_drop=encoder_layer_drop, ) aux = None if aux_num_out is not None: aux = torch.nn.Linear(in_features=encoder_embed_dim, out_features=aux_num_out) return Wav2Vec2Model(feature_extractor, encoder, aux)
def wav2vec2_model( extractor_mode: str, extractor_conv_layer_config: Optional[List[Tuple[int, int, int]]], extractor_conv_bias: bool, encoder_embed_dim: int, encoder_projection_dropout: float, encoder_pos_conv_kernel: int, encoder_pos_conv_groups: int, encoder_num_layers: int, encoder_num_heads: int, encoder_attention_dropout: float, encoder_ff_interm_features: int, encoder_ff_interm_dropout: float, encoder_dropout: float, encoder_layer_norm_first: bool, encoder_layer_drop: float, aux_num_out: Optional[int], ) -> Wav2Vec2Model: """Build a custom Wav2Vec2Model Note: The "feature extractor" below corresponds to `ConvFeatureExtractionModel <https://github.com/pytorch/fairseq/blob/dd3bd3c0497ae9a7ae7364404a6b0a4c501780b3/fairseq/models/wav2vec/wav2vec2.py#L736>`__ in the original ``fairseq`` implementation. This is referred as "(convolutional) feature encoder" in the *wav2vec 2.0* [:footcite:`baevski2020wav2vec`] paper. The "encoder" below corresponds to `TransformerEncoder <https://github.com/pytorch/fairseq/blob/dd3bd3c0497ae9a7ae7364404a6b0a4c501780b3/fairseq/models/wav2vec/wav2vec2.py#L817>`__, and this is referred as "Transformer" in the paper. Args: extractor_mode (str): Operation mode of feature extractor. Valid values are ``"group_norm"`` or ``"layer_norm"``. If ``"group_norm"``, then a single normalization is applied in the first convolution block. Otherwise, all the convolution blocks will have layer normalization. This option corresponds to ``extractor_mode`` from ``fairseq``. extractor_conv_layer_config (list of integer tuples or None, optional): Configuration of convolution layers in feature extractor. List of convolution configuration, i.e. ``[(output_channel, kernel_size, stride), ...]`` If ``None`` is provided, then the following default value is used. .. code-block:: python [ (512, 10, 5), (512, 3, 2), (512, 3, 2), (512, 3, 2), (512, 3, 2), (512, 2, 2), (512, 2, 2), ] This option corresponds to ``conv_feature_layers`` from ``fairseq``. extractor_conv_bias (bool): Whether to include bias term to each convolution operation. This option corresponds to ``conv_bias`` from ``fairseq``. encoder_embed_dim (int): The dimension of embedding in encoder. This option corresponds to ``encoder_embed_dim`` from ``fairseq``. encoder_projection_dropout (float): The dropout probability applied after the input feature is projected to ``encoder_embed_dim``. This option corresponds to ``dropout_input`` from ``fairseq``. encoder_pos_conv_kernel (int): The kernel size of convolutional positional embeddings. This option corresponds to ``conv_pos`` from ``fairseq``. encoder_pos_conv_groups (int): The number of groups of convolutional positional embeddings. This option corresponds to ``conv_pos_groups`` from ``fairseq``. encoder_num_layers (int): The number of self attention layers in transformer block. This option corresponds to ``encoder_layers`` from ``fairseq``. encoder_num_heads (int): The number of heads in self attention layers. This option corresponds to ``encoder_attention_heads`` from ``fairseq``. encoder_attention_dropout (float): The dropout probability applied after softmax in self-attention layer. This option corresponds to ``attention_dropout`` from ``fairseq``. encoder_ff_interm_features (int): The dimension of hidden features in feed forward layer. This option corresponds to ``encoder_ffn_embed_dim`` from ``fairseq``. encoder_ff_interm_dropout (float): The dropout probability applied in feedforward layer. This option correspinds to ``activation_dropout`` from ``fairseq``. 
encoder_dropout (float): The dropout probability applied at the end of feed forward layer. This option corresponds to ``dropout`` from ``fairseq``. encoder_layer_norm_first (bool): Control the order of layer norm in transformer layer and each encoder layer. If True, in transformer layer, layer norm is applied before features are fed to encoder layers. In encoder layer, two layer norms are applied before and after self attention. If False, in transformer layer, layer norm is applied after features are fed to encoder layers. In encoder layer, two layer norms are applied after self attention, before and after feed forward. This option corresponds to ``layer_norm_first`` from ``fairseq``. encoder_layer_drop (float): Probability to drop each encoder layer during training. This option corresponds to ``layerdrop`` from ``fairseq``. aux_num_out (int or None, optional): When provided, attach an extra linear layer on top of encoder, which can be used for fine-tuning. Returns: Wav2Vec2Model: The resulting model. """ # noqa: E501 if extractor_conv_layer_config is None: extractor_conv_layer_config = [(512, 10, 5)] + [(512, 3, 2)] * 4 + [(512, 2, 2)] * 2 feature_extractor = components._get_feature_extractor( extractor_mode, extractor_conv_layer_config, extractor_conv_bias) encoder = components._get_encoder( in_features=extractor_conv_layer_config[-1][0], embed_dim=encoder_embed_dim, dropout_input=encoder_projection_dropout, pos_conv_kernel=encoder_pos_conv_kernel, pos_conv_groups=encoder_pos_conv_groups, num_layers=encoder_num_layers, num_heads=encoder_num_heads, attention_dropout=encoder_attention_dropout, ff_interm_features=encoder_ff_interm_features, ff_interm_dropout=encoder_ff_interm_dropout, dropout=encoder_dropout, layer_norm_first=encoder_layer_norm_first, layer_drop=encoder_layer_drop, ) aux = None if aux_num_out is not None: aux = torch.nn.Linear(in_features=encoder_embed_dim, out_features=aux_num_out) return Wav2Vec2Model(feature_extractor, encoder, aux)
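Besides the docstring fix ("liner" to "linear"), nothing changes between the two versions above. The factory can be exercised roughly as follows; the hyper-parameter values are an assumption meant to approximate the "base" wav2vec 2.0 configuration, and the factory is assumed to be exposed as torchaudio.models.wav2vec2_model:

import torch
import torchaudio

# Hyper-parameters below are an assumption approximating the "base" wav2vec 2.0
# architecture; they are illustrative, not an official preset.
model = torchaudio.models.wav2vec2_model(
    extractor_mode="group_norm",
    extractor_conv_layer_config=None,   # use the default conv stack listed in the docstring
    extractor_conv_bias=False,
    encoder_embed_dim=768,
    encoder_projection_dropout=0.1,
    encoder_pos_conv_kernel=128,
    encoder_pos_conv_groups=16,
    encoder_num_layers=12,
    encoder_num_heads=12,
    encoder_attention_dropout=0.1,
    encoder_ff_interm_features=3072,
    encoder_ff_interm_dropout=0.1,
    encoder_dropout=0.1,
    encoder_layer_norm_first=False,
    encoder_layer_drop=0.1,
    aux_num_out=None,                   # no fine-tuning head attached
)
waveform = torch.randn(1, 16000)        # one second of 16 kHz audio
features, _ = model(waveform)           # encoder output of shape (batch, frames, encoder_embed_dim)
print(features.shape)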
5,629
def iirfilter(N, Wn, rp=None, rs=None, btype='band', analog=False, ftype='butter', output='ba', fs=None): """ IIR digital and analog filter design given order and critical points. Design an Nth-order digital or analog filter and return the filter coefficients. Parameters ---------- N : int The order of the filter. Wn : array_like A scalar or length-2 sequence giving the critical frequencies. For digital filters, `Wn` are in the same units as `fs`. By default, `fs` is 2 half-cycles/sample, so these are normalized from 0 to 1, where 1 is the Nyquist frequency. (`Wn` is thus in half-cycles / sample.) For analog filters, `Wn` is an angular frequency (e.g. rad/s). rp : float, optional For Chebyshev and elliptic filters, provides the maximum ripple in the passband. (dB) rs : float, optional For Chebyshev and elliptic filters, provides the minimum attenuation in the stop band. (dB) btype : {'bandpass', 'lowpass', 'highpass', 'bandstop'}, optional The type of filter. Default is 'bandpass'. analog : bool, optional When True, return an analog filter, otherwise a digital filter is returned. ftype : str, optional The type of IIR filter to design: - Butterworth : 'butter' - Chebyshev I : 'cheby1' - Chebyshev II : 'cheby2' - Cauer/elliptic: 'ellip' - Bessel/Thomson: 'bessel' output : {'ba', 'zpk', 'sos'}, optional Type of output: numerator/denominator ('ba'), pole-zero ('zpk'), or second-order sections ('sos'). Default is 'ba', for backwards compatibility, but 'sos' should be used for general-purpose filtering. fs : float, optional The sampling frequency of the digital system. .. versionadded:: 1.2.0 Returns ------- b, a : ndarray, ndarray Numerator (`b`) and denominator (`a`) polynomials of the IIR filter. Only returned if ``output='ba'``. z, p, k : ndarray, ndarray, float Zeros, poles, and system gain of the IIR filter transfer function. Only returned if ``output='zpk'``. sos : ndarray Second-order sections representation of the IIR filter. Only returned if ``output=='sos'``. See Also -------- butter : Filter design using order and critical points cheby1, cheby2, ellip, bessel buttord : Find order and critical points from passband and stopband spec cheb1ord, cheb2ord, ellipord iirdesign : General filter design using passband and stopband spec Notes ----- The ``'sos'`` output parameter was added in 0.16.0. Examples -------- Generate a 17th-order Chebyshev II analog bandpass filter from 50 Hz to 200 Hz and plot the frequency response: >>> from scipy import signal >>> import matplotlib.pyplot as plt >>> b, a = signal.iirfilter(17, [2*np.pi*50, 2*np.pi*200], rs=60, ... btype='band', analog=True, ftype='cheby2') >>> w, h = signal.freqs(b, a, 1000) >>> fig = plt.figure() >>> ax = fig.add_subplot(1, 1, 1) >>> ax.semilogx(w / (2*np.pi), 20 * np.log10(np.maximum(abs(h), 1e-5))) >>> ax.set_title('Chebyshev Type II bandpass frequency response') >>> ax.set_xlabel('Frequency [Hz]') >>> ax.set_ylabel('Amplitude [dB]') >>> ax.axis((10, 1000, -100, 10)) >>> ax.grid(which='both', axis='both') >>> plt.show() Create a digital filter with the same properties, in a system with sampling rate of 2000 Hz, and plot the frequency response. (Second-order sections implementation is required to ensure stability of a filter of this order): >>> sos = signal.iirfilter(17, [50, 200], rs=60, btype='band', ... analog=False, ftype='cheby2', fs=2000, ... 
output='sos') >>> w, h = signal.sosfreqz(sos, 2000, fs=2000) >>> fig = plt.figure() >>> ax = fig.add_subplot(1, 1, 1) >>> ax.semilogx(w, 20 * np.log10(np.maximum(abs(h), 1e-5))) >>> ax.set_title('Chebyshev Type II bandpass frequency response') >>> ax.set_xlabel('Frequency [Hz]') >>> ax.set_ylabel('Amplitude [dB]') >>> ax.axis((10, 1000, -100, 10)) >>> ax.grid(which='both', axis='both') >>> plt.show() """ ftype, btype, output = [x.lower() for x in (ftype, btype, output)] Wn = asarray(Wn) if fs is not None: if analog: raise ValueError("fs cannot be specified for an analog filter") Wn = 2*Wn/fs try: btype = band_dict[btype] except KeyError: raise ValueError("'%s' is an invalid bandtype for filter." % btype) try: typefunc = filter_dict[ftype][0] except KeyError: raise ValueError("'%s' is not a valid basic IIR filter." % ftype) if output not in ['ba', 'zpk', 'sos']: raise ValueError("'%s' is not a valid output form." % output) if rp is not None and rp < 0: raise ValueError("passband ripple (rp) must be positive") if rs is not None and rs < 0: raise ValueError("stopband attenuation (rs) must be positive") # Get analog lowpass prototype if typefunc == buttap: z, p, k = typefunc(N) elif typefunc == besselap: z, p, k = typefunc(N, norm=bessel_norms[ftype]) elif typefunc == cheb1ap: if rp is None: raise ValueError("passband ripple (rp) must be provided to " "design a Chebyshev I filter.") z, p, k = typefunc(N, rp) elif typefunc == cheb2ap: if rs is None: raise ValueError("stopband attenuation (rs) must be provided to " "design an Chebyshev II filter.") z, p, k = typefunc(N, rs) elif typefunc == ellipap: if rs is None or rp is None: raise ValueError("Both rp and rs must be provided to design an " "elliptic filter.") z, p, k = typefunc(N, rp, rs) else: raise NotImplementedError("'%s' not implemented in iirfilter." % ftype) # Pre-warp frequencies for digital filter design if not analog: if numpy.any(Wn <= 0) or numpy.any(Wn >= 1): raise ValueError("Digital filter critical frequencies " "must be 0 < Wn < 1") fs = 2.0 warped = 2 * fs * tan(pi * Wn / fs) else: warped = Wn # transform to lowpass, bandpass, highpass, or bandstop if btype in ('lowpass', 'highpass'): if numpy.size(Wn) != 1: raise ValueError('Must specify a single critical frequency Wn') if btype == 'lowpass': z, p, k = lp2lp_zpk(z, p, k, wo=warped) elif btype == 'highpass': z, p, k = lp2hp_zpk(z, p, k, wo=warped) elif btype in ('bandpass', 'bandstop'): try: bw = warped[1] - warped[0] wo = sqrt(warped[0] * warped[1]) except IndexError: raise ValueError('Wn must specify start and stop frequencies') if btype == 'bandpass': z, p, k = lp2bp_zpk(z, p, k, wo=wo, bw=bw) elif btype == 'bandstop': z, p, k = lp2bs_zpk(z, p, k, wo=wo, bw=bw) else: raise NotImplementedError("'%s' not implemented in iirfilter." % btype) # Find discrete equivalent if necessary if not analog: z, p, k = bilinear_zpk(z, p, k, fs=fs) # Transform to proper out type (pole-zero, state-space, numer-denom) if output == 'zpk': return z, p, k elif output == 'ba': return zpk2tf(z, p, k) elif output == 'sos': return zpk2sos(z, p, k)
def iirfilter(N, Wn, rp=None, rs=None, btype='band', analog=False, ftype='butter', output='ba', fs=None): """ IIR digital and analog filter design given order and critical points. Design an Nth-order digital or analog filter and return the filter coefficients. Parameters ---------- N : int The order of the filter. Wn : array_like A scalar or length-2 sequence giving the critical frequencies. For digital filters, `Wn` are in the same units as `fs`. By default, `fs` is 2 half-cycles/sample, so these are normalized from 0 to 1, where 1 is the Nyquist frequency. (`Wn` is thus in half-cycles / sample.) For analog filters, `Wn` is an angular frequency (e.g. rad/s). rp : float, optional For Chebyshev and elliptic filters, provides the maximum ripple in the passband. (dB) rs : float, optional For Chebyshev and elliptic filters, provides the minimum attenuation in the stop band. (dB) btype : {'bandpass', 'lowpass', 'highpass', 'bandstop'}, optional The type of filter. Default is 'bandpass'. analog : bool, optional When True, return an analog filter, otherwise a digital filter is returned. ftype : str, optional The type of IIR filter to design: - Butterworth : 'butter' - Chebyshev I : 'cheby1' - Chebyshev II : 'cheby2' - Cauer/elliptic: 'ellip' - Bessel/Thomson: 'bessel' output : {'ba', 'zpk', 'sos'}, optional Type of output: numerator/denominator ('ba'), pole-zero ('zpk'), or second-order sections ('sos'). Default is 'ba' for backwards compatibility, but 'sos' should be used for general-purpose filtering. fs : float, optional The sampling frequency of the digital system. .. versionadded:: 1.2.0 Returns ------- b, a : ndarray, ndarray Numerator (`b`) and denominator (`a`) polynomials of the IIR filter. Only returned if ``output='ba'``. z, p, k : ndarray, ndarray, float Zeros, poles, and system gain of the IIR filter transfer function. Only returned if ``output='zpk'``. sos : ndarray Second-order sections representation of the IIR filter. Only returned if ``output=='sos'``. See Also -------- butter : Filter design using order and critical points cheby1, cheby2, ellip, bessel buttord : Find order and critical points from passband and stopband spec cheb1ord, cheb2ord, ellipord iirdesign : General filter design using passband and stopband spec Notes ----- The ``'sos'`` output parameter was added in 0.16.0. Examples -------- Generate a 17th-order Chebyshev II analog bandpass filter from 50 Hz to 200 Hz and plot the frequency response: >>> from scipy import signal >>> import matplotlib.pyplot as plt >>> b, a = signal.iirfilter(17, [2*np.pi*50, 2*np.pi*200], rs=60, ... btype='band', analog=True, ftype='cheby2') >>> w, h = signal.freqs(b, a, 1000) >>> fig = plt.figure() >>> ax = fig.add_subplot(1, 1, 1) >>> ax.semilogx(w / (2*np.pi), 20 * np.log10(np.maximum(abs(h), 1e-5))) >>> ax.set_title('Chebyshev Type II bandpass frequency response') >>> ax.set_xlabel('Frequency [Hz]') >>> ax.set_ylabel('Amplitude [dB]') >>> ax.axis((10, 1000, -100, 10)) >>> ax.grid(which='both', axis='both') >>> plt.show() Create a digital filter with the same properties, in a system with sampling rate of 2000 Hz, and plot the frequency response. (Second-order sections implementation is required to ensure stability of a filter of this order): >>> sos = signal.iirfilter(17, [50, 200], rs=60, btype='band', ... analog=False, ftype='cheby2', fs=2000, ... 
output='sos') >>> w, h = signal.sosfreqz(sos, 2000, fs=2000) >>> fig = plt.figure() >>> ax = fig.add_subplot(1, 1, 1) >>> ax.semilogx(w, 20 * np.log10(np.maximum(abs(h), 1e-5))) >>> ax.set_title('Chebyshev Type II bandpass frequency response') >>> ax.set_xlabel('Frequency [Hz]') >>> ax.set_ylabel('Amplitude [dB]') >>> ax.axis((10, 1000, -100, 10)) >>> ax.grid(which='both', axis='both') >>> plt.show() """ ftype, btype, output = [x.lower() for x in (ftype, btype, output)] Wn = asarray(Wn) if fs is not None: if analog: raise ValueError("fs cannot be specified for an analog filter") Wn = 2*Wn/fs try: btype = band_dict[btype] except KeyError: raise ValueError("'%s' is an invalid bandtype for filter." % btype) try: typefunc = filter_dict[ftype][0] except KeyError: raise ValueError("'%s' is not a valid basic IIR filter." % ftype) if output not in ['ba', 'zpk', 'sos']: raise ValueError("'%s' is not a valid output form." % output) if rp is not None and rp < 0: raise ValueError("passband ripple (rp) must be positive") if rs is not None and rs < 0: raise ValueError("stopband attenuation (rs) must be positive") # Get analog lowpass prototype if typefunc == buttap: z, p, k = typefunc(N) elif typefunc == besselap: z, p, k = typefunc(N, norm=bessel_norms[ftype]) elif typefunc == cheb1ap: if rp is None: raise ValueError("passband ripple (rp) must be provided to " "design a Chebyshev I filter.") z, p, k = typefunc(N, rp) elif typefunc == cheb2ap: if rs is None: raise ValueError("stopband attenuation (rs) must be provided to " "design an Chebyshev II filter.") z, p, k = typefunc(N, rs) elif typefunc == ellipap: if rs is None or rp is None: raise ValueError("Both rp and rs must be provided to design an " "elliptic filter.") z, p, k = typefunc(N, rp, rs) else: raise NotImplementedError("'%s' not implemented in iirfilter." % ftype) # Pre-warp frequencies for digital filter design if not analog: if numpy.any(Wn <= 0) or numpy.any(Wn >= 1): raise ValueError("Digital filter critical frequencies " "must be 0 < Wn < 1") fs = 2.0 warped = 2 * fs * tan(pi * Wn / fs) else: warped = Wn # transform to lowpass, bandpass, highpass, or bandstop if btype in ('lowpass', 'highpass'): if numpy.size(Wn) != 1: raise ValueError('Must specify a single critical frequency Wn') if btype == 'lowpass': z, p, k = lp2lp_zpk(z, p, k, wo=warped) elif btype == 'highpass': z, p, k = lp2hp_zpk(z, p, k, wo=warped) elif btype in ('bandpass', 'bandstop'): try: bw = warped[1] - warped[0] wo = sqrt(warped[0] * warped[1]) except IndexError: raise ValueError('Wn must specify start and stop frequencies') if btype == 'bandpass': z, p, k = lp2bp_zpk(z, p, k, wo=wo, bw=bw) elif btype == 'bandstop': z, p, k = lp2bs_zpk(z, p, k, wo=wo, bw=bw) else: raise NotImplementedError("'%s' not implemented in iirfilter." % btype) # Find discrete equivalent if necessary if not analog: z, p, k = bilinear_zpk(z, p, k, fs=fs) # Transform to proper out type (pole-zero, state-space, numer-denom) if output == 'zpk': return z, p, k elif output == 'ba': return zpk2tf(z, p, k) elif output == 'sos': return zpk2sos(z, p, k)
12,460
def refine_type(ti: Type, si: Type) -> Type:
    """Refine `ti` by replacing Anys in it with information taken from `si`

    This basically works by, when the types have the same structure,
    traversing both of them in parallel and replacing Any on the left
    with whatever the type on the right is. If the types don't have the
    same structure (or aren't supported), the left type is chosen.

    For example:
      refine(Any, T) = T,  for all T
      refine(float, int) = float
      refine(List[Any], List[int]) = List[int]
      refine(Dict[int, Any], Dict[Any, int]) = Dict[int, int]
      refine(Tuple[int, Any], Tuple[Any, int]) = Tuple[int, int]
      refine(Callable[[Any], Any], Callable[[int], int]) = Callable[[int], int]
      refine(Callable[..., int], Callable[[int, float], Any]) = Callable[[int, float], int]
      refine(Optional[Any], int) = Optional[int]
      refine(Optional[Any], Optional[int]) = Optional[int]
      refine(Optional[Any], Union[int, str]) = Optional[Union[int, str]]
      refine(Optional[List[Any]], List[int]) = List[int]
    """
    t = get_proper_type(ti)
    s = get_proper_type(si)

    if isinstance(t, AnyType):
        # If s is also an any, we return t in case it is a missing_import any
        return s if not isinstance(s, AnyType) else t

    if isinstance(t, Instance) and isinstance(s, Instance) and t.type == s.type:
        return t.copy_modified(args=[refine_type(ta, sa) for ta, sa in zip(t.args, s.args)])

    if (
        isinstance(t, TupleType)
        and isinstance(s, TupleType)
        and t.partial_fallback == s.partial_fallback
        and len(t.items) == len(s.items)
    ):
        return t.copy_modified(items=[refine_type(ta, sa) for ta, sa in zip(t.items, s.items)])

    if isinstance(t, CallableType) and isinstance(s, CallableType):
        return refine_callable(t, s)

    if isinstance(t, UnionType):
        return refine_union(t, s)

    # TODO: Refining of builtins.tuple, Type?

    return t
def refine_type(ti: Type, si: Type) -> Type:
    """Refine `ti` by replacing Anys in it with information taken from `si`

    This basically works by, when the types have the same structure,
    traversing both of them in parallel and replacing Any on the left
    with whatever the type on the right is. If the types don't have the
    same structure (or aren't supported), the left type is chosen.

    For example:
      refine(Any, T) = T,  for all T
      refine(float, int) = float
      refine(List[Any], List[int]) = List[int]
      refine(Dict[int, Any], Dict[Any, int]) = Dict[int, int]
      refine(Tuple[int, Any], Tuple[Any, int]) = Tuple[int, int]
      refine(Callable[[Any], Any], Callable[[int], int]) = Callable[[int], int]
      refine(Callable[..., int], Callable[[int, float], Any]) = Callable[[int, float], int]
      refine(Optional[Any], int) = Optional[int]
      refine(Optional[Any], Optional[int]) = Optional[int]
      refine(Optional[Any], Union[int, str]) = Optional[Union[int, str]]
      refine(Optional[List[Any]], List[int]) = List[int]
    """
    t = get_proper_type(ti)
    s = get_proper_type(si)

    if isinstance(t, AnyType):
        # If s is also an Any, we return t in case it is a missing_import Any
        return s if not isinstance(s, AnyType) else t

    if isinstance(t, Instance) and isinstance(s, Instance) and t.type == s.type:
        return t.copy_modified(args=[refine_type(ta, sa) for ta, sa in zip(t.args, s.args)])

    if (
        isinstance(t, TupleType)
        and isinstance(s, TupleType)
        and t.partial_fallback == s.partial_fallback
        and len(t.items) == len(s.items)
    ):
        return t.copy_modified(items=[refine_type(ta, sa) for ta, sa in zip(t.items, s.items)])

    if isinstance(t, CallableType) and isinstance(s, CallableType):
        return refine_callable(t, s)

    if isinstance(t, UnionType):
        return refine_union(t, s)

    # TODO: Refining of builtins.tuple, Type?

    return t
2,889
def count_nonzero(X, axis=None, sample_weight=None):
    """A variant of X.getnnz() with extension to weighting on axis 0.

    Useful in efficiently calculating multilabel metrics.

    Parameters
    ----------
    X : sparse matrix of shape (n_samples, n_labels)
        Input data. It should be of CSR format.

    axis : {0, 1}, default=None
        The axis on which the data is aggregated.

    sample_weight : array-like of shape (n_samples,), default=None
        Weight for each row of X.

    Returns
    -------
    int or array of int
        Number of non-zero values in the array along a given axis. Otherwise,
        the total number of non-zero values in the array is returned.
    """
    if axis == -1:
        axis = 1
    elif axis == -2:
        axis = 0
    elif X.format != "csr":
        raise TypeError("Expected CSR sparse format, got {0}".format(X.format))

    # We rely here on the fact that np.diff(Y.indptr) for a CSR
    # will return the number of nonzero entries in each row.
    # A bincount over Y.indices will return the number of nonzeros
    # in each column. See ``csr_matrix.getnnz`` in scipy >= 0.14.
    if axis is None:
        if sample_weight is None:
            return X.nnz
        else:
            return np.dot(np.diff(X.indptr), sample_weight)
    elif axis == 1:
        out = np.diff(X.indptr)
        if sample_weight is None:
            # astype here is for consistency with axis=0 dtype
            return out.astype("intp")
        return out * sample_weight
    elif axis == 0:
        if sample_weight is None:
            return np.bincount(X.indices, minlength=X.shape[1])
        else:
            weights = np.repeat(sample_weight, np.diff(X.indptr))
            return np.bincount(X.indices, minlength=X.shape[1], weights=weights)
    else:
        raise ValueError("Unsupported axis: {0}".format(axis))
def count_nonzero(X, axis=None, sample_weight=None):
    """A variant of X.getnnz() with extension to weighting on axis 0.

    Useful in efficiently calculating multilabel metrics.

    Parameters
    ----------
    X : sparse matrix of shape (n_samples, n_labels)
        Input data. It should be of CSR format.

    axis : {0, 1}, default=None
        The axis on which the data is aggregated.

    sample_weight : array-like of shape (n_samples,), default=None
        Weight for each row of X.

    Returns
    -------
    nnz : int, float, ndarray of shape (n_samples,) or ndarray of shape (n_features,)
        Number of non-zero values in the array along a given axis. Otherwise,
        the total number of non-zero values in the array is returned.
    """
    if axis == -1:
        axis = 1
    elif axis == -2:
        axis = 0
    elif X.format != "csr":
        raise TypeError("Expected CSR sparse format, got {0}".format(X.format))

    # We rely here on the fact that np.diff(Y.indptr) for a CSR
    # will return the number of nonzero entries in each row.
    # A bincount over Y.indices will return the number of nonzeros
    # in each column. See ``csr_matrix.getnnz`` in scipy >= 0.14.
    if axis is None:
        if sample_weight is None:
            return X.nnz
        else:
            return np.dot(np.diff(X.indptr), sample_weight)
    elif axis == 1:
        out = np.diff(X.indptr)
        if sample_weight is None:
            # astype here is for consistency with axis=0 dtype
            return out.astype("intp")
        return out * sample_weight
    elif axis == 0:
        if sample_weight is None:
            return np.bincount(X.indices, minlength=X.shape[1])
        else:
            weights = np.repeat(sample_weight, np.diff(X.indptr))
            return np.bincount(X.indices, minlength=X.shape[1], weights=weights)
    else:
        raise ValueError("Unsupported axis: {0}".format(axis))
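The edit above only rewrites the Returns entry of the docstring. The CSR bookkeeping the inline comment relies on can be checked with a small SciPy example (illustrative, not part of scikit-learn):

import numpy as np
from scipy import sparse

X = sparse.csr_matrix(np.array([[0, 1, 1],
                                [1, 0, 0],
                                [0, 0, 0]]))
# per-row nonzero counts via the indptr trick used above (axis=1)
print(np.diff(X.indptr))                              # [2 1 0]
# per-column counts via a bincount over the column indices (axis=0)
print(np.bincount(X.indices, minlength=X.shape[1]))   # [1 1 1]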
46,204
def imread(filename: str):
    """custom imaplementation of imread to avoid skimage dependecy"""
    ext = os.path.splitext(filename)[1]
    if ext in [".tif", "tiff", ".lsm"]:
        import tifffile
        image = tifffile.imread(filename)
    else:
        import imageio
        image = imageio.imread(filename)
    if not hasattr(image, 'ndim'):
        return image
    if image.ndim > 2:
        if image.shape[-1] not in (3, 4) and image.shape[-3] in (3, 4):
            image = np.swapaxes(image, -1, -3)
            image = np.swapaxes(image, -2, -3)
    return image
def imread(filename: str) -> np.ndarray:
    """Custom implementation of imread to avoid a skimage dependency."""
    ext = os.path.splitext(filename)[1]
    # os.path.splitext keeps the leading dot, so ".tiff" must be matched with the dot
    if ext in [".tif", ".tiff", ".lsm"]:
        import tifffile
        image = tifffile.imread(filename)
    else:
        import imageio
        image = imageio.imread(filename)
    if not hasattr(image, 'ndim'):
        return image
    if image.ndim > 2:
        if image.shape[-1] not in (3, 4) and image.shape[-3] in (3, 4):
            image = np.swapaxes(image, -1, -3)
            image = np.swapaxes(image, -2, -3)
    return image
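A quick NumPy-only check of the axis-swapping branch above, assuming a channel-first input array:

import numpy as np

# A (channels, height, width) volume gets moved to (height, width, channels),
# mirroring the pair of swaps in imread above.
chw = np.zeros((3, 64, 128))
hwc = np.swapaxes(np.swapaxes(chw, -1, -3), -2, -3)
print(hwc.shape)  # (64, 128, 3)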
27,902
def logical_xor(xp, a, b):
    return xp.logical_or(a, b)
def logical_xor(xp, a, b):
    return xp.logical_xor(a, b)
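The one-line fix swaps logical_or for logical_xor; the two differ exactly when both inputs are True, as the NumPy check below shows (xp is assumed to be a NumPy-compatible module such as numpy or cupy):

import numpy as np

a = np.array([True, True, False, False])
b = np.array([True, False, True, False])
print(np.logical_or(a, b))   # [ True  True  True False] -- what the buggy version computed
print(np.logical_xor(a, b))  # [False  True  True False] -- the intended result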
10,831
def remove_unnecessary_nrt_usage(function, context, fndesc):
    """
    Remove unnecessary NRT incref/decref in the given LLVM function.
    It uses highlevel type info to determine if the function does not need NRT.
    Such a function does not:
    - return array object;
    - take arguments that need refcount except array;
    - call function that return refcounted object.

    In effect, the function will not capture or create references that extend
    the lifetime of any refcounted objects beyound the lifetime of the
    function.

    The rewrite performs inplace.
    If rewrite has happen, this function return True. Otherwise, return False.
    """
    dmm = context.data_model_manager
    if _legalize(function.module, dmm, fndesc):
        _rewrite_function(function)
        return True
    else:
        return False
def remove_unnecessary_nrt_usage(function, context, fndesc):
    """
    Remove unnecessary NRT incref/decref in the given LLVM function.
    It uses high-level type info to determine if the function does not need NRT.
    Such a function does not:
    - return array object;
    - take arguments that need refcount except array;
    - call function(s) that return refcounted object.

    In effect, the function will not capture or create references that extend
    the lifetime of any refcounted objects beyond the lifetime of the function.

    The rewrite is performed in-place.
    If the rewrite happened, this function returns True. Otherwise, it returns
    False.
    """
    dmm = context.data_model_manager
    if _legalize(function.module, dmm, fndesc):
        _rewrite_function(function)
        return True
    else:
        return False
2,001
def _weighted_percentile(array, sample_weight, percentile=50, interpolation="nearest"): """Compute weighted percentile Computes lower weighted percentile. If `array` is a 2D array, the `percentile` is computed along the axis 0. .. versionchanged:: 0.24 Accepts 2D `array`. Parameters ---------- array : ndarray of shape (n,) or (n, m) Values to take the weighted percentile of. sample_weight: ndarray of (n,) or (n, m) Weights for each value in `array`. Must be same shape as `array` or of shape `(array.shape[0],)`. percentile: inr or float, default=50 Percentile to compute. Must be value between 0 and 100. interpolation : {"linear", "lower", "higher", "nearest"}, default="lower" The interpolation method to use when the percentile lies between data points `i` and `j`: * `"linear"`: `i + (j - i) * fraction`, where `fraction` is the fractional part of the index surrounded by `i` and `j`; * `"lower"`: i`; * `"higher"`: `j`; * `"nearest"`: `i` or `j`, whichever is nearest (default). .. versionadded: 0.24 Returns ------- percentile_value : float or int if `array` of shape (n,), otherwise\ ndarray of shape (m,) Weighted percentile. """ possible_interpolation = ("linear", "lower", "higher", "nearest") if interpolation not in possible_interpolation: raise ValueError( f"'interpolation' should be one of " f"{', '.join(possible_interpolation)}. Got '{interpolation}' " f"instead." ) if np.any(np.count_nonzero(sample_weight, axis=0) < 1): raise ValueError( "All weights cannot be null when computing a weighted percentile." ) n_dim = array.ndim if n_dim == 0: return array[()] if array.ndim == 1: array = array.reshape((-1, 1)) if (array.shape != sample_weight.shape and array.shape[0] == sample_weight.shape[0]): # when `sample_weight` is 1D, we repeat it for each column of `array` sample_weight = np.tile(sample_weight, (array.shape[1], 1)).T n_rows, n_cols = array.shape sorted_idx = np.argsort(array, axis=0) sorted_weights = _take_along_axis(sample_weight, sorted_idx, axis=0) percentile = np.array([percentile / 100] * n_cols) cum_weigths = stable_cumsum(sorted_weights, axis=0) def _squeeze_arr(arr, n_dim): return arr[0] if n_dim == 1 else arr # Percentile can be computed with 3 different alternative: # https://en.wikipedia.org/wiki/Percentile # These 3 alternatives depend of the value of a parameter C. NumPy uses # the variant where C=0 which allows to obtained a strictly monotically # increasing function which is defined as: # P = (x - 1) / (N - 1); x in [1, N] # Weighted percentile change this formula by taking into account the # weights instead of the data frequency. # P_w = (x - w) / (S_w - w), x in [1, N], w being the weight and S_w being # the sum of the weights. 
adjusted_percentile = (cum_weigths - sorted_weights) with np.errstate(invalid="ignore"): adjusted_percentile /= cum_weigths[-1] - sorted_weights nan_mask = np.isnan(adjusted_percentile) adjusted_percentile[nan_mask] = 1 if interpolation in ("lower", "higher", "nearest"): percentile_idx = np.array([ np.searchsorted(adjusted_percentile[:, col], percentile[col], side="left") for col in range(n_cols) ]) if interpolation == "lower" and np.all(percentile < 1): # P = 100 is a corner case for "lower" percentile_idx -= 1 elif interpolation == "nearest" and np.all(percentile < 1): for col in range(n_cols): error_higher = abs( adjusted_percentile[percentile_idx[col], col] - percentile[col] ) error_lower = abs( adjusted_percentile[percentile_idx[col] - 1, col] - percentile[col] ) if error_higher >= error_lower: percentile_idx[col] -= 1 percentile_idx = np.apply_along_axis( lambda x: np.clip(x, 0, n_rows - 1), axis=0, arr=percentile_idx ) percentile_value = array[ sorted_idx[percentile_idx, np.arange(n_cols)], np.arange(n_cols) ] percentile_value = _squeeze_arr(percentile_value, n_dim) else: # interpolation == "linear" percentile_value = np.array([ np.interp( x=percentile[col], xp=adjusted_percentile[:, col], fp=array[sorted_idx[:, col], col], ) for col in range(n_cols) ]) percentile_value = _squeeze_arr(percentile_value, n_dim) single_sample_weight = np.count_nonzero(sample_weight, axis=0) if np.any(single_sample_weight == 1): # edge case where a single weight is non-null in which case the # previous methods will fail if not isinstance(percentile_value, Iterable): percentile_value = _squeeze_arr( array[np.nonzero(sample_weight)], n_dim ) else: percentile_value = np.array([ array[np.flatnonzero(sample_weight[:, col])[0], col] if n_nonzero == 1 else percentile_value[col] for col, n_nonzero in enumerate(single_sample_weight) ]) return percentile_value
def _weighted_percentile(array, sample_weight, percentile=50, interpolation="nearest"): """Compute weighted percentile Computes lower weighted percentile. If `array` is a 2D array, the `percentile` is computed along the axis 0. .. versionchanged:: 0.24 Accepts 2D `array`. Parameters ---------- array : ndarray of shape (n,) or (n, m) Values to take the weighted percentile of. sample_weight: ndarray of (n,) or (n, m) Weights for each value in `array`. Must be same shape as `array` or of shape `(array.shape[0],)`. percentile: inr or float, default=50 Percentile to compute. Must be value between 0 and 100. interpolation : {"linear", "lower", "higher", "nearest"}, default="lower" The interpolation method to use when the percentile lies between data points `i` and `j`: * "linear": `i + (j - i) * fraction`, where `fraction` is the fractional part of the index surrounded by `i` and `j`; * `"lower"`: i`; * `"higher"`: `j`; * `"nearest"`: `i` or `j`, whichever is nearest (default). .. versionadded: 0.24 Returns ------- percentile_value : float or int if `array` of shape (n,), otherwise\ ndarray of shape (m,) Weighted percentile. """ possible_interpolation = ("linear", "lower", "higher", "nearest") if interpolation not in possible_interpolation: raise ValueError( f"'interpolation' should be one of " f"{', '.join(possible_interpolation)}. Got '{interpolation}' " f"instead." ) if np.any(np.count_nonzero(sample_weight, axis=0) < 1): raise ValueError( "All weights cannot be null when computing a weighted percentile." ) n_dim = array.ndim if n_dim == 0: return array[()] if array.ndim == 1: array = array.reshape((-1, 1)) if (array.shape != sample_weight.shape and array.shape[0] == sample_weight.shape[0]): # when `sample_weight` is 1D, we repeat it for each column of `array` sample_weight = np.tile(sample_weight, (array.shape[1], 1)).T n_rows, n_cols = array.shape sorted_idx = np.argsort(array, axis=0) sorted_weights = _take_along_axis(sample_weight, sorted_idx, axis=0) percentile = np.array([percentile / 100] * n_cols) cum_weigths = stable_cumsum(sorted_weights, axis=0) def _squeeze_arr(arr, n_dim): return arr[0] if n_dim == 1 else arr # Percentile can be computed with 3 different alternative: # https://en.wikipedia.org/wiki/Percentile # These 3 alternatives depend of the value of a parameter C. NumPy uses # the variant where C=0 which allows to obtained a strictly monotically # increasing function which is defined as: # P = (x - 1) / (N - 1); x in [1, N] # Weighted percentile change this formula by taking into account the # weights instead of the data frequency. # P_w = (x - w) / (S_w - w), x in [1, N], w being the weight and S_w being # the sum of the weights. 
adjusted_percentile = (cum_weigths - sorted_weights) with np.errstate(invalid="ignore"): adjusted_percentile /= cum_weigths[-1] - sorted_weights nan_mask = np.isnan(adjusted_percentile) adjusted_percentile[nan_mask] = 1 if interpolation in ("lower", "higher", "nearest"): percentile_idx = np.array([ np.searchsorted(adjusted_percentile[:, col], percentile[col], side="left") for col in range(n_cols) ]) if interpolation == "lower" and np.all(percentile < 1): # P = 100 is a corner case for "lower" percentile_idx -= 1 elif interpolation == "nearest" and np.all(percentile < 1): for col in range(n_cols): error_higher = abs( adjusted_percentile[percentile_idx[col], col] - percentile[col] ) error_lower = abs( adjusted_percentile[percentile_idx[col] - 1, col] - percentile[col] ) if error_higher >= error_lower: percentile_idx[col] -= 1 percentile_idx = np.apply_along_axis( lambda x: np.clip(x, 0, n_rows - 1), axis=0, arr=percentile_idx ) percentile_value = array[ sorted_idx[percentile_idx, np.arange(n_cols)], np.arange(n_cols) ] percentile_value = _squeeze_arr(percentile_value, n_dim) else: # interpolation == "linear" percentile_value = np.array([ np.interp( x=percentile[col], xp=adjusted_percentile[:, col], fp=array[sorted_idx[:, col], col], ) for col in range(n_cols) ]) percentile_value = _squeeze_arr(percentile_value, n_dim) single_sample_weight = np.count_nonzero(sample_weight, axis=0) if np.any(single_sample_weight == 1): # edge case where a single weight is non-null in which case the # previous methods will fail if not isinstance(percentile_value, Iterable): percentile_value = _squeeze_arr( array[np.nonzero(sample_weight)], n_dim ) else: percentile_value = np.array([ array[np.flatnonzero(sample_weight[:, col])[0], col] if n_nonzero == 1 else percentile_value[col] for col, n_nonzero in enumerate(single_sample_weight) ]) return percentile_value
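The inline comment above defines the adjusted percentile as P_w = (x - w) / (S_w - w). A small NumPy illustration with unit weights, where the formula reduces to (x - 1) / (N - 1):

import numpy as np

# Illustrates the adjusted-percentile formula from the comment above:
# P_w = (cumulative weight - weight) / (total weight - weight)
values = np.array([1.0, 2.0, 3.0, 4.0])
weights = np.array([1.0, 1.0, 1.0, 1.0])
order = np.argsort(values)
sw = weights[order]
cum = np.cumsum(sw)
adjusted = (cum - sw) / (cum[-1] - sw)
print(adjusted)  # [0.         0.33333333 0.66666667 1.        ] == (x - 1) / (N - 1)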
47,016
def main(): # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. parser = HfArgumentParser((ModelArguments, DataTrainingArguments, Seq2SeqTrainingArguments)) if len(sys.argv) == 2 and sys.argv[1].endswith(".json"): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1])) else: model_args, data_args, training_args = parser.parse_args_into_dataclasses() check_output_dir(training_args) # Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN, ) logger.warning( "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s", training_args.local_rank, training_args.device, training_args.n_gpu, bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED), training_args.fp16, ) # Set the verbosity to info of the Transformers logger (on main process only): if is_main_process(training_args.local_rank): transformers.utils.logging.set_verbosity_info() transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() logger.info("Training/evaluation parameters %s", training_args) # Set seed set_seed(training_args.seed) # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. config = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, ) extra_model_params = ("encoder_layerdrop", "decoder_layerdrop", "dropout", "attention_dropout") for p in extra_model_params: if getattr(training_args, p, None): assert hasattr(config, p), f"({config.__class__.__name__}) doesn't have a `{p}` attribute" setattr(config, p, getattr(training_args, p)) tokenizer = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, ) model = AutoModelForSeq2SeqLM.from_pretrained( model_args.model_name_or_path, from_tf=".ckpt" in model_args.model_name_or_path, config=config, cache_dir=model_args.cache_dir, ) # use task specific params use_task_specific_params(model, data_args.task) # set num_beams for evaluation if data_args.eval_beams is None: data_args.eval_beams = model.config.num_beams # set decoder_start_token_id for MBart if model.config.decoder_start_token_id is None and isinstance(tokenizer, MBartTokenizer): assert ( data_args.tgt_lang is not None and data_args.src_lang is not None ), "mBart requires --tgt_lang and --src_lang" model.config.decoder_start_token_id = tokenizer.lang_code_to_id[data_args.tgt_lang] if model_args.freeze_embeds: freeze_embeds(model) if model_args.freeze_encoder: freeze_params(model.get_encoder()) assert_all_frozen(model.get_encoder()) dataset_class = Seq2SeqDataset # Get datasets train_dataset = ( dataset_class( tokenizer, type_path="train", data_dir=data_args.data_dir, n_obs=data_args.n_train, max_target_length=data_args.max_target_length, max_source_length=data_args.max_source_length, prefix=model.config.prefix or "", ) if training_args.do_train else None ) eval_dataset = ( dataset_class( 
tokenizer, type_path="val", data_dir=data_args.data_dir, n_obs=data_args.n_val, max_target_length=data_args.val_max_target_length, max_source_length=data_args.max_source_length, prefix=model.config.prefix or "", ) if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO else None ) test_dataset = ( dataset_class( tokenizer, type_path="test", data_dir=data_args.data_dir, n_obs=data_args.n_test, max_target_length=data_args.test_max_target_length, max_source_length=data_args.max_source_length, prefix=model.config.prefix or "", ) if training_args.do_predict else None ) # Initialize our Trainer compute_metrics_fn = ( build_compute_metrics_fn(data_args.task, tokenizer) if training_args.predict_with_generate else None ) trainer = Seq2SeqTrainer( model=model, config=config, args=training_args, train_dataset=train_dataset, eval_dataset=eval_dataset, data_collator=Seq2SeqDataCollator(tokenizer, data_args, training_args.tpu_num_cores), compute_metrics=compute_metrics_fn, data_args=data_args, ) all_metrics = {} # Training if training_args.do_train: t0 = time.time() trainer.train( model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None ) metrics = speed_metrics("train", t0, data_args.n_train) trainer.save_model() if trainer.is_world_process_zero(): handle_metrics("train", metrics, training_args.output_dir) all_metrics.update(metrics) # For convenience, we also re-save the tokenizer to the same directory, # so that you can share your model easily on huggingface.co/models =) trainer.state.save_to_json(os.path.join(training_args.output_dir, "trainer_state.json")) tokenizer.save_pretrained(training_args.output_dir) # Evaluation if training_args.do_eval: logger.info("*** Evaluate ***") t0 = time.time() metrics = trainer.evaluate(metric_key_prefix="val") metrics.update(speed_metrics("val", t0, data_args.n_val)) metrics["val_loss"] = round(metrics["val_loss"], 4) if trainer.is_world_process_zero(): handle_metrics("val", metrics, training_args.output_dir) all_metrics.update(metrics) if training_args.do_predict: logging.info("*** Test ***") t0 = time.time() test_output = trainer.predict(test_dataset=test_dataset, metric_key_prefix="test") metrics = test_output.metrics metrics.update(speed_metrics("test", t0, data_args.n_test)) if trainer.is_world_process_zero(): metrics["test_loss"] = round(metrics["test_loss"], 4) handle_metrics("test", metrics, training_args.output_dir) all_metrics.update(metrics) if training_args.predict_with_generate: test_preds = tokenizer.batch_decode( test_output.predictions, skip_special_tokens=True, clean_up_tokenization_spaces=True ) test_preds = lmap(str.strip, test_preds) write_txt_file(test_preds, os.path.join(training_args.output_dir, "test_generations.txt")) if trainer.is_world_process_zero(): save_json(all_metrics, os.path.join(training_args.output_dir, "all_results.json")) return all_metrics
def main(): # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. parser = HfArgumentParser((ModelArguments, DataTrainingArguments, Seq2SeqTrainingArguments)) if len(sys.argv) == 2 and sys.argv[1].endswith(".json"): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1])) else: model_args, data_args, training_args = parser.parse_args_into_dataclasses() check_output_dir(training_args) # Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN, ) logger.warning( "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s", training_args.local_rank, training_args.device, training_args.n_gpu, bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED), training_args.fp16, ) # Set the verbosity to info of the Transformers logger (on main process only): if is_main_process(training_args.local_rank): transformers.utils.logging.set_verbosity_info() transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() logger.info("Training/evaluation parameters %s", training_args) # Set seed set_seed(training_args.seed) # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. config = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, ) extra_model_params = ("encoder_layerdrop", "decoder_layerdrop", "dropout", "attention_dropout") for p in extra_model_params: if getattr(training_args, p, None): assert hasattr(config, p), f"({config.__class__.__name__}) doesn't have a `{p}` attribute" setattr(config, p, getattr(training_args, p)) tokenizer = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, ) model = AutoModelForSeq2SeqLM.from_pretrained( model_args.model_name_or_path, from_tf=".ckpt" in model_args.model_name_or_path, config=config, cache_dir=model_args.cache_dir, ) # use task specific params use_task_specific_params(model, data_args.task) # set num_beams for evaluation if data_args.eval_beams is None: data_args.eval_beams = model.config.num_beams # set decoder_start_token_id for MBart if model.config.decoder_start_token_id is None and isinstance(tokenizer, MBartTokenizer): assert ( data_args.tgt_lang is not None and data_args.src_lang is not None ), "mBart requires --tgt_lang and --src_lang" model.config.decoder_start_token_id = tokenizer.lang_code_to_id[data_args.tgt_lang] if model_args.freeze_embeds: freeze_embeds(model) if model_args.freeze_encoder: freeze_params(model.get_encoder()) assert_all_frozen(model.get_encoder()) dataset_class = Seq2SeqDataset # Get datasets train_dataset = ( dataset_class( tokenizer, type_path="train", data_dir=data_args.data_dir, n_obs=data_args.n_train, max_target_length=data_args.max_target_length, max_source_length=data_args.max_source_length, prefix=model.config.prefix or "", ) if training_args.do_train else None ) eval_dataset = ( dataset_class( 
tokenizer, type_path="val", data_dir=data_args.data_dir, n_obs=data_args.n_val, max_target_length=data_args.val_max_target_length, max_source_length=data_args.max_source_length, prefix=model.config.prefix or "", ) if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO else None ) test_dataset = ( dataset_class( tokenizer, type_path="test", data_dir=data_args.data_dir, n_obs=data_args.n_test, max_target_length=data_args.test_max_target_length, max_source_length=data_args.max_source_length, prefix=model.config.prefix or "", ) if training_args.do_predict else None ) # Initialize our Trainer compute_metrics_fn = ( build_compute_metrics_fn(data_args.task, tokenizer) if training_args.predict_with_generate else None ) trainer = Seq2SeqTrainer( model=model, config=config, args=training_args, train_dataset=train_dataset, eval_dataset=eval_dataset, data_collator=Seq2SeqDataCollator(tokenizer, data_args, training_args.tpu_num_cores), compute_metrics=compute_metrics_fn, data_args=data_args, ) all_metrics = {} # Training if training_args.do_train: start_time = time.time() trainer.train( model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None ) metrics = speed_metrics("train", t0, data_args.n_train) trainer.save_model() if trainer.is_world_process_zero(): handle_metrics("train", metrics, training_args.output_dir) all_metrics.update(metrics) # For convenience, we also re-save the tokenizer to the same directory, # so that you can share your model easily on huggingface.co/models =) trainer.state.save_to_json(os.path.join(training_args.output_dir, "trainer_state.json")) tokenizer.save_pretrained(training_args.output_dir) # Evaluation if training_args.do_eval: logger.info("*** Evaluate ***") t0 = time.time() metrics = trainer.evaluate(metric_key_prefix="val") metrics.update(speed_metrics("val", t0, data_args.n_val)) metrics["val_loss"] = round(metrics["val_loss"], 4) if trainer.is_world_process_zero(): handle_metrics("val", metrics, training_args.output_dir) all_metrics.update(metrics) if training_args.do_predict: logging.info("*** Test ***") t0 = time.time() test_output = trainer.predict(test_dataset=test_dataset, metric_key_prefix="test") metrics = test_output.metrics metrics.update(speed_metrics("test", t0, data_args.n_test)) if trainer.is_world_process_zero(): metrics["test_loss"] = round(metrics["test_loss"], 4) handle_metrics("test", metrics, training_args.output_dir) all_metrics.update(metrics) if training_args.predict_with_generate: test_preds = tokenizer.batch_decode( test_output.predictions, skip_special_tokens=True, clean_up_tokenization_spaces=True ) test_preds = lmap(str.strip, test_preds) write_txt_file(test_preds, os.path.join(training_args.output_dir, "test_generations.txt")) if trainer.is_world_process_zero(): save_json(all_metrics, os.path.join(training_args.output_dir, "all_results.json")) return all_metrics
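The only visible change in this record is the training timer being renamed from t0 to start_time (note that, as printed above, the modified train branch still passes t0 to speed_metrics). The speed_metrics helper itself is not shown in this record, so the sketch below only illustrates the kind of timing summary such a helper could return; the key names and rounding are assumptions.

import time

def speed_metrics_sketch(split, start_time, num_samples=None):
    # Report elapsed wall-clock time for a phase, plus samples/second if known.
    runtime = time.time() - start_time
    result = {f"{split}_runtime": round(runtime, 4)}
    if num_samples is not None and runtime > 0:
        result[f"{split}_samples_per_second"] = round(num_samples / runtime, 3)
    return result

t0 = time.time()
time.sleep(0.1)  # stand-in for trainer.train(...)
print(speed_metrics_sketch("train", t0, num_samples=1000))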
13,790
def track_request_user_changes(request): """ Instrument the request object so that we store changes to the `user` attribute for future logging if needed for debugging user mismatches. This is done by changing the `__class__` attribute of the request object to point to a new class we created on the fly which is exactly the same as the underlying request class but with an override for the `__setattr__` function to catch the attribute chages. """ original_user = getattr(request, 'user', None) class SafeSessionRequestWrapper(request.__class__): """ A wrapper class for the request object. """ user_changes = [] def __setattr__(self, name, value): nonlocal original_user if name == 'user': stack = inspect.stack() # Written this way in case you need more of the stack for debugging. location = "\n".join("%30s : %s:%d" % (t[3], t[1], t[2]) for t in stack[0:12]) if not hasattr(request, name): original_user = value if hasattr(value, 'id'): self.user_changes.append( f"SafeCookieData: Setting for the first time: {value.id!r}\n" f"{location}" ) else: self.user_changes.append( f"SafeCookieData: Setting for the first time, but user has no id: {value!r}\n" f"{location}" ) elif value != getattr(request, name): current_user = getattr(request, name) if hasattr(value, 'id'): self.user_changes.append( f"SafeCookieData: Changing request user. " f"Originally {original_user.id!r}, now {current_user.id!r} and will become {value.id!r}\n" f"{location}" ) else: self.user_changes.append( f"SafeCookieData: Changing request user but user has no id. " f"Originally {original_user!r}, now {current_user!r} and will become {value!r}\n" f"{location}" ) else: # Value being set but not actually changing. pass return super().__setattr__(name, value) request.__class__ = SafeSessionRequestWrapper
def track_request_user_changes(request): """ Instrument the request object so that we store changes to the `user` attribute for future logging if needed for debugging user mismatches. This is done by changing the `__class__` attribute of the request object to point to a new class we created on the fly which is exactly the same as the underlying request class but with an override for the `__setattr__` function to catch the attribute changes. """ original_user = getattr(request, 'user', None) class SafeSessionRequestWrapper(request.__class__): """ A wrapper class for the request object. """ user_changes = [] def __setattr__(self, name, value): nonlocal original_user if name == 'user': stack = inspect.stack() # Written this way in case you need more of the stack for debugging. location = "\n".join("%30s : %s:%d" % (t[3], t[1], t[2]) for t in stack[0:12]) if not hasattr(request, name): original_user = value if hasattr(value, 'id'): self.user_changes.append( f"SafeCookieData: Setting for the first time: {value.id!r}\n" f"{location}" ) else: self.user_changes.append( f"SafeCookieData: Setting for the first time, but user has no id: {value!r}\n" f"{location}" ) elif value != getattr(request, name): current_user = getattr(request, name) if hasattr(value, 'id'): self.user_changes.append( f"SafeCookieData: Changing request user. " f"Originally {original_user.id!r}, now {current_user.id!r} and will become {value.id!r}\n" f"{location}" ) else: self.user_changes.append( f"SafeCookieData: Changing request user but user has no id. " f"Originally {original_user!r}, now {current_user!r} and will become {value!r}\n" f"{location}" ) else: # Value being set but not actually changing. pass return super().__setattr__(name, value) request.__class__ = SafeSessionRequestWrapper
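The pair above only fixes a docstring typo; the mechanism worth noting is reassigning the instance's __class__ to an on-the-fly subclass whose __setattr__ records writes to one attribute. A self-contained sketch of that pattern, using a stand-in class rather than Django's request object:

class FakeRequest:
    pass

def track_attr_changes(obj, attr_name):
    changes = []

    class Wrapper(obj.__class__):
        # Intercept assignments to the watched attribute, then delegate.
        def __setattr__(self, name, value):
            if name == attr_name:
                changes.append(value)
            return super().__setattr__(name, value)

    obj.__class__ = Wrapper
    return changes

req = FakeRequest()
log = track_attr_changes(req, "user")
req.user = "alice"
req.user = "bob"
print(log)  # ['alice', 'bob']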
9,415
def main(): module = AnsibleModule( argument_spec=dict( login_user=dict(default=None), login_password=dict(default=None, no_log=True), login_host=dict(default='localhost'), login_port=dict(default='27017'), login_database=dict(default=None), replica_set=dict(default=None), database=dict(required=True, aliases=['db']), name=dict(required=True, aliases=['user']), password=dict(aliases=['pass'], no_log=True), ssl=dict(default=False, type='bool'), ssl_certfile=dict(default=None), ssl_keyfile=dict(default=None), ssl_ca_certs=dict(default=None), roles=dict(default=None, type='list'), state=dict(default='present', choices=['absent', 'present']), update_password=dict(default="always", choices=["always", "on_create"]), ssl_cert_reqs=dict(default='CERT_REQUIRED', choices=['CERT_NONE', 'CERT_OPTIONAL', 'CERT_REQUIRED']), ), supports_check_mode=True ) if not pymongo_found: module.fail_json(msg='the python pymongo module is required') login_user = module.params['login_user'] login_password = module.params['login_password'] login_host = module.params['login_host'] login_port = module.params['login_port'] login_database = module.params['login_database'] replica_set = module.params['replica_set'] db_name = module.params['database'] user = module.params['name'] password = module.params['password'] ssl = module.params['ssl'] ssl_certfile = module.params['ssl_certfile'] ssl_keyfile = module.params['ssl_keyfile'] ssl_ca_certs = module.params['ssl_ca_certs'] roles = module.params['roles'] or [] state = module.params['state'] update_password = module.params['update_password'] try: connection_params = { "host": login_host, "port": int(login_port), } if replica_set: connection_params["replicaset"] = replica_set if ssl: connection_params["ssl"] = ssl connection_params["ssl_cert_reqs"] = getattr(ssl_lib, module.params['ssl_cert_reqs']) if ssl_certfile: connection_params['ssl_certfile'] = ssl_certfile if ssl_keyfile: connection_params['ssl_keyfile'] = ssl_keyfile if ssl_ca_certs: connection_params['ssl_ca_certs'] = ssl_ca_certs client = MongoClient(**connection_params) # NOTE: this check must be done ASAP. 
# We doesn't need to be authenticated (this ability has lost in PyMongo 3.6) if LooseVersion(PyMongoVersion) <= LooseVersion('3.5'): check_compatibility(module, client) if login_user is None and login_password is None: mongocnf_creds = load_mongocnf() if mongocnf_creds is not False: login_user = mongocnf_creds['user'] login_password = mongocnf_creds['password'] elif login_password is None or login_user is None: module.fail_json(msg='when supplying login arguments, both login_user and login_password must be provided') if login_user is not None and login_password is not None: client.admin.authenticate(login_user, login_password, source=login_database) elif LooseVersion(PyMongoVersion) >= LooseVersion('3.0'): if db_name != "admin": module.fail_json(msg='The localhost login exception only allows the first admin account to be created') # else: this has to be the first admin user added except Exception as e: module.fail_json(msg='unable to connect to database: %s' % to_native(e), exception=traceback.format_exc()) if state == 'present': if password is None and update_password == 'always': module.fail_json(msg='password parameter required when adding a user unless update_password is set to on_create') try: if update_password != 'always': uinfo = user_find(client, user, db_name) if uinfo: password = None if not check_if_roles_changed(uinfo, roles, db_name): module.exit_json(changed=False, user=user) if module.check_mode: module.exit_json(changed=True, user=user) user_add(module, client, db_name, user, password, roles) except Exception as e: module.fail_json(msg='Unable to add or update user: %s' % to_native(e), exception=traceback.format_exc()) # Here we can check password change if mongo provide a query for that : https://jira.mongodb.org/browse/SERVER-22848 # newuinfo = user_find(client, user, db_name) # if uinfo['role'] == newuinfo['role'] and CheckPasswordHere: # module.exit_json(changed=False, user=user) elif state == 'absent': try: user_remove(module, client, db_name, user) except Exception as e: module.fail_json(msg='Unable to remove user: %s' % to_native(e), exception=traceback.format_exc()) module.exit_json(changed=True, user=user)
def main(): module = AnsibleModule( argument_spec=dict( login_user=dict(default=None), login_password=dict(default=None, no_log=True), login_host=dict(default='localhost'), login_port=dict(default='27017'), login_database=dict(default=None), replica_set=dict(default=None), database=dict(required=True, aliases=['db']), name=dict(required=True, aliases=['user']), password=dict(aliases=['pass'], no_log=True), ssl=dict(default=False, type='bool'), ssl_certfile=dict(type='path'), ssl_keyfile=dict(default=None), ssl_ca_certs=dict(default=None), roles=dict(default=None, type='list'), state=dict(default='present', choices=['absent', 'present']), update_password=dict(default="always", choices=["always", "on_create"]), ssl_cert_reqs=dict(default='CERT_REQUIRED', choices=['CERT_NONE', 'CERT_OPTIONAL', 'CERT_REQUIRED']), ), supports_check_mode=True ) if not pymongo_found: module.fail_json(msg='the python pymongo module is required') login_user = module.params['login_user'] login_password = module.params['login_password'] login_host = module.params['login_host'] login_port = module.params['login_port'] login_database = module.params['login_database'] replica_set = module.params['replica_set'] db_name = module.params['database'] user = module.params['name'] password = module.params['password'] ssl = module.params['ssl'] ssl_certfile = module.params['ssl_certfile'] ssl_keyfile = module.params['ssl_keyfile'] ssl_ca_certs = module.params['ssl_ca_certs'] roles = module.params['roles'] or [] state = module.params['state'] update_password = module.params['update_password'] try: connection_params = { "host": login_host, "port": int(login_port), } if replica_set: connection_params["replicaset"] = replica_set if ssl: connection_params["ssl"] = ssl connection_params["ssl_cert_reqs"] = getattr(ssl_lib, module.params['ssl_cert_reqs']) if ssl_certfile: connection_params['ssl_certfile'] = ssl_certfile if ssl_keyfile: connection_params['ssl_keyfile'] = ssl_keyfile if ssl_ca_certs: connection_params['ssl_ca_certs'] = ssl_ca_certs client = MongoClient(**connection_params) # NOTE: this check must be done ASAP. 
# We doesn't need to be authenticated (this ability has lost in PyMongo 3.6) if LooseVersion(PyMongoVersion) <= LooseVersion('3.5'): check_compatibility(module, client) if login_user is None and login_password is None: mongocnf_creds = load_mongocnf() if mongocnf_creds is not False: login_user = mongocnf_creds['user'] login_password = mongocnf_creds['password'] elif login_password is None or login_user is None: module.fail_json(msg='when supplying login arguments, both login_user and login_password must be provided') if login_user is not None and login_password is not None: client.admin.authenticate(login_user, login_password, source=login_database) elif LooseVersion(PyMongoVersion) >= LooseVersion('3.0'): if db_name != "admin": module.fail_json(msg='The localhost login exception only allows the first admin account to be created') # else: this has to be the first admin user added except Exception as e: module.fail_json(msg='unable to connect to database: %s' % to_native(e), exception=traceback.format_exc()) if state == 'present': if password is None and update_password == 'always': module.fail_json(msg='password parameter required when adding a user unless update_password is set to on_create') try: if update_password != 'always': uinfo = user_find(client, user, db_name) if uinfo: password = None if not check_if_roles_changed(uinfo, roles, db_name): module.exit_json(changed=False, user=user) if module.check_mode: module.exit_json(changed=True, user=user) user_add(module, client, db_name, user, password, roles) except Exception as e: module.fail_json(msg='Unable to add or update user: %s' % to_native(e), exception=traceback.format_exc()) # Here we can check password change if mongo provide a query for that : https://jira.mongodb.org/browse/SERVER-22848 # newuinfo = user_find(client, user, db_name) # if uinfo['role'] == newuinfo['role'] and CheckPasswordHere: # module.exit_json(changed=False, user=user) elif state == 'absent': try: user_remove(module, client, db_name, user) except Exception as e: module.fail_json(msg='Unable to remove user: %s' % to_native(e), exception=traceback.format_exc()) module.exit_json(changed=True, user=user)
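The change in this record is ssl_certfile moving to type='path' in the argument spec, so Ansible expands ~ and environment variables in the supplied path. The surrounding logic only adds connection options that were actually provided; a dependency-free sketch of that assembly (the values passed at the bottom are illustrative, and no pymongo or Ansible import is needed):

def build_connection_params(host, port, replica_set=None, ssl=False,
                            ssl_certfile=None, ssl_keyfile=None,
                            ssl_ca_certs=None):
    # Start from the mandatory host/port and add optional keys only if set.
    params = {"host": host, "port": int(port)}
    if replica_set:
        params["replicaset"] = replica_set
    if ssl:
        params["ssl"] = True
        for key, value in (("ssl_certfile", ssl_certfile),
                           ("ssl_keyfile", ssl_keyfile),
                           ("ssl_ca_certs", ssl_ca_certs)):
            if value:
                params[key] = value
    return params

print(build_connection_params("localhost", "27017", ssl=True,
                              ssl_certfile="/etc/ssl/mongo.pem"))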
47,972
def main(): parser = argparse.ArgumentParser() parser.add_argument('-d', '--model_dir', type=Path, metavar='DIR', default=Path.cwd(), help='root of the directory tree with IR model files') parser.add_argument('-o', '--output_dir', type=Path, metavar='DIR', help='root of the directory tree to place compiled models files into') parser.add_argument('--name', metavar='PAT[,PAT...]', help='compile only models whose names match at least one of the specified patterns') parser.add_argument('--list', type=Path, metavar='FILE.LST', help='compile only models whose names match at least one of the patterns in the specified file') parser.add_argument('-ip', '--input_precision', dest='input_precision', help='Input precision of compiled network') parser.add_argument('-op', '--output_precision', dest='output_precision', help='output_precision of compiled network') parser.add_argument('--precisions', metavar='PREC[,PREC...]', help='compile only specified precisions') parser.add_argument('--target_device', help='target device for the compiled model', default='MYRIAD') parser.add_argument('--all', action='store_true', help='compile all available models') parser.add_argument('--print_all', action='store_true', help='print all available models') parser.add_argument('--compiler', type=Path, help='Compile Tool executable entry point') parser.add_argument('--dry_run', action='store_true', help='print the compilation commands without running them') args = parser.parse_args() compiler_path = args.compiler if compiler_path is None: try: compiler_path = Path(os.environ['INTEL_OPENVINO_DIR']) / 'deployment_tools/tools/compile_tool/compile_tool' except KeyError: sys.exit('Unable to locate Compile Tool. ' + 'Use --compiler or run setupvars.sh/setupvars.bat from the OpenVINO toolkit.') models = common.load_models_from_args(parser, args) if args.precisions is None: requested_precisions = common.KNOWN_COMPILABLE_PRECISIONS else: requested_precisions = set(args.precisions.split(',')) unknown_precisions = requested_precisions - common.KNOWN_COMPILABLE_PRECISIONS if unknown_precisions: sys.exit('Unknown precisions specified: {}.'.format(', '.join(sorted(unknown_precisions)))) reporter = common.Reporter(common.DirectOutputContext()) output_dir = args.model_dir if args.output_dir is None else args.output_dir requested_precisions = common.KNOWN_COMPILABLE_PRECISIONS failed_models = [] for model in models: if not model.compilable: reporter.print_section_heading('Skipping {} (compilation not supported)', model.name) reporter.print() continue for precision in sorted(requested_precisions): if not compile(reporter, compiler_path, model, precision, args, output_dir): failed_models.append(model.name + ' (' + precision + ')') continue if failed_models: reporter.print('FAILED:') for failed_model_name in failed_models: reporter.print(failed_model_name) sys.exit(1)
def main(): parser = argparse.ArgumentParser() parser.add_argument('-d', '--model_dir', type=Path, metavar='DIR', default=Path.cwd(), help='root of the directory tree with IR model files') parser.add_argument('-o', '--output_dir', type=Path, metavar='DIR', help='root of the directory tree to place compiled models files into') parser.add_argument('--name', metavar='PAT[,PAT...]', help='compile only models whose names match at least one of the specified patterns') parser.add_argument('--list', type=Path, metavar='FILE.LST', help='compile only models whose names match at least one of the patterns in the specified file') parser.add_argument('-ip', '--input_precision', dest='input_precision', help='Input precision of compiled network') parser.add_argument('-op', '--output_precision', dest='output_precision', help='output_precision of compiled network') parser.add_argument('--precisions', metavar='PREC[,PREC...]', help='compile only specified precisions') parser.add_argument('--target_device', help='target device for the compiled model', default='MYRIAD') parser.add_argument('--all', action='store_true', help='compile all available models') parser.add_argument('--print_all', action='store_true', help='print all available models') parser.add_argument('--compiler', type=Path, help='Compile Tool executable entry point') parser.add_argument('--dry_run', action='store_true', help='print the compilation commands without running them') args = parser.parse_args() compiler_path = args.compiler if compiler_path is None: try: compiler_path = Path(os.environ['INTEL_OPENVINO_DIR']) / 'deployment_tools/tools/compile_tool/compile_tool' except KeyError: sys.exit('Unable to locate Compile Tool. ' + 'Use --compiler or run setupvars.sh/setupvars.bat from the OpenVINO toolkit.') models = common.load_models_from_args(parser, args) if args.precisions is None: requested_precisions = common.KNOWN_COMPILABLE_PRECISIONS else: requested_precisions = set(args.precisions.split(',')) unknown_precisions = requested_precisions - common.KNOWN_COMPILABLE_PRECISIONS if unknown_precisions: sys.exit('Unknown precisions specified: {}.'.format(', '.join(sorted(unknown_precisions)))) reporter = common.Reporter(common.DirectOutputContext()) output_dir = args.model_dir if args.output_dir is None else args.output_dir requested_precisions = common.KNOWN_COMPILABLE_PRECISIONS failed_models = [] for model in models: if not model.compilable: reporter.print_section_heading('Skipping {} (compilation not supported)', model.name) reporter.print() continue for precision in sorted(requested_precisions): if not compile(reporter, compiler_path, model, precision, args, output_dir): failed_models.append(f'{model.name} ({precision})') continue if failed_models: reporter.print('FAILED:') for failed_model_name in failed_models: reporter.print(failed_model_name) sys.exit(1)
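Besides switching the failed-model message to an f-string, the record validates a comma-separated --precisions argument against a known set (note that, as printed above, both versions then re-assign requested_precisions to the full known set, discarding the filter). A small sketch of the validation step; the precision set below is an assumption for illustration, not OpenVINO's actual list.

KNOWN_COMPILABLE_PRECISIONS = {"FP16", "FP16-INT8"}

def parse_precisions(arg):
    # None means "all known precisions"; otherwise reject unknown entries.
    if arg is None:
        return set(KNOWN_COMPILABLE_PRECISIONS)
    requested = set(arg.split(","))
    unknown = requested - KNOWN_COMPILABLE_PRECISIONS
    if unknown:
        raise SystemExit("Unknown precisions specified: {}.".format(
            ", ".join(sorted(unknown))))
    return requested

print(sorted(parse_precisions("FP16")))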
42,915
def clique_swap(clique: list, graph: nx.Graph, node_select: str = "uniform"): """If possible, generates a new clique by swapping a node in the input clique with a node outside the clique. Proceeds by calculating the set :math:`C_1` of nodes in the rest of the graph that are connected to all but one of the nodes in the clique. If this set is not empty, this function randomly picks a node and swaps it with the corresponding node in the clique that is not connected to it. The set :math:`C_1` and corresponding nodes in the clique are provided by the :func:`~strawberryfields.apps.graph.utils.c_1` function. Whenever there are multiple nodes within :math:`C_1`, one must choose which node to add to the growing clique. This function allows a method of choosing nodes to be set with the ``node_select`` argument, with node selection based on uniform randomness and node degree supported. Degree-based node selection involves picking the node with the greatest degree, with ties settled by uniform random choice. Args: clique (list[int]): a subgraph specified by a list of nodes; the subgraph must be a clique graph (nx.Graph): the input graph node_select (str): method of selecting nodes from :math:`C_0` during growth. Can be either ``"uniform"`` for uniform random selection or ``"degree"`` for degree-based selection. Defaults to ``"uniform"``. Returns: list[int]: a new clique subgraph of equal size as the input """ if not utils.is_clique(graph.subgraph(clique)): raise ValueError("Input subgraph is not a clique") clique = set(clique) c_1 = utils.c_1(clique, graph) if c_1: if node_select == "uniform": swap_nodes = c_1[np.random.choice(len(c_1))] elif node_select == "degree": degrees = np.array([graph.degree(n[1]) for n in c_1]) to_swap_index = np.random.choice(np.where(degrees == degrees.max())[0]) swap_nodes = c_1[to_swap_index] else: raise ValueError("Node selection method not recognized") clique.remove(swap_nodes[0]) clique.add(swap_nodes[1]) return sorted(clique)
def clique_swap(clique: list, graph: nx.Graph, node_select: str = "uniform"): """If possible, generates a new clique by swapping a node in the input clique with a node outside the clique. Proceeds by calculating the set :math:`C_1` of nodes in the rest of the graph that are connected to all but one of the nodes in the clique. If this set is not empty, this function randomly picks a node and swaps it with the corresponding node in the clique that is not connected to it. The set :math:`C_1` and corresponding nodes in the clique are provided by the :func:`~strawberryfields.apps.graph.utils.c_1` function. Whenever there are multiple nodes within :math:`C_1`, one must choose which node to add to the growing clique. This function allows a method of choosing nodes to be set with the ``node_select`` argument, with node selection based on uniform randomness and node degree supported. Degree-based node selection involves picking the node with the greatest degree, with ties settled by uniform random choice. Args: clique (list[int]): a subgraph specified by a list of nodes; the subgraph must be a clique graph (nx.Graph): the input graph node_select (str): method of selecting nodes from :math:`C_0` during growth. Can be either ``"uniform"`` for uniform random selection or ``"degree"`` for degree-based selection. Defaults to ``"uniform"``. Returns: list[int]: a new clique subgraph of equal size as the input """ if not utils.is_clique(graph.subgraph(clique)): raise ValueError("Input subgraph is not a clique") clique = set(clique) c_1 = utils.c_1(clique, graph) if c_1: if node_select == "uniform": swap_nodes = np.random.choice(c_1) elif node_select == "degree": degrees = np.array([graph.degree(n[1]) for n in c_1]) to_swap_index = np.random.choice(np.where(degrees == degrees.max())[0]) swap_nodes = c_1[to_swap_index] else: raise ValueError("Node selection method not recognized") clique.remove(swap_nodes[0]) clique.add(swap_nodes[1]) return sorted(clique)
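The modification replaces index-based selection with np.random.choice(c_1). If c_1 is a list of (clique_node, outside_node) pairs, as the swap_nodes[0]/swap_nodes[1] usage suggests, np.random.choice cannot be applied to it directly, since it requires a 1-D array; drawing a random index keeps the selection uniform and works for any element type. A minimal sketch with illustrative pairs:

import numpy as np

candidate_swaps = [(0, 5), (2, 7), (3, 9)]  # illustrative (in_clique, outside) pairs
idx = np.random.choice(len(candidate_swaps))  # uniform random index
swap_nodes = candidate_swaps[idx]
print(swap_nodes)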
1,500
def log_loss(y_true, y_pred, eps=1e-15, normalize=True, sample_weight=None, labels=None): """Log loss, aka logistic loss or cross-entropy loss. This is the loss function used in (multinomial) logistic regression and extensions of it such as neural networks, defined as the negative log-likelihood of the true labels given a probabilistic classifier's predictions. The log loss is only defined for two or more labels. For a single sample with true label yt in {0,1} and estimated probability yp that yt = 1, the log loss is -log P(yt|yp) = -(yt log(yp) + (1 - yt) log(1 - yp)) Read more in the :ref:`User Guide <log_loss>`. Parameters ---------- y_true : array-like or label indicator matrix Ground truth (correct) labels for n_samples samples. y_pred : array-like of float, shape = (n_samples, n_classes) or (n_samples,) Predicted probabilities, as returned by a classifier's predict_proba method. If ``y_pred.shape = (n_samples,)`` the probabilities provided are assumed to be that of the positive class. The labels in ``y_pred`` are assumed to be ordered alphabetically, as done by :class:`preprocessing.LabelBinarizer`. eps : float Log loss is undefined for p=0 or p=1, so probabilities are clipped to max(eps, min(1 - eps, p)). normalize : bool, optional (default=True) If true, return the mean loss per sample. Otherwise, return the sum of the per-sample losses. sample_weight : array-like of shape (n_samples,), default=None Sample weights. labels : array-like, optional (default=None) If not provided, labels will be inferred from y_true. If ``labels`` is ``None`` and ``y_pred`` has shape (n_samples,) the labels are assumed to be binary and are inferred from ``y_true``. .. versionadded:: 0.18 Returns ------- loss : float Examples -------- >>> from sklearn.metrics import log_loss >>> log_loss(["spam", "ham", "ham", "spam"], ... [[.1, .9], [.9, .1], [.8, .2], [.35, .65]]) 0.21616... References ---------- C.M. Bishop (2006). Pattern Recognition and Machine Learning. Springer, p. 209. Notes ----- The logarithm used is the natural logarithm (base-e). """ y_pred = check_array(y_pred, ensure_2d=False) check_consistent_length(y_pred, y_true, sample_weight) lb = LabelBinarizer() if labels is not None: lb.fit(labels) else: lb.fit(y_true) if len(lb.classes_) == 1: if labels is None: raise ValueError('y_true contains only one label ({0}). Please ' 'provide the true labels explicitly through the ' 'labels argument.'.format(lb.classes_[0])) else: raise ValueError('The labels array needs to contain at least two ' 'labels for log_loss, ' 'got {0}.'.format(lb.classes_)) transformed_labels = lb.transform(y_true) if transformed_labels.shape[1] == 1: transformed_labels = np.append(1 - transformed_labels, transformed_labels, axis=1) # Clipping y_pred = np.clip(y_pred, eps, 1 - eps) # If y_pred is of single dimension, assume y_true to be binary # and then check. if y_pred.ndim == 1: y_pred = y_pred[:, np.newaxis] if y_pred.shape[1] == 1: y_pred = np.append(1 - y_pred, y_pred, axis=1) # Check if dimensions are consistent. transformed_labels = check_array(transformed_labels) if len(lb.classes_) != y_pred.shape[1]: if labels is None: raise ValueError("y_true and y_pred contain different number of " "classes {0}, {1}. Please provide the true " "labels explicitly through the labels argument. " "Classes found in " "y_true: {2}".format(transformed_labels.shape[1], y_pred.shape[1], lb.classes_)) else: raise ValueError('The number of classes in labels is different ' 'from that in y_pred. 
Classes found in ' 'labels: {0}'.format(lb.classes_)) # Renormalize y_pred /= y_pred.sum(axis=1)[:, np.newaxis] loss = -(transformed_labels * np.log(y_pred)).sum(axis=1) return _weighted_sum(loss, sample_weight, normalize)
def log_loss(y_true, y_pred, eps=1e-15, normalize=True, sample_weight=None, labels=None): """Log loss, aka logistic loss or cross-entropy loss. This is the loss function used in (multinomial) logistic regression and extensions of it such as neural networks, defined as the negative log-likelihood of the true labels given a probabilistic classifier's predictions. The log loss is only defined for two or more labels. For a single sample with true label yt in {0,1} and estimated probability yp that yt = 1, the log loss is -log P(yt|yp) = -(yt log(yp) + (1 - yt) log(1 - yp)) Read more in the :ref:`User Guide <log_loss>`. Parameters ---------- y_true : array-like or label indicator matrix Ground truth (correct) labels for n_samples samples. y_pred : array-like of float, shape = (n_samples, n_classes) or (n_samples,) Predicted probabilities, as returned by a classifier's predict_proba method. If ``y_pred.shape = (n_samples,)`` the probabilities provided are assumed to be that of the positive class. The labels in ``y_pred`` are assumed to be ordered alphabetically, as done by :class:`preprocessing.LabelBinarizer`. eps : float Log loss is undefined for p=0 or p=1, so probabilities are clipped to max(eps, min(1 - eps, p)). normalize : bool, optional (default=True) If true, return the mean loss per sample. Otherwise, return the sum of the per-sample losses. sample_weight : array-like of shape (n_samples,), default=None Sample weights. labels : array-like, optional (default=None) If not provided, labels will be inferred from y_true. If ``labels`` is ``None`` and ``y_pred`` has shape (n_samples,) the labels are assumed to be binary and are inferred from ``y_true``. .. versionadded:: 0.18 Returns ------- loss : float Examples -------- >>> from sklearn.metrics import log_loss >>> log_loss(["spam", "ham", "ham", "spam"], ... [[.1, .9], [.9, .1], [.8, .2], [.35, .65]]) 0.21616... References ---------- C.M. Bishop (2006). Pattern Recognition and Machine Learning. Springer, p. 209. Notes ----- The logarithm used is the natural logarithm (base-e). """ y_pred = check_array(y_pred, ensure_2d=False) check_consistent_length(y_pred, y_true, sample_weight) lb = LabelBinarizer() if labels is not None: lb.fit(labels) else: lb.fit(y_true) if len(lb.classes_) == 1: if labels is None: raise ValueError('y_true contains only one label ({0}). Please ' 'provide the true labels explicitly through the ' 'labels argument.'.format(lb.classes_[0])) else: raise ValueError('The labels array needs to contain at least two ' 'labels for log_loss, ' 'got {0}.'.format(lb.classes_)) transformed_labels = lb.transform(y_true) if transformed_labels.shape[1] == 1: transformed_labels = np.append(1 - transformed_labels, transformed_labels, axis=1) # Clipping y_pred = np.clip(y_pred, eps, 1 - eps) # If y_pred is of single dimension, assume y_true to be binary # and then check. if y_pred.ndim == 1: y_pred = y_pred[:, np.newaxis] if y_pred.shape[1] == 1: y_pred = np.append(1 - y_pred, y_pred, axis=1) # Check if dimensions are consistent. transformed_labels = check_array(transformed_labels) if len(lb.classes_) != y_pred.shape[1]: if labels is None: raise ValueError("y_true and y_pred contain different number of " "classes {0}, {1}. Please provide the true " "labels explicitly through the labels argument. " "Classes found in " "y_true: {2}".format(transformed_labels.shape[1], y_pred.shape[1], lb.classes_)) else: raise ValueError('The number of classes in labels is different ' 'from that in y_pred. 
Classes found in ' 'labels: {0}'.format(lb.classes_)) # Renormalize y_pred /= y_pred.sum(axis=1)[:, np.newaxis] loss = -(transformed_labels * np.log(y_pred)).sum(axis=1) return _weighted_sum(loss, sample_weight, normalize)
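The original and modified cells of this record appear to differ only in whitespace. For reference, a minimal binary sketch of the loss defined in the docstring, which reproduces the 0.21616... value from the docstring example; it is an illustration, not the full multi-class sklearn implementation.

import numpy as np

def binary_log_loss(y_true, y_prob, eps=1e-15):
    # Clip probabilities away from 0 and 1, then average the negative log-likelihood.
    y_true = np.asarray(y_true, dtype=float)
    y_prob = np.clip(np.asarray(y_prob, dtype=float), eps, 1 - eps)
    losses = -(y_true * np.log(y_prob) + (1 - y_true) * np.log(1 - y_prob))
    return losses.mean()

# "spam" treated as the positive class, matching the docstring example.
print(round(binary_log_loss([1, 0, 0, 1], [0.9, 0.1, 0.2, 0.65]), 5))  # 0.21616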
57,193
def run_tests(args): """Run the scripts to start end-to-end tests.""" if is_oppia_server_already_running(): sys.exit(1) install_third_party_libraries(args.skip_install) with contextlib.ExitStack() as stack: dev_mode = not args.prod_env if args.skip_build: build.modify_constants(prod_env=args.prod_env) else: build_js_files(dev_mode, source_maps=args.source_maps) stack.callback(build.set_constants_to_default) stack.enter_context(servers.managed_redis_server()) stack.enter_context(servers.managed_elasticsearch_dev_server()) if constants.EMULATOR_MODE: stack.enter_context(servers.managed_firebase_auth_emulator()) stack.enter_context( servers.managed_cloud_datastore_emulator(clear_datastore=True)) app_yaml_path = 'app.yaml' if args.prod_env else 'app_dev.yaml' stack.enter_context(servers.managed_dev_appserver( app_yaml_path, port=GOOGLE_APP_ENGINE_PORT, log_level=args.server_log_level, # Automatic restart can be disabled since we don't expect code # changes to happen while the e2e tests are running. automatic_restart=False, skip_sdk_update_check=True, env={ **os.environ, 'PORTSERVER_ADDRESS': common.PORTSERVER_SOCKET_FILEPATH, })) if args.suite == 'full': stack.enter_context(servers.managed_webdriver_server( chrome_version=args.chrome_driver_version)) proc = stack.enter_context(servers.managed_protractor_server( suite_name=args.suite, dev_mode=dev_mode, debug_mode=args.debug_mode, sharding_instances=args.sharding_instances, stdout=subprocess.PIPE)) proc = stack.enter_context(servers.managed_webdriverio_server( suite_name=args.suite, dev_mode=dev_mode, debug_mode=args.debug_mode, chrome_version=args.chrome_driver_version, sharding_instances=args.sharding_instances, stdout=subprocess.PIPE)) elif args.suite in SUITES_MIGRATED_TO_WEBDRIVERIO: proc = stack.enter_context(servers.managed_webdriverio_server( suite_name=args.suite, dev_mode=dev_mode, debug_mode=args.debug_mode, chrome_version=args.chrome_driver_version, stdout=subprocess.PIPE)) print( 'Servers have come up.\n' 'Note: You can view screenshots of failed tests ' 'in ../webdriverio-screenshots/') elif args.suite in SUITES_STILL_IN_PROTRACTOR: stack.enter_context(servers.managed_webdriver_server( chrome_version=args.chrome_driver_version)) proc = stack.enter_context(servers.managed_protractor_server( suite_name=args.suite, dev_mode=dev_mode, debug_mode=args.debug_mode, sharding_instances=args.sharding_instances, stdout=subprocess.PIPE)) print( 'Servers have come up.\n' 'Note: If ADD_SCREENSHOT_REPORTER is set to true in ' 'core/tests/protractor.conf.js, you can view screenshots of the ' 'failed tests in ../protractor-screenshots/') else: print( 'The suite requested to run does not exists' 'Please provide a valid suite name') sys.exit(1) output_lines = [] while True: # Keep reading lines until an empty string is returned. Empty # strings signal that the process has ended. for line in iter(proc.stdout.readline, b''): if isinstance(line, str): # Although our unit tests always provide unicode strings, # the actual server needs this failsafe since it can output # non-unicode strings. line = line.encode('utf-8') # pragma: no cover output_lines.append(line.rstrip()) # Replaces non-ASCII characters with '?'. common.write_stdout_safe(line.decode('ascii', errors='replace')) # The poll() method returns None while the process is running, # otherwise it returns the return code of the process (an int). if proc.poll() is not None: break return output_lines, proc.returncode
def run_tests(args): """Run the scripts to start end-to-end tests.""" if is_oppia_server_already_running(): sys.exit(1) install_third_party_libraries(args.skip_install) with contextlib.ExitStack() as stack: dev_mode = not args.prod_env if args.skip_build: build.modify_constants(prod_env=args.prod_env) else: build_js_files(dev_mode, source_maps=args.source_maps) stack.callback(build.set_constants_to_default) stack.enter_context(servers.managed_redis_server()) stack.enter_context(servers.managed_elasticsearch_dev_server()) if constants.EMULATOR_MODE: stack.enter_context(servers.managed_firebase_auth_emulator()) stack.enter_context( servers.managed_cloud_datastore_emulator(clear_datastore=True)) app_yaml_path = 'app.yaml' if args.prod_env else 'app_dev.yaml' stack.enter_context(servers.managed_dev_appserver( app_yaml_path, port=GOOGLE_APP_ENGINE_PORT, log_level=args.server_log_level, # Automatic restart can be disabled since we don't expect code # changes to happen while the e2e tests are running. automatic_restart=False, skip_sdk_update_check=True, env={ **os.environ, 'PORTSERVER_ADDRESS': common.PORTSERVER_SOCKET_FILEPATH, })) if args.suite == 'full': stack.enter_context(servers.managed_webdriver_server( chrome_version=args.chrome_driver_version)) proc = stack.enter_context(servers.managed_protractor_server( suite_name=args.suite, dev_mode=dev_mode, debug_mode=args.debug_mode, sharding_instances=args.sharding_instances, stdout=subprocess.PIPE)) proc = stack.enter_context(servers.managed_webdriverio_server( suite_name=args.suite, dev_mode=dev_mode, debug_mode=args.debug_mode, chrome_version=args.chrome_driver_version, sharding_instances=args.sharding_instances, stdout=subprocess.PIPE)) elif args.suite in SUITES_MIGRATED_TO_WEBDRIVERIO: proc = stack.enter_context(servers.managed_webdriverio_server( suite_name=args.suite, dev_mode=dev_mode, debug_mode=args.debug_mode, chrome_version=args.chrome_driver_version, stdout=subprocess.PIPE)) print( 'Servers have come up.\n' 'Note: You can view screenshots of failed tests ' 'in ../webdriverio-screenshots/') elif args.suite in SUITES_STILL_IN_PROTRACTOR: stack.enter_context(servers.managed_webdriver_server( chrome_version=args.chrome_driver_version)) proc = stack.enter_context(servers.managed_protractor_server( suite_name=args.suite, dev_mode=dev_mode, debug_mode=args.debug_mode, sharding_instances=args.sharding_instances, stdout=subprocess.PIPE)) print( 'Servers have come up.\n' 'Note: If ADD_SCREENSHOT_REPORTER is set to true in ' 'core/tests/protractor.conf.js, you can view screenshots of the ' 'failed tests in ../protractor-screenshots/') else: print( 'The suite requested to run does not exist' 'Please provide a valid suite name') sys.exit(1) output_lines = [] while True: # Keep reading lines until an empty string is returned. Empty # strings signal that the process has ended. for line in iter(proc.stdout.readline, b''): if isinstance(line, str): # Although our unit tests always provide unicode strings, # the actual server needs this failsafe since it can output # non-unicode strings. line = line.encode('utf-8') # pragma: no cover output_lines.append(line.rstrip()) # Replaces non-ASCII characters with '?'. common.write_stdout_safe(line.decode('ascii', errors='replace')) # The poll() method returns None while the process is running, # otherwise it returns the return code of the process (an int). if proc.poll() is not None: break return output_lines, proc.returncode
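The functional change here is the "does not exists" message fix; the structural pattern worth noting is contextlib.ExitStack, which keeps all the managed servers plus a cleanup callback in one with-block and tears them down in reverse order. A toy sketch of that pattern (the services below are stand-ins, not Oppia's servers):

import contextlib

@contextlib.contextmanager
def managed_service(name):
    print(f"start {name}")
    try:
        yield name
    finally:
        print(f"stop {name}")

with contextlib.ExitStack() as stack:
    # Registered first, so it runs last, after both services have stopped.
    stack.callback(lambda: print("reset constants"))
    stack.enter_context(managed_service("redis"))
    stack.enter_context(managed_service("elasticsearch"))
    print("run the test suite here")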
12,414
def handle_ssh_pwauth(pw_auth, distro): """Apply sshd PasswordAuthentication changes. @param pw_auth: config setting from 'pw_auth'. Best given as True, False, or "unchanged". @param distro: an instance of the distro class for the target distribution @return: None""" cfg_name = "PasswordAuthentication" if isinstance(pw_auth, str): LOG.warning( "DEPRECATION: The 'ssh_pwauth' config key should be set to " "a boolean value. The string format is deprecated and will be " "removed in a future version of cloud-init." ) if util.is_true(pw_auth): cfg_val = "yes" elif util.is_false(pw_auth): cfg_val = "no" else: bmsg = "Leaving SSH config '%s' unchanged." % cfg_name if pw_auth is None or pw_auth.lower() == "unchanged": LOG.debug("%s ssh_pwauth=%s", bmsg, pw_auth) else: LOG.warning("%s Unrecognized value: ssh_pwauth=%s", bmsg, pw_auth) return updated = update_ssh_config({cfg_name: cfg_val}) if not updated: LOG.debug("No need to restart SSH service, %s not updated.", cfg_name) return service = distro.get_option("ssh_svcname", "ssh") try: distro.manage_service("restart", service) except subp.ProcessExecutionError as e: LOG.warning("Failed to restart the SSH deamon. %s: %s", service, e) else: LOG.debug("Restarted the SSH daemon.")
def handle_ssh_pwauth(pw_auth, distro): """Apply sshd PasswordAuthentication changes. @param pw_auth: config setting from 'pw_auth'. Best given as True, False, or "unchanged". @param distro: an instance of the distro class for the target distribution @return: None""" cfg_name = "PasswordAuthentication" if isinstance(pw_auth, str): LOG.warning( "DEPRECATION: The 'ssh_pwauth' config key should be set to " "a boolean value. The string format is deprecated and will be " "removed in a future version of cloud-init." ) if util.is_true(pw_auth): cfg_val = "yes" elif util.is_false(pw_auth): cfg_val = "no" else: bmsg = "Leaving SSH config '%s' unchanged." % cfg_name if pw_auth is None or pw_auth.lower() == "unchanged": LOG.debug("%s ssh_pwauth=%s", bmsg, pw_auth) else: LOG.warning("%s Unrecognized value: ssh_pwauth=%s", bmsg, pw_auth) return updated = update_ssh_config({cfg_name: cfg_val}) if not updated: LOG.debug("No need to restart SSH service, %s not updated.", cfg_name) return service = distro.get_option("ssh_svcname", "ssh") try: distro.manage_service("restart", service) except subp.ProcessExecutionError as e: LOG.warning(f"Wrote '{cfg_name} {cfg_val}' to {DEF_SSHD_CFG}. Unable to restart the SSH deamon {service}: {e}") else: LOG.debug("Restarted the SSH daemon.")
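The modified version folds the written config value into the restart-failure warning (DEF_SSHD_CFG is referenced there but not defined in this record). The core logic maps a boolean-ish ssh_pwauth value onto "yes", "no", or leave-unchanged; a rough sketch with local stand-ins for cloud-init's util.is_true/util.is_false (the truthy/falsy sets below are illustrative, not cloud-init's actual lists):

def resolve_pwauth(pw_auth):
    truthy = {True, "true", "1", "yes", "on"}
    falsy = {False, "false", "0", "no", "off"}
    key = pw_auth.lower() if isinstance(pw_auth, str) else pw_auth
    if key in truthy:
        return "yes"
    if key in falsy:
        return "no"
    return None  # leave PasswordAuthentication unchanged

for value in (True, "no", None, "unchanged"):
    print(value, "->", resolve_pwauth(value))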
43,954
def _diff2(i, j, ri, rj, alpha, beta):
    r"""Compute the second order differentiated integral needed for evaluating a kinetic integral.

    The second order integral :math:`D_{ij}^2`, where :math:`i` and :math:`j` denote angular
    momentum components of Gaussian functions, is computed from overlap integrals :math:`S` and
    the Gaussian exponent :math:`\beta` as
    [`Helgaker (1995) p804 <https://www.worldscientific.com/doi/abs/10.1142/9789812832115_0001>`_]:

    .. math::

        D_{ij}^2 = j(j-1)S_{i,j-2}^0 - 2\beta(2j+1)S_{i,j}^0 + 4\beta^2 S_{i,j+2}^0.

    Args:
        i (integer): angular momentum component for the first Gaussian function
        j (integer): angular momentum component for the second Gaussian function
        ri (float): position component of the the first Gaussian function
        ri (float): position component of the the second Gaussian function
        alpha (array[float]): exponent of the first Gaussian function
        beta (array[float]): exponent of the second Gaussian function

    Returns:
        array[float]: second order differentiated integral between two Gaussian functions
    """
    p = alpha + beta

    d1 = j * (j - 1) * anp.sqrt(anp.pi / p) * expansion(i, j - 2, ri, rj, alpha, beta, 0)
    d2 = -2 * beta * (2 * j + 1) * anp.sqrt(anp.pi / p) * expansion(i, j, ri, rj, alpha, beta, 0)
    d3 = 4 * beta ** 2 * anp.sqrt(anp.pi / p) * expansion(i, j + 2, ri, rj, alpha, beta, 0)

    return d1 + d2 + d3
def _diff2(i, j, ri, rj, alpha, beta):
    r"""Compute the second order differentiated integral needed for evaluating a kinetic integral.

    The second order integral :math:`D_{ij}^2`, where :math:`i` and :math:`j` denote angular
    momentum components of Gaussian functions, is computed from overlap integrals :math:`S` and
    the Gaussian exponent :math:`\beta` as
    [`Helgaker (1995) p804 <https://www.worldscientific.com/doi/abs/10.1142/9789812832115_0001>`_]:

    .. math::

        D_{ij}^2 = j(j-1)S_{i,j-2}^0 - 2\beta(2j+1)S_{i,j}^0 + 4\beta^2 S_{i,j+2}^0.

    Args:
        i (integer): angular momentum component for the first Gaussian function
        j (integer): angular momentum component for the second Gaussian function
        ri (float): position component of the the first Gaussian function
        rj (float): position component of the the second Gaussian function
        alpha (array[float]): exponent of the first Gaussian function
        beta (array[float]): exponent of the second Gaussian function

    Returns:
        array[float]: second order differentiated integral between two Gaussian functions
    """
    p = alpha + beta

    d1 = j * (j - 1) * anp.sqrt(anp.pi / p) * expansion(i, j - 2, ri, rj, alpha, beta, 0)
    d2 = -2 * beta * (2 * j + 1) * anp.sqrt(anp.pi / p) * expansion(i, j, ri, rj, alpha, beta, 0)
    d3 = 4 * beta ** 2 * anp.sqrt(anp.pi / p) * expansion(i, j + 2, ri, rj, alpha, beta, 0)

    return d1 + d2 + d3
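Only the rj docstring label changes in this record. The recurrence itself is short; the sketch below only mirrors its shape, with overlap_term standing in for the Hermite-expansion overlap S_{i,j}^0 — the stub is a placeholder, not PennyLane's expansion function, and the exponents passed at the bottom are arbitrary illustrative values.

import numpy as np

def overlap_term(i, j, ri, rj, alpha, beta):
    # Placeholder: returns 0 for negative indices and a simple Gaussian
    # prefactor otherwise, purely to keep the recurrence runnable.
    if i < 0 or j < 0:
        return 0.0
    return np.exp(-alpha * beta / (alpha + beta) * (ri - rj) ** 2)

def diff2_sketch(i, j, ri, rj, alpha, beta):
    p = alpha + beta
    pref = np.sqrt(np.pi / p)
    return (j * (j - 1) * pref * overlap_term(i, j - 2, ri, rj, alpha, beta)
            - 2 * beta * (2 * j + 1) * pref * overlap_term(i, j, ri, rj, alpha, beta)
            + 4 * beta ** 2 * pref * overlap_term(i, j + 2, ri, rj, alpha, beta))

print(diff2_sketch(0, 0, 0.0, 0.1, 3.42525091, 3.42525091))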
7,063
def check_nested_run_dirs(run_dir: Union[Path, str], flow_name: str) -> None: """Disallow nested run dirs e.g. trying to install foo/bar where foo is already a valid workflow directory. Args: run_dir: Absolute workflow run directory path. flow_name: Workflow name. Raise: WorkflowFilesError: - reg dir is nested inside a run dir - reg dir contains a nested run dir (if not deeper than max scan depth) """ exc_msg = ( 'Nested run directories not allowed - cannot install workflow name ' '"{0}" as "{1}" is already a valid run directory.' ) def _check_child_dirs(path: Union[Path, str], depth_count: int = 1): for result in os.scandir(path): if result.is_dir() and not result.is_symlink(): if is_valid_run_dir(result.path): raise WorkflowFilesError( exc_msg.format(flow_name, result.path) ) if depth_count < MAX_SCAN_DEPTH: _check_child_dirs(result.path, depth_count + 1) reg_path: Union[Path, str] = os.path.normpath(run_dir) parent_dir = os.path.dirname(reg_path) while parent_dir not in {'', os.sep}: if is_valid_run_dir(parent_dir): raise WorkflowFilesError( exc_msg.format(flow_name, get_cylc_run_abs_path(parent_dir)) ) parent_dir = os.path.dirname(parent_dir) reg_path = get_cylc_run_abs_path(reg_path) if os.path.isdir(reg_path): _check_child_dirs(reg_path)
def check_nested_run_dirs(run_dir: Union[Path, str], flow_name: str) -> None: """Disallow nested run dirs e.g. trying to install foo/bar where foo is already a valid workflow directory. Args: run_dir: Absolute workflow run directory path. flow_name: Workflow name. Raise: WorkflowFilesError: contain _cylc-install dir. - reg dir contains a nested run dir (if not deeper than max scan depth) """ exc_msg = ( 'Nested run directories not allowed - cannot install workflow name ' '"{0}" as "{1}" is already a valid run directory.' ) def _check_child_dirs(path: Union[Path, str], depth_count: int = 1): for result in os.scandir(path): if result.is_dir() and not result.is_symlink(): if is_valid_run_dir(result.path): raise WorkflowFilesError( exc_msg.format(flow_name, result.path) ) if depth_count < MAX_SCAN_DEPTH: _check_child_dirs(result.path, depth_count + 1) reg_path: Union[Path, str] = os.path.normpath(run_dir) parent_dir = os.path.dirname(reg_path) while parent_dir not in {'', os.sep}: if is_valid_run_dir(parent_dir): raise WorkflowFilesError( exc_msg.format(flow_name, get_cylc_run_abs_path(parent_dir)) ) parent_dir = os.path.dirname(parent_dir) reg_path = get_cylc_run_abs_path(reg_path) if os.path.isdir(reg_path): _check_child_dirs(reg_path)
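A self-contained sketch of the depth-limited os.scandir recursion used in _check_child_dirs above; a marker-directory check stands in for Cylc's is_valid_run_dir, and the ".service" marker name is only an assumption for the example.

import os
import tempfile

MAX_DEPTH = 3

def find_marked_dirs(path, marker=".service", depth=1):
    # Walk child directories (skipping symlinks), stopping at MAX_DEPTH.
    found = []
    for entry in os.scandir(path):
        if entry.is_dir() and not entry.is_symlink():
            if os.path.isdir(os.path.join(entry.path, marker)):
                found.append(entry.path)
            elif depth < MAX_DEPTH:
                found.extend(find_marked_dirs(entry.path, marker, depth + 1))
    return found

with tempfile.TemporaryDirectory() as root:
    os.makedirs(os.path.join(root, "a", "b", ".service"))
    print(find_marked_dirs(root))  # [<root>/a/b]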
42,825
def reinstall_packages_sb(packages_path: str, dry_run: bool = False): """Reinstall all packages from the files in backup/installs.""" def run_cmd_if_no_dry_run(command, dry_run) -> int: if dry_run: print_yellow_bold(f"$ {command}") # Return 0 for any processes depending on chained successful commands return 0 else: return run_cmd(command) exit_if_dir_is_empty(packages_path, 'package') print_section_header("REINSTALLING PACKAGES", Fore.BLUE) # Figure out which install lists they have saved package_mgrs = set() for file in os.listdir(packages_path): manager = file.split("_")[0].replace("-", " ") if manager in ["gem", "brew-cask", "cargo", "npm", "pip", "pip3", "brew", "vscode", "apm", "macports"]: package_mgrs.add(file.split("_")[0]) print_blue_bold("Package Manager Backups Found:") for mgr in package_mgrs: print_yellow("\t{}".format(mgr)) print() # TODO: Multithreading for reinstallation. # Construct reinstallation commands and execute them for pm in package_mgrs: if pm in ["brew", "brew-cask"]: pm_formatted = pm.replace("-", " ") print_pkg_mgr_reinstall(pm_formatted) cmd = f"xargs {pm.replace('-', ' ')} install < {packages_path}/{pm_formatted}_list.txt" run_cmd_if_no_dry_run(cmd, dry_run) elif pm == "npm": print_pkg_mgr_reinstall(pm) cmd = f"cat {packages_path}/npm_list.txt | xargs npm install -g" run_cmd_if_no_dry_run(cmd, dry_run) elif pm == "pip": print_pkg_mgr_reinstall(pm) cmd = f"pip install -r {packages_path}/pip_list.txt" run_cmd_if_no_dry_run(cmd, dry_run) elif pm == "pip3": print_pkg_mgr_reinstall(pm) cmd = f"pip3 install -r {packages_path}/pip3_list.txt" run_cmd_if_no_dry_run(cmd, dry_run) elif pm == "vscode": print_pkg_mgr_reinstall(pm) with open(f"{packages_path}/vscode_list.txt", "r") as file: for package in file: cmd = f"code --install-extension {package}" run_cmd_if_no_dry_run(cmd, dry_run) elif pm == "apm": print_pkg_mgr_reinstall(pm) cmd = f"apm install --packages-file {packages_path}/apm_list.txt" run_cmd_if_no_dry_run(cmd, dry_run) elif pm == "macports": print_red_bold("WARNING: Macports reinstallation is not supported.") elif pm == "gem": print_pkg_mgr_reinstall(pm) cmd = f"cat {packages_path}/gem_list.txt | xargs -L 1 sudo gem install --no-ri --no-rdoc" run_cmd_if_no_dry_run(cmd, dry_run) elif pm == "cargo": print_red_bold("WARNING: Cargo reinstallation is not possible at the moment.\ \n -> https://github.com/rust-lang/cargo/issues/5593") print_section_header("PACKAGE REINSTALLATION COMPLETED", Fore.BLUE)
def reinstall_packages_sb(packages_path: str, dry_run: bool = False): """Reinstall all packages from the files in backup/installs.""" def run_cmd_if_no_dry_run(command, dry_run) -> int: if dry_run: print_yellow_bold(f"$ {command}") # Return 0 for any processes depending on chained successful commands return 0 else: return run_cmd(command) exit_if_dir_is_empty(packages_path, 'package') print_section_header("REINSTALLING PACKAGES", Fore.BLUE) # Figure out which install lists they have saved package_mgrs = set() for file in os.listdir(packages_path): manager = file.split("_")[0].replace("-", " ") if manager in ["gem", "brew-cask", "cargo", "npm", "pip", "pip3", "brew", "vscode", "apm", "macports"]: package_mgrs.add(file.split("_")[0]) print_blue_bold("Package Manager Backups Found:") for mgr in package_mgrs: print_yellow("\t{}".format(mgr)) print() # TODO: Multithreading for reinstallation. # Construct reinstallation commands and execute them for pm in package_mgrs: if pm in ["brew", "brew-cask"]: pm_formatted = pm.replace("-", " ") print_pkg_mgr_reinstall(pm_formatted) cmd = f"xargs {pm.replace('-', ' ')} install < {packages_path}/{pm_formatted}_list.txt" run_cmd_if_no_dry_run(cmd, dry_run) elif pm == "npm": print_pkg_mgr_reinstall(pm) cmd = f"cat {packages_path}/npm_list.txt | xargs npm install -g" run_cmd_if_no_dry_run(cmd, dry_run) elif pm == "pip": print_pkg_mgr_reinstall(pm) cmd = f"pip install -r {packages_path}/pip_list.txt" run_cmd_if_no_dry_run(cmd, dry_run) elif pm == "pip3": print_pkg_mgr_reinstall(pm) cmd = f"pip3 install -r {packages_path}/pip3_list.txt" run_cmd_if_no_dry_run(cmd, dry_run) elif pm == "vscode": print_pkg_mgr_reinstall(pm) with open(f"{packages_path}/vscode_list.txt", "r") as file: for package in file: cmd = f"code --install-extension {package}" run_cmd_if_no_dry_run(cmd, dry_run) elif pm == "apm": print_pkg_mgr_reinstall(pm) cmd = f"apm install --packages-file {packages_path}/apm_list.txt" run_cmd_if_no_dry_run(cmd, dry_run) elif pm == "macports": print_red_bold("WARNING: Macports reinstallation is not supported.") elif pm == "gem": print_pkg_mgr_reinstall(pm) cmd = f"cat {packages_path}/gem_list.txt | xargs -L 1 gem install --no-ri --no-rdoc" run_cmd_if_no_dry_run(cmd, dry_run) elif pm == "cargo": print_red_bold("WARNING: Cargo reinstallation is not possible at the moment.\ \n -> https://github.com/rust-lang/cargo/issues/5593") print_section_header("PACKAGE REINSTALLATION COMPLETED", Fore.BLUE)
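The change in this record drops sudo from the gem reinstall command. The reusable bit is the dry-run wrapper; a small sketch with subprocess.call standing in for the script's run_cmd helper (the command string is illustrative):

import subprocess

def run_cmd_if_no_dry_run(command, dry_run):
    # In dry-run mode the command is only printed and reported as successful,
    # so chained steps that depend on a zero return code still proceed.
    if dry_run:
        print(f"$ {command}")
        return 0
    return subprocess.call(command, shell=True)

run_cmd_if_no_dry_run("pip install -r pip_list.txt", dry_run=True)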
43,198
def main(args: argparse.Namespace) -> None: xml_output, xml_filename = None, None test_runner_kwargs = {"verbosity": 2} Runner = unittest.TextTestRunner if args.xml_report: import xmlrunner from datetime import date # NOQA this_dir = os.path.abspath(os.path.dirname(__file__)) xml_filename = os.path.join( this_dir, f"""{date.today().strftime("%y%m%d")}.xml""", ) xml_output = open(xml_filename, "w") test_runner_kwargs["output"] = xml_output Runner = xmlrunner.XMLTestRunner runner = Runner(**test_runner_kwargs) errcode = 0 for test_dir in args.include: test_dir = os.path.join(TEST_ROOT, test_dir) print(test_dir) suite = unittest.TestLoader().discover(test_dir) print("\nExecuting tests from " + test_dir) result = runner.run(suite) if not result.wasSuccessful(): errcode = 1 if xml_output is not None: xml_output.close() print(f"\n### Report is available at {xml_filename}") sys.exit(errcode)
def main(args: argparse.Namespace) -> None: xml_output, xml_filename = None, None test_runner_kwargs = {"verbosity": 2} Runner = unittest.TextTestRunner if args.xml_report: import xmlrunner from datetime import date # NOQA this_dir = os.path.abspath(os.path.dirname(__file__)) xml_filename = os.path.join( this_dir, f"""{date.today().strftime("%y%m%d")}.xml""", ) xml_output = open(xml_filename, "wb") test_runner_kwargs["output"] = xml_output Runner = xmlrunner.XMLTestRunner runner = Runner(**test_runner_kwargs) errcode = 0 for test_dir in args.include: test_dir = os.path.join(TEST_ROOT, test_dir) print(test_dir) suite = unittest.TestLoader().discover(test_dir) print("\nExecuting tests from " + test_dir) result = runner.run(suite) if not result.wasSuccessful(): errcode = 1 if xml_output is not None: xml_output.close() print(f"\n### Report is available at {xml_filename}") sys.exit(errcode)
1,971
def _fit_multiplicative_update(X, W, H, beta_loss='frobenius', max_iter=200, tol=1e-4, l1_reg_W=0, l1_reg_H=0, l2_reg_W=0, l2_reg_H=0, update_H=True, verbose=0): """Compute Non-negative Matrix Factorization with Multiplicative Update The objective function is _beta_divergence(X, WH) and is minimized with an alternating minimization of W and H. Each minimization is done with a Multiplicative Update. Parameters ---------- X : array-like of shape (n_samples, n_features) Constant input matrix. W : array-like of shape (n_samples, n_components) Initial guess for the solution. H : array-like of shape (n_components, n_features) Initial guess for the solution. beta_loss : float or string in {'frobenius', 'kullback-leibler', \ 'itakura-saito'}, default='frobenius' String must be in {'frobenius', 'kullback-leibler', 'itakura-saito'}. Beta divergence to be minimized, measuring the distance between X and the dot product WH. Note that values different from 'frobenius' (or 2) and 'kullback-leibler' (or 1) lead to significantly slower fits. Note that for beta_loss <= 0 (or 'itakura-saito'), the input matrix X cannot contain zeros. max_iter : int, default=200 Number of iterations. tol : float, default=1e-4 Tolerance of the stopping condition. l1_reg_W : float, default=0. L1 regularization parameter for W. l1_reg_H : float, default=0. L1 regularization parameter for H. l2_reg_W : float, default=0. L2 regularization parameter for W. l2_reg_H : float, default=0. L2 regularization parameter for H. update_H : bool, default=True Set to True, both W and H will be estimated from initial guesses. Set to False, only W will be estimated. verbose : int, default=0 The verbosity level. Returns ------- W : ndarray of shape (n_samples, n_components) Solution to the non-negative least squares problem. H : ndarray of shape (n_components, n_features) Solution to the non-negative least squares problem. n_iter : int The number of iterations done by the algorithm. References ---------- Fevotte, C., & Idier, J. (2011). Algorithms for nonnegative matrix factorization with the beta-divergence. Neural Computation, 23(9). """ start_time = time.time() beta_loss = _beta_loss_to_float(beta_loss) # gamma for Maximization-Minimization (MM) algorithm [Fevotte 2011] if beta_loss < 1: gamma = 1. / (2. - beta_loss) elif beta_loss > 2: gamma = 1. / (beta_loss - 1.) else: gamma = 1. # used for the convergence criterion error_at_init = _beta_divergence(X, W, H, beta_loss, square_root=True) previous_error = error_at_init H_sum, HHt, XHt = None, None, None for n_iter in range(1, max_iter + 1): # update W # H_sum, HHt and XHt are saved and reused if not update_H delta_W, H_sum, HHt, XHt = _multiplicative_update_w( X, W, H, beta_loss, l1_reg_W, l2_reg_W, gamma, H_sum, HHt, XHt, update_H) W *= delta_W # necessary for stability with beta_loss < 1 if beta_loss < 1: W[W < np.finfo(np.float64).eps] = 0. # update H if update_H: delta_H = _multiplicative_update_h(X, W, H, beta_loss, l1_reg_H, l2_reg_H, gamma) H *= delta_H # These values will be recomputed since H changed H_sum, HHt, XHt = None, None, None # necessary for stability with beta_loss < 1 if beta_loss <= 1: H[H < np.finfo(np.float64).eps] = 0. 
# test convergence criterion every 10 iterations if tol > 0 and n_iter % 10 == 0: error = _beta_divergence(X, W, H, beta_loss, square_root=True) if verbose: iter_time = time.time() print("Epoch %02d reached after %.3f seconds, error: %f" % (n_iter, iter_time - start_time, error)) if (previous_error - error) / error_at_init < tol: break previous_error = error # do not print if we have already printed in the convergence test if verbose and (tol == 0 or n_iter % 10 != 0): end_time = time.time() print("Epoch %02d reached after %.3f seconds." % (n_iter, end_time - start_time)) return W, H, n_iter
def _fit_multiplicative_update(X, W, H, beta_loss='frobenius', max_iter=200, tol=1e-4, l1_reg_W=0, l1_reg_H=0, l2_reg_W=0, l2_reg_H=0, update_H=True, verbose=0): """Compute Non-negative Matrix Factorization with Multiplicative Update The objective function is _beta_divergence(X, WH) and is minimized with an alternating minimization of W and H. Each minimization is done with a Multiplicative Update. Parameters ---------- X : array-like of shape (n_samples, n_features) Constant input matrix. W : array-like of shape (n_samples, n_components) Initial guess for the solution. H : array-like of shape (n_components, n_features) Initial guess for the solution. beta_loss : float or string in {'frobenius', 'kullback-leibler', \ 'itakura-saito'}, default='frobenius' String must be in {'frobenius', 'kullback-leibler', 'itakura-saito'}. Beta divergence to be minimized, measuring the distance between X and the dot product WH. Note that values different from 'frobenius' (or 2) and 'kullback-leibler' (or 1) lead to significantly slower fits. Note that for beta_loss <= 0 (or 'itakura-saito'), the input matrix X cannot contain zeros. max_iter : int, default=200 Number of iterations. tol : float, default=1e-4 Tolerance of the stopping condition. l1_reg_W : float, default=0. L1 regularization parameter for W. l1_reg_H : float, default=0. L1 regularization parameter for H. l2_reg_W : float, default=0. L2 regularization parameter for W. l2_reg_H : float, default=0. L2 regularization parameter for H. update_H : bool, default=True Set to True, both W and H will be estimated from initial guesses. Set to False, only W will be estimated. verbose : int, default=0 The verbosity level. Returns ------- W : ndarray of shape (n_samples, n_components) Solution to the non-negative least squares problem. H : ndarray of shape (n_components, n_features) Solution to the non-negative least squares problem. n_iter : int The number of iterations done by the algorithm. References ---------- Fevotte, C., & Idier, J. (2011). Algorithms for nonnegative matrix factorization with the beta-divergence. Neural Computation, 23(9). """ start_time = time.time() beta_loss = _beta_loss_to_float(beta_loss) # gamma for Maximization-Minimization (MM) algorithm [Fevotte 2011] if beta_loss < 1: gamma = 1. / (2. - beta_loss) elif beta_loss > 2: gamma = 1. / (beta_loss - 1.) else: gamma = 1. # used for the convergence criterion error_at_init = _beta_divergence(X, W, H, beta_loss, square_root=True) previous_error = error_at_init H_sum, HHt, XHt = None, None, None for n_iter in range(1, max_iter + 1): # update W # H_sum, HHt and XHt are saved and reused if not update_H delta_W, H_sum, HHt, XHt = _multiplicative_update_w( X, W, H, beta_loss, l1_reg_W, l2_reg_W, gamma, H_sum, HHt, XHt, update_H) W *= delta_W # necessary for stability with beta_loss < 1 if beta_loss < 1: W[W < np.finfo(np.float64).eps] = 0. # update H if update_H: delta_H = _multiplicative_update_h(X, W, H, beta_loss, l1_reg_H, l2_reg_H, gamma) H *= delta_H # These values will be recomputed since H changed H_sum, HHt, XHt = None, None, None # necessary for stability with beta_loss < 1 if beta_loss <= 1: H[H < np.finfo(np.float64).eps] = 0. 
# test convergence criterion every 10 iterations if tol > 0 and n_iter % 10 == 0: error = _beta_divergence(X, W, H, beta_loss, square_root=True) if verbose: iter_time = time.time() print("Epoch %02d reached after %.3f seconds, error: %f" % (n_iter, iter_time - start_time, error)) if (previous_error - error) / error_at_init < tol: break previous_error = error # do not print if we have already printed in the convergence test if verbose and (tol == 0 or n_iter % 10 != 0): end_time = time.time() print("Epoch %02d reached after %.3f seconds." % (n_iter, end_time - start_time)) return W, H, n_iter
21,216
def generate_command_cache(bench_path='.'): """Caches all available commands (even custom apps) via Frappe Default caching behaviour: generated the first time any command (for a specific bench directory) """ python = get_env_cmd('python', bench_path=bench_path) sites_path = os.path.join(bench_path, 'sites') if os.path.exists(bench_cache_file): os.remove(bench_cache_file) try: command = "{0} -m frappe.utils.bench_helper get-frappe-commands".format(python) logger.debug('generate_command_cache(\'%s\') executing: %s', bench_path, command) output = get_cmd_output(command, cwd=sites_path) with open(bench_cache_file, 'w') as f: json.dump(eval(output), f) return json.loads(output) except subprocess.CalledProcessError as e: logger.error('generate_command_cache(\'%s\') failed executing: %s', bench_path, command, exc_info=e) if hasattr(e, "stderr"): print(e.stderr.decode('utf-8')) raise e
def generate_command_cache(bench_path='.'): """Caches all available commands (even custom apps) via Frappe Default caching behaviour: generated the first time any command (for a specific bench directory) """ python = get_env_cmd('python', bench_path=bench_path) sites_path = os.path.join(bench_path, 'sites') if os.path.exists(bench_cache_file): os.remove(bench_cache_file) try: command = "{0} -m frappe.utils.bench_helper get-frappe-commands".format(python) logger.debug("generate_command_cache('%s') executing: %s", bench_path, command) output = get_cmd_output(command, cwd=sites_path) with open(bench_cache_file, 'w') as f: json.dump(eval(output), f) return json.loads(output) except subprocess.CalledProcessError as e: logger.error('generate_command_cache(\'%s\') failed executing: %s', bench_path, command, exc_info=e) if hasattr(e, "stderr"): print(e.stderr.decode('utf-8')) raise e
34,116
def load(filename: Text = None, **kwargs: Any) -> 'RasaNLUModelConfig': if filename is None and os.path.isfile(DEFAULT_CONFIG_LOCATION): filename = DEFAULT_CONFIG_LOCATION if filename is not None: try: file_config = utils.read_yaml_file(filename) except yaml.parser.ParserError as e: raise InvalidConfigError("Failed to read configuration file " "'{}'. Error: {}".format(filename, e)) if kwargs: file_config.update(kwargs) return RasaNLUModelConfig(file_config) else: return RasaNLUModelConfig(kwargs)
def load(filename: Optional[Text] = None, **kwargs: Any) -> 'RasaNLUModelConfig': if filename is None and os.path.isfile(DEFAULT_CONFIG_LOCATION): filename = DEFAULT_CONFIG_LOCATION if filename is not None: try: file_config = utils.read_yaml_file(filename) except yaml.parser.ParserError as e: raise InvalidConfigError("Failed to read configuration file " "'{}'. Error: {}".format(filename, e)) if kwargs: file_config.update(kwargs) return RasaNLUModelConfig(file_config) else: return RasaNLUModelConfig(kwargs)
2,729
def pairwise_kernels( X, Y=None, metric="linear", *, filter_params=False, n_jobs=None, **kwds ): """Compute the kernel between arrays X and optional array Y. This method takes either a vector array or a kernel matrix, and returns a kernel matrix. If the input is a vector array, the kernels are computed. If the input is a kernel matrix, it is returned instead. This method provides a safe way to take a kernel matrix as input, while preserving compatibility with many other algorithms that take a vector array. If Y is given (default is None), then the returned matrix is the pairwise kernel between the arrays from both X and Y. Valid values for metric are: ['additive_chi2', 'chi2', 'linear', 'poly', 'polynomial', 'rbf', 'laplacian', 'sigmoid', 'cosine'] Read more in the :ref:`User Guide <metrics>`. Parameters ---------- X : ndarray of shape (n_samples_X, n_samples_X) or (n_samples_X, n_features) Array of pairwise kernels between samples, or a feature array. The shape of the array should be (n_samples_X, n_samples_X) if metric == "precomputed" and (n_samples_X, n_features) otherwise. Y : ndarray of shape (n_samples_Y, n_features), default=None A second feature array only if X has shape (n_samples_X, n_features). metric : str or callable, default="linear" The metric to use when calculating kernel between instances in a feature array. If metric is a string, it must be one of the metrics in pairwise.PAIRWISE_KERNEL_FUNCTIONS. If metric is "precomputed", X is assumed to be a kernel matrix. Alternatively, if metric is a callable function, it is called on each pair of instances (rows) and the resulting value recorded. The callable should take two rows from X as input and return the corresponding kernel value as a single number. This means that callables from :mod:`sklearn.metrics.pairwise` are not allowed, as they operate on matrices, not single samples. Use the string identifying the kernel instead. filter_params : bool, default=False Whether to filter invalid parameters or not. n_jobs : int, default=None The number of jobs to use for the computation. This works by breaking down the pairwise matrix into n_jobs even slices and computing them in parallel. ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. ``-1`` means using all processors. See :term:`Glossary <n_jobs>` for more details. **kwds : optional keyword parameters Any further parameters are passed directly to the kernel function. Returns ------- K : ndarray of shape (n_samples_X, n_samples_X) or (n_samples_X, n_samples_Y) A kernel matrix K such that K_{i, j} is the kernel between the ith and jth vectors of the given matrix X, if Y is None. If Y is not None, then K_{i, j} is the kernel between the ith array from X and the jth array from Y. Notes ----- If metric is 'precomputed', Y is ignored and X is returned. """ # import GPKernel locally to prevent circular imports from ..gaussian_process.kernels import Kernel as GPKernel if metric == "precomputed": X, _ = check_pairwise_arrays(X, Y, precomputed=True) return X elif isinstance(metric, GPKernel): func = metric.__call__ elif metric in PAIRWISE_KERNEL_FUNCTIONS: if filter_params: kwds = {k: kwds[k] for k in kwds if k in KERNEL_PARAMS[metric]} func = PAIRWISE_KERNEL_FUNCTIONS[metric] elif callable(metric): func = partial(_pairwise_callable, metric=metric, **kwds) else: raise ValueError("Unknown kernel %r" % metric) return _parallel_pairwise(X, Y, func, n_jobs, **kwds)
def pairwise_kernels( X, Y=None, metric="linear", *, filter_params=False, n_jobs=None, **kwds ): """Compute the kernel between arrays X and optional array Y. This method takes either a vector array or a kernel matrix, and returns a kernel matrix. If the input is a vector array, the kernels are computed. If the input is a kernel matrix, it is returned instead. This method provides a safe way to take a kernel matrix as input, while preserving compatibility with many other algorithms that take a vector array. If Y is given (default is None), then the returned matrix is the pairwise kernel between the arrays from both X and Y. Valid values for metric are: ['additive_chi2', 'chi2', 'linear', 'poly', 'polynomial', 'rbf', 'laplacian', 'sigmoid', 'cosine'] Read more in the :ref:`User Guide <metrics>`. Parameters ---------- X : ndarray of shape (n_samples_X, n_samples_X) or (n_samples_X, n_features) Array of pairwise kernels between samples, or a feature array. The shape of the array should be (n_samples_X, n_samples_X) if metric == "precomputed" and (n_samples_X, n_features) otherwise. Y : ndarray of shape (n_samples_Y, n_features), default=None A second feature array only if X has shape (n_samples_X, n_features). metric : str or callable, default="linear" The metric to use when calculating kernel between instances in a feature array. If metric is a string, it must be one of the metrics in pairwise.PAIRWISE_KERNEL_FUNCTIONS. If metric is "precomputed", X is assumed to be a kernel matrix. Alternatively, if metric is a callable function, it is called on each pair of instances (rows) and the resulting value recorded. The callable should take two rows from X as input and return the corresponding kernel value as a single number. This means that callables from :mod:`sklearn.metrics.pairwise` are not allowed, as they operate on matrices, not single samples. Use the string identifying the kernel instead. filter_params : bool, default=False Whether to filter invalid parameters or not. n_jobs : int, default=None The number of jobs to use for the computation. This works by breaking down the pairwise matrix into n_jobs even slices and computing them in parallel. ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. ``-1`` means using all processors. See :term:`Glossary <n_jobs>` for more details. **kwds : optional keyword parameters Any further parameters are passed directly to the kernel function. Returns ------- K : ndarray of shape (n_samples_X, n_samples_X) or (n_samples_X, n_samples_Y) A kernel matrix K such that K_{i, j} is the kernel between the ith and jth vectors of the given matrix X, if Y is None. If Y is not None, then K_{i, j} is the kernel between the ith array from X and the jth array from Y. Notes ----- If metric is 'precomputed', Y is ignored and X is returned. """ # import GPKernel locally to prevent circular imports from ..gaussian_process.kernels import Kernel as GPKernel if metric == "precomputed": X, _ = check_pairwise_arrays(X, Y, precomputed=True) return X elif isinstance(metric, GPKernel): func = metric.__call__ elif metric in PAIRWISE_KERNEL_FUNCTIONS: if filter_params: kwds = {k: kwds[k] for k in kwds if k in KERNEL_PARAMS[metric]} func = PAIRWISE_KERNEL_FUNCTIONS[metric] elif callable(metric): func = partial(_pairwise_callable, metric=metric, **kwds) else: raise ValueError("Unknown kernel %r" % metric) return _parallel_pairwise(X, Y, func, n_jobs, **kwds)
22,769
def generate_key(key_size: int, key_dir: str, key_type: str = "rsa", elliptic_curve: str = "secp256r1", keyname: str = "key-certbot.pem", strict_permissions: bool = True) -> util.Key: """Initializes and saves a privkey. Inits key and saves it in PEM format on the filesystem. .. note:: keyname is the attempted filename, it may be different if a file already exists at the path. :param int key_size: key size in bits if key size is rsa. :param str key_dir: Key save directory. :param str key_type: Key Type [rsa, ecdsa] :param str elliptic_curve: Name of the elliptic curve if key type is ecdsa. :param str keyname: Filename of key :param boolean strict_permissions: If true, key will be saved with strict permissions (POSIX mode 0600). :returns: Key :rtype: :class:`certbot.util.Key` :raises ValueError: If unable to generate the key given key_size. """ try: key_pem = make_key( bits=key_size, elliptic_curve=elliptic_curve or "secp256r1", key_type=key_type, ) except ValueError as err: logger.debug("", exc_info=True) logger.error("Encountered error while making key: %s", str(err)) raise err # Save file util.make_or_verify_dir(key_dir, 0o700, strict_permissions) key_f, key_path = util.unique_file( os.path.join(key_dir, keyname), 0o600, "wb") with key_f: key_f.write(key_pem) if key_type == 'rsa': logger.debug("Generating RSA key (%d bits): %s", key_size, key_path) else: logger.debug("Generating ECDSA key (%d bits): %s", key_size, key_path) return util.Key(key_path, key_pem)
def generate_key(key_size: int, key_dir: str, key_type: str = "rsa", elliptic_curve: str = "secp256r1", keyname: str = "key-certbot.pem", strict_permissions: bool = True) -> util.Key: """Initializes and saves a privkey. Inits key and saves it in PEM format on the filesystem. .. note:: keyname is the attempted filename, it may be different if a file already exists at the path. :param int key_size: key size in bits if key size is rsa. :param str key_dir: Key save directory. :param str key_type: Key Type [rsa, ecdsa] :param str elliptic_curve: Name of the elliptic curve if key type is ecdsa. :param str keyname: Filename of key :param bool strict_permissions: If true, key will be saved with strict permissions (POSIX mode 0600). :returns: Key :rtype: :class:`certbot.util.Key` :raises ValueError: If unable to generate the key given key_size. """ try: key_pem = make_key( bits=key_size, elliptic_curve=elliptic_curve or "secp256r1", key_type=key_type, ) except ValueError as err: logger.debug("", exc_info=True) logger.error("Encountered error while making key: %s", str(err)) raise err # Save file util.make_or_verify_dir(key_dir, 0o700, strict_permissions) key_f, key_path = util.unique_file( os.path.join(key_dir, keyname), 0o600, "wb") with key_f: key_f.write(key_pem) if key_type == 'rsa': logger.debug("Generating RSA key (%d bits): %s", key_size, key_path) else: logger.debug("Generating ECDSA key (%d bits): %s", key_size, key_path) return util.Key(key_path, key_pem)
3,522
def rtd_parse_tags(tag_string): """ Parses a string into its tags. - Lowercases all tags - Converts underscores to hyphens - Slugifies tags - Removes empty tags :see: https://django-taggit.readthedocs.io/page/custom_tagging.html :param tag_string: a delimited string of tags :return: a sorted list of tag strings """ if tag_string: tag_string = tag_string.lower().replace('_', '-') tags = [slugify(tag) for tag in _parse_tags(tag_string)] return sorted([tag for tag in tags if tag])
def rtd_parse_tags(tag_string): """ Parses a string into its tags. - Lowercases all tags - Converts underscores to hyphens - Slugifies tags - Removes empty tags :see: https://django-taggit.readthedocs.io/page/custom_tagging.html :param tag_string: a delimited string of tags :return: a sorted list of tag strings """ if tag_string: tag_string = tag_string.lower().replace('_', '-') tags = [slugify(tag) for tag in _parse_tags(tag_string)] return sorted(tag for tag in tags if tag)
8,979
def get_time_unit(years=0, months=0, days=0, hours=0, minutes=0, seconds=0):
    """Map a time in (y, m, d, h, min, s) to its labels.

    :param int years: number of years
    :param int months: number of months
    :param int days: number of days
    :param int hours: number of hours
    :param int minutes: number of minutes
    :param int seconds: number of seconds
    :return: a tuple of 2-value tuples, each for a time amount and its label
    :rtype: :class:`tuple`

    This helper function gets a time split in years, months, days, hours,
    minutes, and seconds to return a tuple with the correct label for each
    unit. The label is pluralized and accounts for zero, one, and more than
    one value per unit::

        >>> get_time_unit(days=1, hours=15, minutes=54, seconds=19)
        (
            (0, 'years'),
            (0, 'months'),
            (1, 'day'),
            (15, 'hours'),
            (54, 'minutes'),
            (19, 'seconds'),
        )

    This function can be used with :func:`seconds_to_split`::

        >>> get_time_unit(*seconds_to_split(143659))
        # ... same result as the example above

    .. note::

        This function always return a tuple with **all** time units, even
        when their amount is 0 (which is their default value).

    """
    years_text = "year{}".format("s" if years != 1 else "")
    months_text = "month{}".format("s" if months != 1 else "")
    days_text = "day{}".format("s" if days != 1 else "")
    hours_text = "hour{}".format("s" if hours != 1 else "")
    minutes_text = "minute{}".format("s" if minutes != 1 else "")
    seconds_text = "second{}".format("s" if seconds != 1 else "")
    return (
        (years, years_text),
        (months, months_text),
        (days, days_text),
        (hours, hours_text),
        (minutes, minutes_text),
        (seconds, seconds_text),
    )
def get_time_unit(years=0, months=0, days=0, hours=0, minutes=0, seconds=0):
    """Map a time in (y, m, d, h, min, s) to its labels.

    :param int years: number of years
    :param int months: number of months
    :param int days: number of days
    :param int hours: number of hours
    :param int minutes: number of minutes
    :param int seconds: number of seconds
    :return: a tuple of 2-value tuples, each for a time amount and its label
    :rtype: :class:`tuple`

    This helper function gets a time split in years, months, days, hours,
    minutes, and seconds to return a tuple with the correct label for each
    unit. The label is pluralized and accounts for zero, one, and more than
    one value per unit::

        >>> get_time_unit(days=1, hours=15, minutes=54, seconds=19)
        (
            (0, 'years'),
            (0, 'months'),
            (1, 'day'),
            (15, 'hours'),
            (54, 'minutes'),
            (19, 'seconds'),
        )

    This function can be used with :func:`seconds_to_split`::

        >>> get_time_unit(*seconds_to_split(143659))
        # ... same result as the example above

    .. note::

        This function always returns a tuple with **all** time units, even
        when their amount is 0 (which is their default value).

    """
    years_text = "year{}".format("s" if years != 1 else "")
    months_text = "month{}".format("s" if months != 1 else "")
    days_text = "day{}".format("s" if days != 1 else "")
    hours_text = "hour{}".format("s" if hours != 1 else "")
    minutes_text = "minute{}".format("s" if minutes != 1 else "")
    seconds_text = "second{}".format("s" if seconds != 1 else "")
    return (
        (years, years_text),
        (months, months_text),
        (days, days_text),
        (hours, hours_text),
        (minutes, minutes_text),
        (seconds, seconds_text),
    )
6,914
def get_list_context(context, doctype, web_form_name=None): from frappe.modules import load_doctype_module list_context = context or frappe._dict() meta = frappe.get_meta(doctype) def update_context_from_module(module, list_context): # call the user defined method `get_list_context` # from the python module if hasattr(module, "get_list_context"): out = frappe._dict(module.get_list_context(list_context) or {}) if out: list_context = out return list_context # get context from the doctype module if not meta.custom: # custom doctypes don't have modules module = load_doctype_module(doctype) list_context = update_context_from_module(module, list_context) # get context for custom webform if meta.custom and web_form_name: list_context_for_custom_webform = frappe.get_hooks('get_list_context_for_custom_webform') if list_context_for_custom_webform: out = frappe._dict(frappe.get_attr(list_context_for_custom_webform[0])(meta.module) or {}) if out: list_context = out # get context from web form module if web_form_name: web_form = frappe.get_doc('Web Form', web_form_name) list_context = update_context_from_module(web_form.get_web_form_module(), list_context) # get path from '/templates/' folder of the doctype if not meta.custom and not list_context.row_template: list_context.row_template = meta.get_row_template() if not meta.custom and not list_context.list_template: list_context.template = meta.get_list_template() or "www/list.html" return list_context
def get_list_context(context, doctype, web_form_name=None): from frappe.modules import load_doctype_module list_context = context or frappe._dict() meta = frappe.get_meta(doctype) def update_context_from_module(module, list_context): # call the user defined method `get_list_context` # from the python module if hasattr(module, "get_list_context"): out = frappe._dict(module.get_list_context(list_context) or {}) if out: list_context = out return list_context # get context from the doctype module if not meta.custom: # custom doctypes don't have modules module = load_doctype_module(doctype) list_context = update_context_from_module(module, list_context) # get context for custom webform if meta.custom and web_form_name: webform_list_contexts = frappe.get_hooks('webform_list_context') if list_context_for_custom_webform: out = frappe._dict(frappe.get_attr(list_context_for_custom_webform[0])(meta.module) or {}) if out: list_context = out # get context from web form module if web_form_name: web_form = frappe.get_doc('Web Form', web_form_name) list_context = update_context_from_module(web_form.get_web_form_module(), list_context) # get path from '/templates/' folder of the doctype if not meta.custom and not list_context.row_template: list_context.row_template = meta.get_row_template() if not meta.custom and not list_context.list_template: list_context.template = meta.get_list_template() or "www/list.html" return list_context
20,406
def _get_ldap_interface(): global _ldap_interface if _ldap_interface is None: conf = { "vendor": "ldap", "name": "as-root", "parameters": { 'uri': 'ldapi://%2Fvar%2Frun%2Fslapd%2Fldapi', 'base_dn': 'dc=yunohost,dc=org', 'user_rdn': 'gidNumber=0+uidNumber=0,cn=peercred,cn=external,cn=auth' }, "extra": {} } _ldap_interface = ldap.Authenticator(**conf) return _ldap_interface
def _get_ldap_interface(): global _ldap_interface if _ldap_interface is None: conf = { "vendor": "ldap", "name": "as-root", "parameters": { 'uri': 'ldapi://%2Fvar%2Frun%2Fslapd%2Fldapi', 'base_dn': 'dc=yunohost,dc=org', 'user_rdn': 'gidNumber=0,uidNumber=0,cn=peercred,cn=external,cn=auth' }, "extra": {} } _ldap_interface = ldap.Authenticator(**conf) return _ldap_interface
9,397
def main(): ssh_defaults = dict( bits=0, type='rsa', passphrase=None, comment='ansible-generated on %s' % socket.gethostname() ) module = AnsibleModule( argument_spec=dict( state=dict(type='str', default='present', choices=['absent', 'present']), name=dict(type='str', required=True, aliases=['user']), uid=dict(type='str'), non_unique=dict(type='bool', default=False), group=dict(type='str'), groups=dict(type='list'), comment=dict(type='str'), home=dict(type='path'), shell=dict(type='str'), password=dict(type='str', no_log=True), login_class=dict(type='str'), # following options are specific to macOS hidden=dict(type='bool'), # following options are specific to selinux seuser=dict(type='str'), # following options are specific to userdel force=dict(type='bool', default=False), remove=dict(type='bool', default=False), # following options are specific to useradd create_home=dict(type='bool', default=True, aliases=['createhome']), skeleton=dict(type='str'), system=dict(type='bool', default=False), # following options are specific to usermod move_home=dict(type='bool', default=False), append=dict(type='bool', default=False), # following are specific to ssh key generation generate_ssh_key=dict(type='bool'), ssh_key_bits=dict(type='int', default=ssh_defaults['bits']), ssh_key_type=dict(type='str', default=ssh_defaults['type']), ssh_key_file=dict(type='path'), ssh_key_comment=dict(type='str', default=ssh_defaults['comment']), ssh_key_passphrase=dict(type='str', no_log=True), update_password=dict(type='str', default='always', choices=['always', 'on_create']), expires=dict(type='float'), password_lock=dict(type='bool'), local=dict(type='bool'), profile=dict(type='str'), authorization=dict(type='str'), role=dict(type='str'), ), supports_check_mode=True ) user = User(module) user.check_password_encrypted() module.debug('User instantiated - platform %s' % user.platform) if user.distribution: module.debug('User instantiated - distribution %s' % user.distribution) rc = None out = '' err = '' result = {} result['name'] = user.name result['state'] = user.state if user.state == 'absent': if user.user_exists(): if module.check_mode: module.exit_json(changed=True) (rc, out, err) = user.remove_user() if rc != 0: module.fail_json(name=user.name, msg=err, rc=rc) result['force'] = user.force result['remove'] = user.remove result['diff'] = { 'before': 'user exists\n', 'after': 'user removed\n', } elif user.state == 'present': if not user.user_exists(): if module.check_mode: module.exit_json(changed=True) (rc, out, err) = user.create_user() if module.check_mode: result['system'] = user.name else: result['system'] = user.system result['create_home'] = user.create_home result['diff'] = { 'before': 'user does not exist\n', 'after': 'user created\n', } else: # modify user (note: this function is check mode aware) (rc, out, err) = user.modify_user() result['append'] = user.append result['move_home'] = user.move_home result['diff'] = { 'before': ''.join([ '%s = %s\n' % (key, oldv) for key, (oldv, newv) in sorted(user.changes.items()) ]), 'after': ''.join([ '%s = %s\n' % (key, newv) for key, (oldv, newv) in sorted(user.changes.items()) ]), } if rc is not None and rc != 0: module.fail_json(name=user.name, msg=err, rc=rc) if user.password is not None: result['password'] = 'NOT_LOGGING_PASSWORD' if rc is None: result['changed'] = False else: result['changed'] = True if out: result['stdout'] = out if err: result['stderr'] = err if user.user_exists(): info = user.user_info() if info is False: result['msg'] = "failed to look up 
user name: %s" % user.name result['failed'] = True result['uid'] = info[2] result['group'] = info[3] result['comment'] = info[4] result['home'] = info[5] result['shell'] = info[6] if user.groups is not None: result['groups'] = user.groups # handle missing homedirs info = user.user_info() if user.home is None: user.home = info[5] if not os.path.exists(user.home) and user.create_home: if not module.check_mode: user.create_homedir(user.home) user.chown_homedir(info[2], info[3], user.home) result['changed'] = True result['diff']['after'] += 'created %s\n' % user.home # deal with ssh key if user.sshkeygen: # generate ssh key (note: this function is check mode aware) (rc, out, err) = user.ssh_key_gen() if rc is not None and rc != 0: module.fail_json(name=user.name, msg=err, rc=rc) if rc == 0: result['changed'] = True result['diff']['after'] += 'generated SSH key' (rc, out, err) = user.ssh_key_fingerprint() if rc == 0: result['ssh_fingerprint'] = out.strip() else: result['ssh_fingerprint'] = err.strip() result['ssh_key_file'] = user.get_ssh_key_path() result['ssh_public_key'] = user.get_ssh_public_key() module.exit_json(**result)
def main(): ssh_defaults = dict( bits=0, type='rsa', passphrase=None, comment='ansible-generated on %s' % socket.gethostname() ) module = AnsibleModule( argument_spec=dict( state=dict(type='str', default='present', choices=['absent', 'present']), name=dict(type='str', required=True, aliases=['user']), uid=dict(type='str'), non_unique=dict(type='bool', default=False), group=dict(type='str'), groups=dict(type='list'), comment=dict(type='str'), home=dict(type='path'), shell=dict(type='str'), password=dict(type='str', no_log=True), login_class=dict(type='str'), # following options are specific to macOS hidden=dict(type='bool'), # following options are specific to selinux seuser=dict(type='str'), # following options are specific to userdel force=dict(type='bool', default=False), remove=dict(type='bool', default=False), # following options are specific to useradd create_home=dict(type='bool', default=True, aliases=['createhome']), skeleton=dict(type='str'), system=dict(type='bool', default=False), # following options are specific to usermod move_home=dict(type='bool', default=False), append=dict(type='bool', default=False), # following are specific to ssh key generation generate_ssh_key=dict(type='bool'), ssh_key_bits=dict(type='int', default=ssh_defaults['bits']), ssh_key_type=dict(type='str', default=ssh_defaults['type']), ssh_key_file=dict(type='path'), ssh_key_comment=dict(type='str', default=ssh_defaults['comment']), ssh_key_passphrase=dict(type='str', no_log=True), update_password=dict(type='str', default='always', choices=['always', 'on_create']), expires=dict(type='float'), password_lock=dict(type='bool'), local=dict(type='bool'), profile=dict(type='str'), authorization=dict(type='str'), role=dict(type='str'), ), supports_check_mode=True ) user = User(module) user.check_password_encrypted() module.debug('User instantiated - platform %s' % user.platform) if user.distribution: module.debug('User instantiated - distribution %s' % user.distribution) rc = None out = '' err = '' result = {} result['name'] = user.name result['state'] = user.state if user.state == 'absent': if user.user_exists(): if module.check_mode: module.exit_json(changed=True) (rc, out, err) = user.remove_user() if rc != 0: module.fail_json(name=user.name, msg=err, rc=rc) result['force'] = user.force result['remove'] = user.remove result['diff'] = { 'before': 'user exists\n', 'after': 'user removed\n', } elif user.state == 'present': if not user.user_exists(): if module.check_mode: module.exit_json(changed=True) (rc, out, err) = user.create_user() if module.check_mode: result['system'] = user.name else: result['system'] = user.system result['create_home'] = user.create_home result['diff'] = { 'before': 'user does not exist\n', 'after': '{0} created\n'.format(user.name), } else: # modify user (note: this function is check mode aware) (rc, out, err) = user.modify_user() result['append'] = user.append result['move_home'] = user.move_home result['diff'] = { 'before': ''.join([ '%s = %s\n' % (key, oldv) for key, (oldv, newv) in sorted(user.changes.items()) ]), 'after': ''.join([ '%s = %s\n' % (key, newv) for key, (oldv, newv) in sorted(user.changes.items()) ]), } if rc is not None and rc != 0: module.fail_json(name=user.name, msg=err, rc=rc) if user.password is not None: result['password'] = 'NOT_LOGGING_PASSWORD' if rc is None: result['changed'] = False else: result['changed'] = True if out: result['stdout'] = out if err: result['stderr'] = err if user.user_exists(): info = user.user_info() if info is False: result['msg'] = 
"failed to look up user name: %s" % user.name result['failed'] = True result['uid'] = info[2] result['group'] = info[3] result['comment'] = info[4] result['home'] = info[5] result['shell'] = info[6] if user.groups is not None: result['groups'] = user.groups # handle missing homedirs info = user.user_info() if user.home is None: user.home = info[5] if not os.path.exists(user.home) and user.create_home: if not module.check_mode: user.create_homedir(user.home) user.chown_homedir(info[2], info[3], user.home) result['changed'] = True result['diff']['after'] += 'created %s\n' % user.home # deal with ssh key if user.sshkeygen: # generate ssh key (note: this function is check mode aware) (rc, out, err) = user.ssh_key_gen() if rc is not None and rc != 0: module.fail_json(name=user.name, msg=err, rc=rc) if rc == 0: result['changed'] = True result['diff']['after'] += 'generated SSH key' (rc, out, err) = user.ssh_key_fingerprint() if rc == 0: result['ssh_fingerprint'] = out.strip() else: result['ssh_fingerprint'] = err.strip() result['ssh_key_file'] = user.get_ssh_key_path() result['ssh_public_key'] = user.get_ssh_public_key() module.exit_json(**result)
29,589
def test_timeout_sync(): with cluster() as (scheduler, workers): with Client(scheduler["address"]): s = Semaphore(name="x") # Using the context manager already acquires a lease, so the line below won't be able to acquire another one with s: assert s.acquire(timeout=0.025) is False
def test_timeout_sync(c, s, a, b): with cluster() as (scheduler, workers): with Client(scheduler["address"]): s = Semaphore(name="x") # Using the context manager already acquires a lease, so the line below won't be able to acquire another one with s: assert s.acquire(timeout=0.025) is False
50,716
def setup_platform(hass, config, add_entities, discovery_info=None): """Set up the ThinkingCleaner platform.""" from pythinkingcleaner import ThinkingCleaner, Discovery if config.get(CONF_HOST) is None: discovery = Discovery() devices = discovery.discover() else: host = config.get(CONF_HOST) devices = [ThinkingCleaner(host, 'unknown')] @util.Throttle(MIN_TIME_BETWEEN_SCANS, MIN_TIME_BETWEEN_FORCED_SCANS) def update_devices(): """Update all devices.""" for device_object in devices: device_object.update() dev = [] for device in devices: for type_name in SWITCH_TYPES: dev.append(ThinkingCleanerSwitch( device, type_name, update_devices)) add_entities(dev)
def setup_platform(hass, config, add_entities, discovery_info=None): """Set up the ThinkingCleaner platform.""" from pythinkingcleaner import ThinkingCleaner, Discovery host = config.get(CONF_HOST) if host: devices = [ThinkingCleaner(host, 'unknown')] else: discovery = Discovery() devices = discovery.discover() @util.Throttle(MIN_TIME_BETWEEN_SCANS, MIN_TIME_BETWEEN_FORCED_SCANS) def update_devices(): """Update all devices.""" for device_object in devices: device_object.update() dev = [] for device in devices: for type_name in SWITCH_TYPES: dev.append(ThinkingCleanerSwitch( device, type_name, update_devices)) add_entities(dev)
40,374
def bipartite_subgraph(subset: Union[PairTensor, Tuple[List[int], List[int]]], edge_index: Tensor, edge_attr: Optional[Tensor] = None, relabel_nodes: bool = False, num_nodes: Tuple[int, int] = None, return_edge_mask: bool = False): r"""Returns the induced subgraph of :obj:`(edge_index, edge_attr)` containing the nodes in :obj:`subset`, for a bipartite graph. Args: subset (PairTensor or tuple([int],[int])): The nodes to keep. edge_index (LongTensor): The edge indices. edge_attr (Tensor, optional): Edge weights or multi-dimensional edge features. (default: :obj:`None`) relabel_nodes (bool, optional): If set to :obj:`True`, the resulting :obj:`edge_index` will be relabeled to hold consecutive indices starting from zero. (default: :obj:`False`) num_nodes (tuple, optional): The number of nodes. (default: :obj:`None`) return_edge_mask (bool, optional): If set to :obj:`True`, will return the edge mask to filter out additional edge features. (default: :obj:`False`) :rtype: (:class:`LongTensor`, :class:`Tensor`) """ device = edge_index.device if isinstance(subset[0], (list, tuple)): subset = (torch.tensor(subset[0], dtype=torch.long, device=device), torch.tensor(subset[1], dtype=torch.long, device=device)) if subset[0].dtype == torch.bool or subset[0].dtype == torch.uint8: num_nodes = subset[0].size(0), subset[1].size(0) else: if num_nodes is None: num_nodes = (maybe_num_nodes(edge_index[0]), maybe_num_nodes(edge_index[1])) subset = (index_to_mask(subset[0], size=num_nodes[0]), index_to_mask(subset[1], size=num_nodes[1])) node_mask_i, node_mask_j = subset[0], subset[1] edge_mask = node_mask_i[edge_index[0]] & node_mask_j[edge_index[1]] edge_index = edge_index[:, edge_mask] edge_attr = edge_attr[edge_mask] if edge_attr is not None else None if relabel_nodes: node_idx_i = torch.zeros(node_mask_i.size(0), dtype=torch.long, device=device) node_idx_j = torch.zeros(node_mask_j.size(0), dtype=torch.long, device=device) node_idx_i[subset[0]] = torch.arange(subset[0].sum().item(), device=device) node_idx_j[subset[1]] = torch.arange(subset[1].sum().item(), device=device) edge_index = torch.stack( [node_idx_i[edge_index[0]], node_idx_j[edge_index[1]]]) if return_edge_mask: return edge_index, edge_attr, edge_mask else: return edge_index, edge_attr
def bipartite_subgraph(subset: Union[PairTensor, Tuple[List[int], List[int]]], edge_index: Tensor, edge_attr: Optional[Tensor] = None, relabel_nodes: bool = False, num_nodes: Tuple[int, int] = None, return_edge_mask: bool = False): r"""Returns the induced subgraph of :obj:`(edge_index, edge_attr)` containing the nodes in :obj:`subset`, for a bipartite graph. Args: subset (PairTensor or tuple([int],[int])): The nodes to keep. edge_index (LongTensor): The edge indices. edge_attr (Tensor, optional): Edge weights or multi-dimensional edge features. (default: :obj:`None`) relabel_nodes (bool, optional): If set to :obj:`True`, the resulting :obj:`edge_index` will be relabeled to hold consecutive indices starting from zero. (default: :obj:`False`) size (Tuple[int, int], optional): The size of the bipartite graph. (default: :obj:`None`) return_edge_mask (bool, optional): If set to :obj:`True`, will return the edge mask to filter out additional edge features. (default: :obj:`False`) :rtype: (:class:`LongTensor`, :class:`Tensor`) """ device = edge_index.device if isinstance(subset[0], (list, tuple)): subset = (torch.tensor(subset[0], dtype=torch.long, device=device), torch.tensor(subset[1], dtype=torch.long, device=device)) if subset[0].dtype == torch.bool or subset[0].dtype == torch.uint8: num_nodes = subset[0].size(0), subset[1].size(0) else: if num_nodes is None: num_nodes = (maybe_num_nodes(edge_index[0]), maybe_num_nodes(edge_index[1])) subset = (index_to_mask(subset[0], size=num_nodes[0]), index_to_mask(subset[1], size=num_nodes[1])) node_mask_i, node_mask_j = subset[0], subset[1] edge_mask = node_mask_i[edge_index[0]] & node_mask_j[edge_index[1]] edge_index = edge_index[:, edge_mask] edge_attr = edge_attr[edge_mask] if edge_attr is not None else None if relabel_nodes: node_idx_i = torch.zeros(node_mask_i.size(0), dtype=torch.long, device=device) node_idx_j = torch.zeros(node_mask_j.size(0), dtype=torch.long, device=device) node_idx_i[subset[0]] = torch.arange(subset[0].sum().item(), device=device) node_idx_j[subset[1]] = torch.arange(subset[1].sum().item(), device=device) edge_index = torch.stack( [node_idx_i[edge_index[0]], node_idx_j[edge_index[1]]]) if return_edge_mask: return edge_index, edge_attr, edge_mask else: return edge_index, edge_attr
6,783
def reopen_closed_assignment(doc): try: todo = frappe.get_list('ToDo', dict( reference_type = doc.doctype, reference_name = doc.docname, status = 'Closed' ))[0] except IndexError: return False todo = frappe.get_doc("ToDo", todo.get('name')) todo.status = 'Open' todo.save() return True
def reopen_closed_assignment(doc): try: todo = frappe.db.exists('ToDo', dict( reference_type = doc.doctype, reference_name = doc.docname, status = 'Closed' ))[0] except IndexError: return False todo = frappe.get_doc("ToDo", todo.get('name')) todo.status = 'Open' todo.save() return True
30,434
def parse_docker_image(docker_image):
    """Verify that the docker image is of demisto format & parse the name and tag

    Args:
        docker_image: String representation of the docker image name and tag

    Returns:
        The name and the tag of the docker image
    """
    if docker_image:
        try:
            # the first pattern captures the image name, the second captures the tag
            name = re.findall(r'(demisto\/.+):.+', docker_image, re.IGNORECASE)[0]
            tag = re.findall(r'demisto\/.+:(.+)', docker_image, re.IGNORECASE)[0]
            return name, tag
        except IndexError:
            print_error("The docker image isn't of format - demisto/image_name:0.0.0")
            return '', ''
    else:
        # If the yml file has no docker image we provide the default one 'demisto/python:1.3-alpine'
        return 'demisto/python', '1.3-alpine'
def parse_docker_image(docker_image):
    """Verify that the docker image is of demisto format & parse the name and tag

    Args:
        docker_image: String representation of the docker image name and tag

    Returns:
        The name and the tag of the docker image
    """
    if docker_image:
        try:
            # the first pattern captures the image name, the second captures the tag
            name = re.findall(r'(demisto\/.+):.+', docker_image, re.IGNORECASE)[0]
            tag = re.findall(r'demisto\/.+:(.+)', docker_image, re.IGNORECASE)[0]
            return name, tag
        except IndexError:
            print_error("The docker image isn't of format - demisto/image_name:X.X.X")
            return '', ''
    else:
        # If the yml file has no docker image we provide the default one 'demisto/python:1.3-alpine'
        return 'demisto/python', '1.3-alpine'
49,455
def main(): module = KatelloEntityAnsibleModule( entity_spec=dict( name=dict(required=True), friendly_name=dict(), scc_account=dict(required=True), state=dict(default='subscribe', choices=['subscribe']), ), ) entity_dict = module.clean_params() module.connect() entity_dict['organization'] = module.find_resource_by_name('organizations', entity_dict['organization'], thin=True) scope = {'organization_id': entity_dict['organization']['id']} entity_dict['scc_account'] = module.find_resource_by_name('scc_accounts', name=entity_dict['scc_account'], params=scope, thin=True) scope = {'scc_account_id': entity_dict['scc_account']['id']} # Try to find the SccProduct to work on # name is however not unique, but friendly_name is entity = None # If we have a friendly_name, search for it if 'friendly_name' in entity_dict and entity_dict['friendly_name'] != '': search_string = 'friendly_name="{0}"'.format(entity_dict['friendly_name']) entity = module.find_resource('scc_products', search_string, params=scope, failsafe=True) if module.state == 'subscribe': result = module.foremanapi.resource('scc_products').call('subscribe', {'id': entity['id'], 'scc_account_id': scope['scc_account_id']}) if result: module.set_changed() module.exit_json()
def main(): module = KatelloEntityAnsibleModule( entity_spec=dict( name=dict(required=True), friendly_name=dict(), scc_account=dict(required=True), state=dict(default='subscribe', choices=['subscribe']), ), ) entity_dict = module.clean_params() module.connect() entity_dict['organization'] = module.find_resource_by_name('organizations', entity_dict['organization'], thin=True) scope = {'organization_id': entity_dict['organization']['id']} entity_dict['scc_account'] = module.find_resource_by_name('scc_accounts', name=entity_dict['scc_account'], params=scope, thin=True) scope = {'scc_account_id': entity_dict['scc_account']['id']} # Try to find the SccProduct to work on # name is however not unique, but friendly_name is entity = None # If we have a friendly_name, search for it if 'friendly_name' in entity_dict and entity_dict['friendly_name'] != '': search_string = 'friendly_name="{0}"'.format(entity_dict['friendly_name']) entity = module.find_resource('scc_products', search_string, params=scope, failsafe=True) if module.state == 'subscribe': params = {'id': entity['id']} params.update(scope) result = module.foremanapi.resource('scc_products').call('subscribe', params) if result: module.set_changed() module.exit_json()
27,557
def send_metric(params): """Send anonymous metric over HTTP for tracking""" # Add to queue and *maybe* send if the user allows it metric_queue.append(params) # Check if the user wants to send metrics and errors if s.get("send_metrics"): for metric_params in metric_queue: url_params = urllib.parse.urlencode(metric_params) url = "http://www.google-analytics.com/collect?%s" % url_params # Send metric HTTP data try: r = requests.get(url, headers={"user-agent": user_agent}, verify=False) log.info("Track metric: [%s] %s | (%s bytes)" % (r.status_code, r.url, len(r.content))) except Exception as ex: log.warning("Failed to Track metric: %s", ex) # Wait a moment, so we don't spam the requests time.sleep(0.25) # All metrics have been sent (or attempted to send) # Clear the queue metric_queue.clear()
def send_metric(params): """Send anonymous metric over HTTP for tracking""" # Add to queue and *maybe* send if the user allows it metric_queue.append(params) # Check if the user wants to send metrics and errors if s.get("send_metrics"): for metric_params in metric_queue: url_params = urllib.parse.urlencode(metric_params) url = "http://www.google-analytics.com/collect?%s" % url_params # Send metric HTTP data try: r = requests.get(url, headers={"user-agent": user_agent}, verify=False) log.info("Track metric: [%s] %s | (%s bytes)" % (r.status_code, r.url, len(r.content))) except Exception as ex: log.warning("Failed to track metric", exc_info=1) # Wait a moment, so we don't spam the requests time.sleep(0.25) # All metrics have been sent (or attempted to send) # Clear the queue metric_queue.clear()
23,001
def main(args): global ready, proxy_server check_create_data_path() log_info() CertUtil.init_ca() allow_remote = args.get("allow_remote", 0) listen_ips = front.config.listen_ip if isinstance(listen_ip, str): listen_ips = [listen_ips] else: listen_ips = list(listen_ips) if allow_remote and ("0.0.0.0" not in listen_ips or "::" not in listen_ips): listen_ips.append("0.0.0.0") addresses = [(listen_ip, front.config.listen_port) for listen_ip in listen_ips] front.start() direct_front.start() proxy_server = simple_http_server.HTTPServer( addresses, proxy_handler.GAEProxyHandler, logger=xlog) ready = True # checked by launcher.module_init proxy_server.serve_forever()
def main(args): global ready, proxy_server check_create_data_path() log_info() CertUtil.init_ca() allow_remote = args.get("allow_remote", 0) listen_ips = front.config.listen_ip if not isinstance(listen_ips, list): listen_ips = [listen_ips] else: listen_ips = list(listen_ips) if allow_remote and ("0.0.0.0" not in listen_ips or "::" not in listen_ips): listen_ips.append("0.0.0.0") addresses = [(listen_ip, front.config.listen_port) for listen_ip in listen_ips] front.start() direct_front.start() proxy_server = simple_http_server.HTTPServer( addresses, proxy_handler.GAEProxyHandler, logger=xlog) ready = True # checked by launcher.module_init proxy_server.serve_forever()
26,219
def gather_lldp(module, lldpctl_docker_cmd, skip_interface_pattern_list): rc, output, err = module.run_command(lldpctl_docker_cmd) if output: output_dict = {} current_dict = {} lldp_entries = output.split("\n") skip_interface_pattern_str = '(?:% s)' % '|'.join(skip_interface_pattern_list) if skip_interface_pattern_list else None for entry in lldp_entries: if entry.startswith('lldp'): path, value = entry.strip().split("=", 1) path = path.split(".") if skip_interface_pattern_list and re.match(skip_interface_pattern_str, path[1]): continue path_components, final = path[:-1], path[-1] else: value = current_dict[final] + '\n' + entry current_dict = output_dict for path_component in path_components: current_dict[path_component] = current_dict.get(path_component, {}) current_dict = current_dict[path_component] current_dict[final] = value return output_dict
def gather_lldp(module, lldpctl_docker_cmd, skip_interface_pattern_list): rc, output, err = module.run_command(lldpctl_docker_cmd) if output: output_dict = {} current_dict = {} lldp_entries = output.split("\n") skip_interface_pattern_str = '(?:% s)' % '|'.join(skip_interface_pattern_list) if skip_interface_pattern_list else None for entry in lldp_entries: if entry.startswith("lldp"): path, value = entry.strip().split("=", 1) path = path.split(".") if skip_interface_pattern_list and re.match(skip_interface_pattern_str, path[1]): continue path_components, final = path[:-1], path[-1] else: value = current_dict[final] + '\n' + entry current_dict = output_dict for path_component in path_components: current_dict[path_component] = current_dict.get(path_component, {}) current_dict = current_dict[path_component] current_dict[final] = value return output_dict
895
def periodicity(f, symbol, check=False):
    """
    Tests the given function for periodicity in the given symbol.

    Parameters
    ==========

    f : Expr.
        The concerned function.
    symbol : Symbol
        The variable for which the period is to be determined.
    check : Boolean, optional
        The flag to verify whether the value being returned is a period or not.

    Returns
    =======

    period
        The period of the function is returned.
        `None` is returned when the function is aperiodic or has a complex period.
        The value of `0` is returned as the period of a constant function.

    Raises
    ======

    NotImplementedError
        The value of the period computed cannot be verified.


    Notes
    =====

    Currently, we do not support functions with a complex period.
    The period of functions having complex periodic values such
    as `exp`, `sinh` is evaluated to `None`.

    The value returned might not be the "fundamental" period of the given
    function i.e. it may not be the smallest periodic value of the function.

    The verification of the period through the `check` flag is not reliable
    due to internal simplification of the given expression. Hence, it is set
    to `False` by default.

    Examples
    ========
    >>> from sympy import Symbol, sin, cos, tan, exp
    >>> from sympy.calculus.util import periodicity
    >>> x = Symbol('x')
    >>> f = sin(x) + sin(2*x) + sin(3*x)
    >>> periodicity(f, x)
    2*pi
    >>> periodicity(sin(x)*cos(x), x)
    pi
    >>> periodicity(exp(tan(2*x) - 1), x)
    pi/2
    >>> periodicity(sin(4*x)**cos(2*x), x)
    pi
    >>> periodicity(exp(x), x)

    """
    from sympy.core.mod import Mod
    from sympy.core.relational import Relational
    from sympy.functions.elementary.exponential import exp
    from sympy.functions.elementary.complexes import Abs
    from sympy.functions.elementary.trigonometric import (
        TrigonometricFunction, sin, cos, csc, sec)
    from sympy.simplify.simplify import simplify
    from sympy.solvers.decompogen import decompogen
    from sympy.polys.polytools import degree

    temp = Dummy('x', real=True)
    f = f.subs(symbol, temp)
    symbol = temp

    def _check(orig_f, period):
        '''Return the checked period or raise an error.'''
        new_f = orig_f.subs(symbol, symbol + period)
        if new_f.equals(orig_f):
            return period
        else:
            raise NotImplementedError(filldedent('''
                The period of the given function cannot be verified.
                When `%s` was replaced with `%s + %s` in `%s`, the result
                was `%s` which was not recognized as being the same as
                the original function.
                So either the period was wrong or the two forms were
                not recognized as being equal.
                Set check=False to obtain the value.''' %
                (symbol, symbol, period, orig_f, new_f)))

    orig_f = f
    period = None

    if isinstance(f, Relational):
        f = f.lhs - f.rhs

    f = simplify(f)

    if symbol not in f.free_symbols:
        return S.Zero

    if isinstance(f, TrigonometricFunction):
        try:
            period = f.period(symbol)
        except NotImplementedError:
            pass

    if isinstance(f, Abs):
        arg = f.args[0]
        if isinstance(arg, (sec, csc, cos)):
            # all but tan and cot might have a
            # a period that is half as large
            # so recast as sin
            arg = sin(arg.args[0])
        period = periodicity(arg, symbol)
        if period is not None and isinstance(arg, sin):
            # the argument of Abs was a trigonometric other than
            # cot or tan; test to see if the half-period
            # is valid. Abs(arg) has behaviour equivalent to
            # orig_f, so use that for test:
            orig_f = Abs(arg)
            try:
                return _check(orig_f, period/2)
            except NotImplementedError as err:
                if check:
                    raise NotImplementedError(err)
            # else let new orig_f and period be
            # checked below

    if isinstance(f, exp) or (f.is_Pow and f.base == S.Exp1):
        f = Pow(S.Exp1, expand_mul(f.exp))
        if im(f) != 0:
            period_real = periodicity(re(f), symbol)
            period_imag = periodicity(im(f), symbol)
            if period_real is not None and period_imag is not None:
                period = lcim([period_real, period_imag])

    if f.is_Pow and f.base != S.Exp1:
        base, expo = f.args
        base_has_sym = base.has(symbol)
        expo_has_sym = expo.has(symbol)

        if base_has_sym and not expo_has_sym:
            period = periodicity(base, symbol)

        elif expo_has_sym and not base_has_sym:
            period = periodicity(expo, symbol)

        else:
            period = _periodicity(f.args, symbol)

    elif f.is_Mul:
        coeff, g = f.as_independent(symbol, as_Add=False)
        if isinstance(g, TrigonometricFunction) or coeff is not S.One:
            period = periodicity(g, symbol)

        else:
            period = _periodicity(g.args, symbol)

    elif f.is_Add:
        k, g = f.as_independent(symbol)
        if k is not S.Zero:
            return periodicity(g, symbol)

        period = _periodicity(g.args, symbol)

    elif isinstance(f, Mod):
        a, n = f.args

        if a == symbol:
            period = n
        elif isinstance(a, TrigonometricFunction):
            period = periodicity(a, symbol)
        #check if 'f' is linear in 'symbol'
        elif (a.is_polynomial(symbol) and degree(a, symbol) == 1 and
                symbol not in n.free_symbols):
            period = Abs(n / a.diff(symbol))

    elif isinstance(f, Piecewise):
        # Returning None, as the return type, period of the `piecewise`
        # also should be a piecewise. (i.e. the return type is not favorable)
        return None

    elif period is None:
        from sympy.solvers.decompogen import compogen
        g_s = decompogen(f, symbol)
        num_of_gs = len(g_s)
        if num_of_gs > 1:
            for index, g in enumerate(reversed(g_s)):
                start_index = num_of_gs - 1 - index
                g = compogen(g_s[start_index:], symbol)
                if g != orig_f and g != f:  # Fix for issue 12620
                    period = periodicity(g, symbol)
                    if period is not None:
                        break

    if period is not None:
        if check:
            return _check(orig_f, period)
        return period

    return None
def periodicity(f, symbol, check=False):
    """
    Tests the given function for periodicity in the given symbol.

    Parameters
    ==========

    f : Expr.
        The concerned function.
    symbol : Symbol
        The variable for which the period is to be determined.
    check : Boolean, optional
        The flag to verify whether the value being returned is a period or not.

    Returns
    =======

    period
        The period of the function is returned.
        `None` is returned when the function is aperiodic or has a complex period.
        The value of `0` is returned as the period of a constant function.

    Raises
    ======

    NotImplementedError
        The value of the period computed cannot be verified.


    Notes
    =====

    Currently, we do not support functions with a complex period.
    The period of functions having complex periodic values such
    as `exp`, `sinh` is evaluated to `None`.

    The value returned might not be the "fundamental" period of the given
    function i.e. it may not be the smallest periodic value of the function.

    The verification of the period through the `check` flag is not reliable
    due to internal simplification of the given expression. Hence, it is set
    to `False` by default.

    Examples
    ========
    >>> from sympy import Symbol, sin, cos, tan, exp
    >>> from sympy.calculus.util import periodicity
    >>> x = Symbol('x')
    >>> f = sin(x) + sin(2*x) + sin(3*x)
    >>> periodicity(f, x)
    2*pi
    >>> periodicity(sin(x)*cos(x), x)
    pi
    >>> periodicity(exp(tan(2*x) - 1), x)
    pi/2
    >>> periodicity(sin(4*x)**cos(2*x), x)
    pi
    >>> periodicity(exp(x), x)

    """
    from sympy.core.mod import Mod
    from sympy.core.relational import Relational
    from sympy.functions.elementary.exponential import exp
    from sympy.functions.elementary.complexes import Abs
    from sympy.functions.elementary.trigonometric import (
        TrigonometricFunction, sin, cos, csc, sec)
    from sympy.simplify.simplify import simplify
    from sympy.solvers.decompogen import decompogen
    from sympy.polys.polytools import degree

    temp = Dummy('x', real=True)
    f = f.subs(symbol, temp)
    symbol = temp

    def _check(orig_f, period):
        '''Return the checked period or raise an error.'''
        new_f = orig_f.subs(symbol, symbol + period)
        if new_f.equals(orig_f):
            return period
        else:
            raise NotImplementedError(filldedent('''
                The period of the given function cannot be verified.
                When `%s` was replaced with `%s + %s` in `%s`, the result
                was `%s` which was not recognized as being the same as
                the original function.
                So either the period was wrong or the two forms were
                not recognized as being equal.
                Set check=False to obtain the value.''' %
                (symbol, symbol, period, orig_f, new_f)))

    orig_f = f
    period = None

    if isinstance(f, Relational):
        f = f.lhs - f.rhs

    f = simplify(f)

    if symbol not in f.free_symbols:
        return S.Zero

    if isinstance(f, TrigonometricFunction):
        try:
            period = f.period(symbol)
        except NotImplementedError:
            pass

    if isinstance(f, Abs):
        arg = f.args[0]
        if isinstance(arg, (sec, csc, cos)):
            # all but tan and cot might have a
            # a period that is half as large
            # so recast as sin
            arg = sin(arg.args[0])
        period = periodicity(arg, symbol)
        if period is not None and isinstance(arg, sin):
            # the argument of Abs was a trigonometric other than
            # cot or tan; test to see if the half-period
            # is valid. Abs(arg) has behaviour equivalent to
            # orig_f, so use that for test:
            orig_f = Abs(arg)
            try:
                return _check(orig_f, period/2)
            except NotImplementedError as err:
                if check:
                    raise NotImplementedError(err)
            # else let new orig_f and period be
            # checked below

    if isinstance(f, exp) or (f.is_Pow and f.base == S.Exp1):
        f = Pow(S.Exp1, expand_mul(f.exp))
        if im(f) != 0:
            period_real = periodicity(re(f), symbol)
            period_imag = periodicity(im(f), symbol)
            if period_real is not None and period_imag is not None:
                period = lcim([period_real, period_imag])

    if f.is_Pow and f.base != S.Exp1:
        base, expo = f.args
        base_has_sym = base.has(symbol)
        expo_has_sym = expo.has(symbol)

        if base_has_sym and not expo_has_sym:
            period = periodicity(base, symbol)

        elif expo_has_sym and not base_has_sym:
            period = periodicity(expo, symbol)

        else:
            period = _periodicity(f.args, symbol)

    elif f.is_Mul:
        coeff, g = f.as_independent(symbol, as_Add=False)
        if isinstance(g, TrigonometricFunction) or coeff is not S.One:
            period = periodicity(g, symbol)

        else:
            period = _periodicity(g.args, symbol)

    elif f.is_Add:
        k, g = f.as_independent(symbol)
        if k is not S.Zero:
            return periodicity(g, symbol)

        period = _periodicity(g.args, symbol)

    elif isinstance(f, Mod):
        a, n = f.args

        if a == symbol:
            period = n
        elif isinstance(a, TrigonometricFunction):
            period = periodicity(a, symbol)
        #check if 'f' is linear in 'symbol'
        elif (a.is_polynomial(symbol) and degree(a, symbol) == 1 and
                symbol not in n.free_symbols):
            period = Abs(n / a.diff(symbol))

    elif isinstance(f, Piecewise):
        pass  # not handling Piecewise yet

    elif period is None:
        from sympy.solvers.decompogen import compogen
        g_s = decompogen(f, symbol)
        num_of_gs = len(g_s)
        if num_of_gs > 1:
            for index, g in enumerate(reversed(g_s)):
                start_index = num_of_gs - 1 - index
                g = compogen(g_s[start_index:], symbol)
                if g != orig_f and g != f:  # Fix for issue 12620
                    period = periodicity(g, symbol)
                    if period is not None:
                        break

    if period is not None:
        if check:
            return _check(orig_f, period)
        return period

    return None
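A minimal usage sketch for the `periodicity` pair above, exercising only the behaviour its docstring describes; it assumes a standard SymPy installation and is not part of the record itself.

# Illustrative only: documented behaviour of periodicity(), assuming SymPy is installed.
from sympy import Symbol, sin, tan, exp
from sympy.calculus.util import periodicity

x = Symbol('x')
print(periodicity(sin(x) + sin(2*x), x))  # 2*pi
print(periodicity(tan(3*x), x))           # pi/3
print(periodicity(exp(x), x))             # None (aperiodic)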
44,535
def serve_command_handler(env_grpc_port=False, grpc_port=3000):
    server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))

    analyze_pb2_grpc.add_AnalyzeServiceServicer_to_server(
        AnalyzerEngine(), server)

    if env_grpc_port:
        port = os.environ.get('GRPC_PORT')
        if port is not None or port != '':
            grpc_port = int(port)

    server.add_insecure_port('[::]:' + str(grpc_port))
    logging.info("Starting GRPC listener at port %d", grpc_port)
    server.start()
    try:
        while True:
            time.sleep(1)
    except KeyboardInterrupt:
        server.stop(0)
def serve_command_handler(env_grpc_port=False, grpc_port=3000):
    server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))

    analyze_pb2_grpc.add_AnalyzeServiceServicer_to_server(
        AnalyzerEngine(), server)

    if env_grpc_port:
        port = os.environ.get('GRPC_PORT')
        if port is not None or port != '':
            grpc_port = int(port)

    server.add_insecure_port('[::]:' + str(grpc_port))
    logging.info("Starting GRPC listener at port {}".format(grpc_port))
    server.start()
    try:
        while True:
            time.sleep(1)
    except KeyboardInterrupt:
        server.stop(0)
8,650
def find_config(config_dir, name, extension='.cfg'):
    """Build the absolute path for the given configuration file ``name``

    :param str config_dir: path to the configuration directory
    :param str name: configuration file ``name``
    :param str extension: configuration file's extension (default to ``.cfg``)
    :return: the path of the configuration file, either in the current
             directory or from the ``config_dir`` directory

    This function tries different location:

    * the current directory,
    * the ``config_dir`` directory with the ``extension`` suffix,
    * the ``config_dir`` directory without a suffix,

    Example::

        >>> from sopel import run_script
        >>> os.listdir()
        ['local.cfg', 'extra.ini']
        >>> os.listdir(config.DEFAULT_HOMEDIR)
        ['config.cfg', 'extra.ini', 'module.cfg', 'README']
        >>> run_script.find_config(config.DEFAULT_HOMEDIR, 'local.cfg')
        'local.cfg'
        >>> run_script.find_config(config.DEFAULT_HOMEDIR, 'local')
        '/home/username/.sopel/local'
        >>> run_script.find_config(config.DEFAULT_HOMEDIR, 'config')
        '/home/username/.sopel/config.cfg'
        >>> run_script.find_config(config.DEFAULT_HOMEDIR, 'extra', '.ini')
        '/home/username/.sopel/extra.ini'

    """
    if os.path.isfile(name):
        return name
    name_ext = name + extension
    for config in enumerate_configs(config_dir, extension):
        if name_ext == config:
            return os.path.join(config_dir, name_ext)

    return os.path.join(config_dir, name)
def find_config(config_dir, name, extension='.cfg'):
    """Build the absolute path for the given configuration file ``name``

    :param str config_dir: path to the configuration directory
    :param str name: configuration file ``name``
    :param str extension: configuration file's extension (default to ``.cfg``)
    :return: the path of the configuration file, either in the current
             directory or from the ``config_dir`` directory

    This function tries different location:

    * the current directory,
    * the ``config_dir`` directory with the ``extension`` suffix
    * the ``config_dir`` directory without a suffix,

    Example::

        >>> from sopel import run_script
        >>> os.listdir()
        ['local.cfg', 'extra.ini']
        >>> os.listdir(config.DEFAULT_HOMEDIR)
        ['config.cfg', 'extra.ini', 'module.cfg', 'README']
        >>> run_script.find_config(config.DEFAULT_HOMEDIR, 'local.cfg')
        'local.cfg'
        >>> run_script.find_config(config.DEFAULT_HOMEDIR, 'local')
        '/home/username/.sopel/local'
        >>> run_script.find_config(config.DEFAULT_HOMEDIR, 'config')
        '/home/username/.sopel/config.cfg'
        >>> run_script.find_config(config.DEFAULT_HOMEDIR, 'extra', '.ini')
        '/home/username/.sopel/extra.ini'

    """
    if os.path.isfile(name):
        return name
    name_ext = name + extension
    for config in enumerate_configs(config_dir, extension):
        if name_ext == config:
            return os.path.join(config_dir, name_ext)

    return os.path.join(config_dir, name)
29,803
def is_pod_completed(pod: V1Pod):
    condition = get_pod_condition(pod, "ContainersReady")
    return condition.reason == "PodCompleted" if condition else False
def is_pod_completed(pod: V1Pod) -> bool:
    condition = get_pod_condition(pod, "ContainersReady")
    return condition.reason == "PodCompleted" if condition else False
4,444
def match_event_names(event_names, keys, *, on_missing='raise'): """Search a collection of event names for matching (sub-)groups of events. This function is particularly helpful when using grouped event names (i.e., event names containing forward slashes ``/``). Assuming for example the following event names in the data:: event_names = [ 'auditory/left', 'auditory/right', 'visual/left', 'visual/right' ] you could easily query for all ``auditory``, and ``left`` events:: >>> match_event_names( ... event_names=event_names, ... keys=['auditory', 'left'] ... ) ['auditory/left', 'auditory/right', 'visual/left'] Parameters ---------- event_names : array-like of str | dict Either a collection of event names, or the ``event_id`` dictionary mapping event names to event codes. keys : array-like of str | str One or multiple event names or groups to search for in ``event_names``. on_missing : 'raise' | 'warn' | 'ignore' How to handle situations when none of the ``keys`` can be found in ``event_names``. If ``'warn'`` or ``'ignore'``, an empty list will be returned. Returns ------- matches : list of str All event names that match any of the ``keys`` provided. Notes ----- .. versionadded:: 1.0 """ _check_on_missing(on_missing) if isinstance(event_names, dict): event_names = list(event_names.keys()) # ensure we have a list of `keys` if ( isinstance(keys, (Sequence, np.ndarray)) and not isinstance(keys, str) ): keys = list(keys) else: keys = [keys] matches = [] # form the hierarchical event name mapping for key in keys: if not isinstance(key, str): raise ValueError(f'keys must be strings, got {type(key)} ({key})') matches.extend( name for name in event_names if set(key.split('/')).issubset(name.split('/')) ) if not matches: _on_missing( on_missing=on_missing, msg=f'Event name "{key}" could not be found. The following events ' f'are present in the data: {", ".join(event_names)}', error_klass=KeyError ) matches = sorted(set(matches)) # deduplicate if necessary return matches
def match_event_names(event_names, keys, *, on_missing='raise'): """Search a collection of event names for matching (sub-)groups of events. This function is particularly helpful when using grouped event names (i.e., event names containing forward slashes ``/``). Assuming for example the following event names in the data:: event_names = [ 'auditory/left', 'auditory/right', 'visual/left', 'visual/right' ] you could easily query for all ``auditory`` and ``left`` event names:: >>> match_event_names( ... event_names=event_names, ... keys=['auditory', 'left'] ... ) ['auditory/left', 'auditory/right', 'visual/left'] Parameters ---------- event_names : array-like of str | dict Either a collection of event names, or the ``event_id`` dictionary mapping event names to event codes. keys : array-like of str | str One or multiple event names or groups to search for in ``event_names``. on_missing : 'raise' | 'warn' | 'ignore' How to handle situations when none of the ``keys`` can be found in ``event_names``. If ``'warn'`` or ``'ignore'``, an empty list will be returned. Returns ------- matches : list of str All event names that match any of the ``keys`` provided. Notes ----- .. versionadded:: 1.0 """ _check_on_missing(on_missing) if isinstance(event_names, dict): event_names = list(event_names.keys()) # ensure we have a list of `keys` if ( isinstance(keys, (Sequence, np.ndarray)) and not isinstance(keys, str) ): keys = list(keys) else: keys = [keys] matches = [] # form the hierarchical event name mapping for key in keys: if not isinstance(key, str): raise ValueError(f'keys must be strings, got {type(key)} ({key})') matches.extend( name for name in event_names if set(key.split('/')).issubset(name.split('/')) ) if not matches: _on_missing( on_missing=on_missing, msg=f'Event name "{key}" could not be found. The following events ' f'are present in the data: {", ".join(event_names)}', error_klass=KeyError ) matches = sorted(set(matches)) # deduplicate if necessary return matches
4,623
def _plot_surf_matplotlib(coords, faces, surf_map=None, bg_map=None, hemi='left', view='lateral', cmap=None, colorbar=False, avg_method='mean', threshold=None, alpha='auto', bg_on_data=False, darkness=1, vmin=None, vmax=None, cbar_vmin=None, cbar_vmax=None, cbar_tick_format='%.2g', title=None, title_font_size=18, output_file=None, axes=None, figure=None, **kwargs): """Helper function for plot_surf. This function handles surface plotting when the selected engine is matplotlib. """ _default_figsize = [4, 4] limits = [coords.min(), coords.max()] # set view elev, azim = _set_view_plot_surf_matplotlib(hemi, view) # if no cmap is given, set to matplotlib default if cmap is None: cmap = plt.cm.get_cmap(plt.rcParamsDefault['image.cmap']) # if cmap is given as string, translate to matplotlib cmap elif isinstance(cmap, str): cmap = plt.cm.get_cmap(cmap) figsize = _default_figsize # Leave space for colorbar if colorbar: figsize[0] += .7 # initiate figure and 3d axes if axes is None: if figure is None: figure = plt.figure(figsize=figsize) axes = figure.add_axes((0, 0, 1, 1), projection="3d") else: if figure is None: figure = axes.get_figure() axes.set_xlim(*limits) axes.set_ylim(*limits) axes.view_init(elev=elev, azim=azim) axes.set_axis_off() # plot mesh without data p3dcollec = axes.plot_trisurf(coords[:, 0], coords[:, 1], coords[:, 2], triangles=faces, linewidth=0., antialiased=False, color='white') # reduce viewing distance to remove space around mesh axes.dist = 8 face_colors = _compute_facecolors_matplotlib( bg_map, faces, coords.shape[0], darkness, alpha ) if surf_map is not None: surf_map_faces = _compute_surf_map_faces_matplotlib( surf_map, faces, avg_method, coords.shape[0], face_colors.shape[0] ) surf_map_faces, kept_indices, vmin, vmax = _threshold_and_rescale( surf_map_faces, threshold, vmin, vmax ) # multiply data with background if indicated if bg_on_data: face_colors[kept_indices] = cmap(surf_map_faces[kept_indices])\ * face_colors[kept_indices] else: face_colors[kept_indices] = cmap(surf_map_faces[kept_indices]) if colorbar: cbar_vmin = cbar_vmin if cbar_vmin is not None else vmin cbar_vmax = cbar_vmax if cbar_vmax is not None else vmax ticks = _get_ticks_matplotlib(cbar_vmin, cbar_vmax, cbar_tick_format) our_cmap, norm = _get_cmap_matplotlib(cmap, cbar_vmin, cbar_vmax, threshold) bounds = np.linspace(cbar_vmin, cbar_vmax, our_cmap.N) # we need to create a proxy mappable proxy_mappable = ScalarMappable(cmap=our_cmap, norm=norm) proxy_mappable.set_array(surf_map_faces) cax, kw = make_axes(axes, location='right', fraction=.15, shrink=.5, pad=.0, aspect=10.) figure.colorbar( proxy_mappable, cax=cax, ticks=ticks, boundaries=bounds, spacing='proportional', format=cbar_tick_format, orientation='vertical') p3dcollec.set_facecolors(face_colors) if title is not None: figure.suptitle(title, x=.5, y=.95, fontsize=title_font_size) # save figure if output file is given if output_file is not None: figure.savefig(output_file) plt.close() else: return figure
def _plot_surf_matplotlib(coords, faces, surf_map=None, bg_map=None, hemi='left', view='lateral', cmap=None, colorbar=False, avg_method='mean', threshold=None, alpha='auto', bg_on_data=False, darkness=1, vmin=None, vmax=None, cbar_vmin=None, cbar_vmax=None, cbar_tick_format='%.2g', title=None, title_font_size=18, output_file=None, axes=None, figure=None, **kwargs): """Helper function for plot_surf. This function handles surface plotting when the selected engine is matplotlib. """ _default_figsize = [4, 4] limits = [coords.min(), coords.max()] # set view elev, azim = _set_view_plot_surf_matplotlib(hemi, view) # if no cmap is given, set to matplotlib default if cmap is None: cmap = plt.cm.get_cmap(plt.rcParamsDefault['image.cmap']) # if cmap is given as string, translate to matplotlib cmap elif isinstance(cmap, str): cmap = plt.cm.get_cmap(cmap) figsize = _default_figsize # Leave space for colorbar if colorbar: figsize[0] += .7 # initiate figure and 3d axes if axes is None: if figure is None: figure = plt.figure(figsize=figsize) axes = figure.add_axes((0, 0, 1, 1), projection="3d") else: if figure is None: figure = axes.get_figure() axes.set_xlim(*limits) axes.set_ylim(*limits) axes.view_init(elev=elev, azim=azim) axes.set_axis_off() # plot mesh without data p3dcollec = axes.plot_trisurf(coords[:, 0], coords[:, 1], coords[:, 2], triangles=faces, linewidth=0., antialiased=False, color='white') # reduce viewing distance to remove space around mesh axes.dist = 8 face_colors = _compute_facecolors_matplotlib( bg_map, faces, coords.shape[0], darkness, alpha ) if surf_map is not None: surf_map_faces = _compute_surf_map_faces_matplotlib( surf_map, faces, avg_method, coords.shape[0], face_colors.shape[0] ) surf_map_faces, kept_indices, vmin, vmax = _threshold_and_rescale( surf_map_faces, threshold, vmin, vmax ) # multiply data with background if indicated if bg_on_data: face_colors[kept_indices] = cmap(surf_map_faces[kept_indices])\ * face_colors[kept_indices] else: face_colors[kept_indices] = cmap(surf_map_faces[kept_indices]) if colorbar: cbar_vmin = cbar_vmin if cbar_vmin is not None else vmin cbar_vmax = cbar_vmax if cbar_vmax is not None else vmax ticks = _get_ticks_matplotlib(cbar_vmin, cbar_vmax, cbar_tick_format) our_cmap, norm = _get_cmap_matplotlib(cmap, vmin, vmax, threshold) bounds = np.linspace(cbar_vmin, cbar_vmax, our_cmap.N) # we need to create a proxy mappable proxy_mappable = ScalarMappable(cmap=our_cmap, norm=norm) proxy_mappable.set_array(surf_map_faces) cax, kw = make_axes(axes, location='right', fraction=.15, shrink=.5, pad=.0, aspect=10.) figure.colorbar( proxy_mappable, cax=cax, ticks=ticks, boundaries=bounds, spacing='proportional', format=cbar_tick_format, orientation='vertical') p3dcollec.set_facecolors(face_colors) if title is not None: figure.suptitle(title, x=.5, y=.95, fontsize=title_font_size) # save figure if output file is given if output_file is not None: figure.savefig(output_file) plt.close() else: return figure
23,038
def drop_by_shallow_copy(df, columns):
    """ Use shallow copy to drop columns in place """
    df2 = df.copy(deep=False)
    if isinstance(columns, str):
        columns = [columns]
    for column in columns:
        del df2[column]
    return df2
def drop_by_shallow_copy(df, columns):
    """ Use shallow copy to drop columns in place """
    df2 = df.copy(deep=False)
    if not isinstance(columns, (list, pd.Index)):
        columns = [columns]
    for column in columns:
        del df2[column]
    return df2
10,904
def find_library_path(lib_filename):
    """
    Search library by file name in the system
    Return absolute path to existing libraries
    :params lib_filename: name of library file
    """
    lib_abspath = None
    os_type = get_os_type()
    try:
        lib_obj = ctypes.cdll.LoadLibrary(lib_filename)
    except OSError:
        _log.info("Library '%s' not found in host system", lib_filename)
    else:
        # ctypes.util.find_library only accepts unversioned library names
        if os_type == LINUX:
            # find path to library with dlinfo
            lib_abspath = locate_solib(lib_obj)
        elif os_type == DARWIN:
            # ctypes.macholib.dyld.dyld_find accepts file names and returns full path
            lib_abspath = ctypes.macholib.dyld.dyld_find(lib_filename)
        else:
            raise EasyBuildError("Unknown host OS type: %s", os_type)

        _log.info("Found absolute path to %s: %s" % (lib_filename, lib_abspath))

    return lib_abspath
def find_library_path(lib_filename):
    """
    Search library by file name in the system
    Return absolute path to existing libraries
    :params lib_filename: name of library file
    """
    lib_abspath = None
    os_type = get_os_type()
    try:
        lib_obj = ctypes.cdll.LoadLibrary(lib_filename)
    except OSError:
        _log.info("Library '%s' not found in host system", lib_filename)
    else:
        # ctypes.util.find_library only accepts unversioned library names
        if os_type == LINUX:
            # find path to library with dlinfo
            lib_abspath = locate_solib(lib_obj)
        elif os_type == DARWIN:
            # ctypes.macholib.dyld.dyld_find accepts file names and returns full path
            lib_abspath = ctypes.macholib.dyld.dyld_find(lib_filename)
        else:
            raise EasyBuildError("Unknown host OS type: %s", os_type)

        _log.info("Found absolute path to %s: %s", lib_filename, lib_abspath)

    return lib_abspath
46,018
def build_laplacian_pyramid(
    input: torch.Tensor, max_level: int, border_type: str = 'reflect', align_corners: bool = False
) -> List[torch.Tensor]:
    r"""Construct the Laplacian pyramid for an image.

    .. image:: _static/img/build_pyramid.png

    The function constructs a vector of images and builds the Laplacian pyramid
    by recursively computing the difference after applying
    pyrUp to the adjacent layer in it's Gaussian pyramid.

    Args:
        input : the tensor to be used to construct the pyramid.
        max_level: 0-based index of the last (the smallest) pyramid layer.
          It must be non-negative.
        border_type: the padding mode to be applied before convolving.
          The expected modes are: ``'constant'``, ``'reflect'``,
          ``'replicate'`` or ``'circular'``.
        align_corners: interpolation flag.

    Shape:
        - Input: :math:`(B, C, H, W)`
        - Output :math:`[(B, C, H, W), (B, C, H/2, W/2), ...]`
    """
    if not isinstance(input, torch.Tensor):
        raise TypeError(f"Input type is not a torch.Tensor. Got {type(input)}")

    if not len(input.shape) == 4:
        raise ValueError(f"Invalid input shape, we expect BxCxHxW. Got: {input.shape}")

    if not isinstance(max_level, int) or max_level < 0:
        raise ValueError(f"Invalid max_level, it must be a positive integer. Got: {max_level}")

    # create gaussian pyramid
    gaussian_pyramid: List[torch.Tensor] = build_pyramid(input, max_level)
    # create empty list
    laplacian_pyramid: List[torch.Tensor] = []

    for i in range(max_level - 1):
        img_expand: torch.Tensor = pyrup(gaussian_pyramid[i + 1])
        laplacian: torch.Tensor = gaussian_pyramid[i] - img_expand
        laplacian_pyramid.append(laplacian)
    laplacian_pyramid.append(gaussian_pyramid[-1])
    return laplacian_pyramid
def build_laplacian_pyramid(
    input: torch.Tensor, max_level: int, border_type: str = 'reflect', align_corners: bool = False
) -> List[Tensor]:
    r"""Construct the Laplacian pyramid for an image.

    .. image:: _static/img/build_pyramid.png

    The function constructs a vector of images and builds the Laplacian pyramid
    by recursively computing the difference after applying
    pyrUp to the adjacent layer in it's Gaussian pyramid.

    Args:
        input : the tensor to be used to construct the pyramid.
        max_level: 0-based index of the last (the smallest) pyramid layer.
          It must be non-negative.
        border_type: the padding mode to be applied before convolving.
          The expected modes are: ``'constant'``, ``'reflect'``,
          ``'replicate'`` or ``'circular'``.
        align_corners: interpolation flag.

    Shape:
        - Input: :math:`(B, C, H, W)`
        - Output :math:`[(B, C, H, W), (B, C, H/2, W/2), ...]`
    """
    if not isinstance(input, torch.Tensor):
        raise TypeError(f"Input type is not a torch.Tensor. Got {type(input)}")

    if not len(input.shape) == 4:
        raise ValueError(f"Invalid input shape, we expect BxCxHxW. Got: {input.shape}")

    if not isinstance(max_level, int) or max_level < 0:
        raise ValueError(f"Invalid max_level, it must be a positive integer. Got: {max_level}")

    # create gaussian pyramid
    gaussian_pyramid: List[torch.Tensor] = build_pyramid(input, max_level)
    # create empty list
    laplacian_pyramid: List[torch.Tensor] = []

    for i in range(max_level - 1):
        img_expand: torch.Tensor = pyrup(gaussian_pyramid[i + 1])
        laplacian: torch.Tensor = gaussian_pyramid[i] - img_expand
        laplacian_pyramid.append(laplacian)
    laplacian_pyramid.append(gaussian_pyramid[-1])
    return laplacian_pyramid
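A small usage sketch for the pyramid builder above; it assumes the helpers the function calls (`build_pyramid`, `pyrup`) are importable in scope, e.g. from a kornia-style library, and is not part of the record.

# Illustrative only; build_laplacian_pyramid and its helpers are assumed to be in scope.
import torch

img = torch.rand(1, 3, 64, 64)  # B x C x H x W
pyr = build_laplacian_pyramid(img, max_level=3)
print([level.shape for level in pyr])  # each coarser level halves H and W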
27,240
def test_spark_dtype_to_ibis_dtype():
    from ibis.backends.spark import _SPARK_DTYPE_TO_IBIS_DTYPE

    assert len(_SPARK_DTYPE_TO_IBIS_DTYPE.keys()) == len(
        set(_SPARK_DTYPE_TO_IBIS_DTYPE.values())
    )
def test_spark_dtype_to_ibis_dtype():
    from . import _SPARK_DTYPE_TO_IBIS_DTYPE

    assert len(_SPARK_DTYPE_TO_IBIS_DTYPE.keys()) == len(
        set(_SPARK_DTYPE_TO_IBIS_DTYPE.values())
    )
1,016
def test_LineInfo():
    """Simple test for LineInfo construction and str()"""
    linfo = LineInfo(' %cd /home')
    str(linfo) == 'LineInfo [ |%|cd|/home]'
def test_LineInfo():
    """Simple test for LineInfo construction and str()"""
    linfo = LineInfo(' %cd /home')
    assert str(linfo) == 'LineInfo [ |%|cd|/home]'
6,840
def has_user_permission(doc, user=None): '''Returns True if User is allowed to view considering User Permissions''' from frappe.core.doctype.user_permission.user_permission import get_user_permissions user_permissions = get_user_permissions(user) if not user_permissions: # no user permission rules specified for this doctype return True # user can create own role permissions, so nothing applies if get_role_permissions('User Permission', user=user).get('write'): return True apply_strict_user_permissions = frappe.get_system_settings('apply_strict_user_permissions') doctype = doc.get('doctype') docname = doc.get('name') # STEP 1: --------------------- # check user permissions on self if doctype in user_permissions: allowed_docs = get_allowed_docs_for_doctype(user_permissions.get(doctype, []), doctype) # if allowed_docs is empty it states that there is no applicable permission under the current doctype # only check if allowed_docs is not empty if docname != None and allowed_docs and docname not in allowed_docs: # no user permissions for this doc specified push_perm_check_log(_('Not allowed for {0}: {1}').format(_(doctype), docname)) return False # STEP 2: --------------------------------- # check user permissions in all link fields def check_user_permission_on_link_fields(d): # check user permissions for all the link fields of the given # document object d # # called for both parent and child records meta = frappe.get_meta(d.get("doctype")) # check all link fields for user permissions for field in meta.get_link_fields(): if field.ignore_user_permissions: continue # empty value, do you still want to apply user permissions? if not d.get(field.fieldname) and not apply_strict_user_permissions: # nah, not strict continue if field.options not in user_permissions: continue # get the list of all allowed values for this link allowed_docs = get_allowed_docs_for_doctype(user_permissions.get(field.options, []), doctype) if allowed_docs and d.get(field.fieldname) not in allowed_docs: # restricted for this link field, and no matching values found # make the right message and exit if d.get('parentfield'): # "Not allowed for Company = Restricted Company in Row 3. Restricted field: reference_type" msg = _('Not allowed for {0}: {1} in Row {2}. Restricted field: {3}').format( _(field.options), d.get(field.fieldname), d.idx, field.fieldname) else: # "Not allowed for Company = Restricted Company. Restricted field: reference_type" msg = _('Not allowed for {0}: {1}. Restricted field: {2}').format( _(field.options), d.get(field.fieldname), field.fieldname) push_perm_check_log(msg) return False return True if not check_user_permission_on_link_fields(doc): return False for d in doc.get_all_children(): if not check_user_permission_on_link_fields(d): return False return True
def has_user_permission(doc, user=None): '''Returns True if User is allowed to view considering User Permissions''' from frappe.core.doctype.user_permission.user_permission import get_user_permissions user_permissions = get_user_permissions(user) if not user_permissions: # no user permission rules specified for this doctype return True # user can create own role permissions, so nothing applies if get_role_permissions('User Permission', user=user).get('write'): return True apply_strict_user_permissions = frappe.get_system_settings('apply_strict_user_permissions') doctype = doc.get('doctype') docname = doc.get('name') # STEP 1: --------------------- # check user permissions on self if doctype in user_permissions: allowed_docs = get_allowed_docs_for_doctype(user_permissions.get(doctype, []), doctype) # if allowed_docs is empty it states that there is no applicable permission under the current doctype # only check if allowed_docs is not empty if docname and allowed_docs and docname not in allowed_docs: # no user permissions for this doc specified push_perm_check_log(_('Not allowed for {0}: {1}').format(_(doctype), docname)) return False # STEP 2: --------------------------------- # check user permissions in all link fields def check_user_permission_on_link_fields(d): # check user permissions for all the link fields of the given # document object d # # called for both parent and child records meta = frappe.get_meta(d.get("doctype")) # check all link fields for user permissions for field in meta.get_link_fields(): if field.ignore_user_permissions: continue # empty value, do you still want to apply user permissions? if not d.get(field.fieldname) and not apply_strict_user_permissions: # nah, not strict continue if field.options not in user_permissions: continue # get the list of all allowed values for this link allowed_docs = get_allowed_docs_for_doctype(user_permissions.get(field.options, []), doctype) if allowed_docs and d.get(field.fieldname) not in allowed_docs: # restricted for this link field, and no matching values found # make the right message and exit if d.get('parentfield'): # "Not allowed for Company = Restricted Company in Row 3. Restricted field: reference_type" msg = _('Not allowed for {0}: {1} in Row {2}. Restricted field: {3}').format( _(field.options), d.get(field.fieldname), d.idx, field.fieldname) else: # "Not allowed for Company = Restricted Company. Restricted field: reference_type" msg = _('Not allowed for {0}: {1}. Restricted field: {2}').format( _(field.options), d.get(field.fieldname), field.fieldname) push_perm_check_log(msg) return False return True if not check_user_permission_on_link_fields(doc): return False for d in doc.get_all_children(): if not check_user_permission_on_link_fields(d): return False return True
9,602
def _match_installed_flat_name(module, binary, name, method):
    # This is a difficult function, since if th euser supplies a flatpakref url,
    # we have to rely on a naming convention:
    # The flatpakref file name needs to match the flatpak name
    global result
    parsed_name = _parse_flatpak_name(name)
    # Try running flatpak list with columns feature
    command = "{0} list --{1} --app --columns=application".format(binary, method)
    _flatpak_command(module, False, command, ignore_failure=True)
    if result['rc'] != 0 and OUTDATED_FLATPAK_VERSION_ERROR_MESSAGE in result['stderr']:
        # Probably flatpak before 1.2
        matched_flatpak_name = \
            _match_flat_using_flatpak_column_feature(module, binary, parsed_name, method)
    else:
        # Probably flatpak >= 1.2
        matched_flatpak_name = \
            _match_flat_using_outdated_flatpak_format(module, binary, parsed_name, method)

    if matched_flatpak_name:
        return matched_flatpak_name
    else:
        result['msg'] = "Flatpak removal failed: Could not match any installed flatpaks to " +\
            "the name `{0}`. ".format(_parse_flatpak_name(name)) +\
            "If you used a URL, try using the reverse DNS name of the flatpak"
        module.fail_json(**result)
def _match_installed_flat_name(module, binary, name, method):
    # This is a difficult function, since if the user supplies a flatpakref url,
    # we have to rely on a naming convention:
    # The flatpakref file name needs to match the flatpak name
    global result
    parsed_name = _parse_flatpak_name(name)
    # Try running flatpak list with columns feature
    command = "{0} list --{1} --app --columns=application".format(binary, method)
    _flatpak_command(module, False, command, ignore_failure=True)
    if result['rc'] != 0 and OUTDATED_FLATPAK_VERSION_ERROR_MESSAGE in result['stderr']:
        # Probably flatpak before 1.2
        matched_flatpak_name = \
            _match_flat_using_flatpak_column_feature(module, binary, parsed_name, method)
    else:
        # Probably flatpak >= 1.2
        matched_flatpak_name = \
            _match_flat_using_outdated_flatpak_format(module, binary, parsed_name, method)

    if matched_flatpak_name:
        return matched_flatpak_name
    else:
        result['msg'] = "Flatpak removal failed: Could not match any installed flatpaks to " +\
            "the name `{0}`. ".format(_parse_flatpak_name(name)) +\
            "If you used a URL, try using the reverse DNS name of the flatpak"
        module.fail_json(**result)
43,185
def initialize_model_parallel(
    tensor_model_parallel_size_: int = 1,
    pipeline_model_parallel_size_: int = 1,
    virtual_pipeline_model_parallel_size_: Optional[int] = None,
    pipeline_model_parallel_split_rank_: Optional[int] = None,
    *,
    default_backend: Optional[str] = None,
    p2p_backend: Optional[str] = None,
) -> None:
    """
    Initialize model data parallel groups.

    Arguments:
        tensor_model_parallel_size: number of GPUs used to parallelize model tensor.
        pipeline_model_parallel_size: number of GPUs used to parallelize model pipeline.
        virtual_pipeline_model_parallel_size: number of virtual stages (interleaved pipeline).
        pipeline_model_parallel_split_rank: for models with both encoder and decoder,
            rank in pipeline with split point.

    Keyword Arguments:
        default_backend: Backend of process groups except for pipeline parallel ones.
            If :obj:`None`, the backend specified in `torch.distributed.init_process_group` will be used.
        p2p_backend: Backend of process groups for pipeline model parallel.
            If :obj:`None`, the backend specified in `torch.distributed.init_process_group` will be used.

    .. note::
        `torch_ucc <https://github.com/facebookresearch/torch_ucc>`_ is
        necessary for "ucc" backend.

    Let's say we have a total of 16 GPUs denoted by g0 ... g15 and we
    use 2 GPUs to parallelize the model tensor, and 4 GPUs to parallelize
    the model pipeline. The present function will
    create 8 tensor model-parallel groups, 4 pipeline model-parallel groups
    and 8 data-parallel groups as:
        8 data_parallel groups:
            [g0, g2], [g1, g3], [g4, g6], [g5, g7], [g8, g10], [g9, g11], [g12, g14], [g13, g15]
        8 tensor model-parallel groups:
            [g0, g1], [g2, g3], [g4, g5], [g6, g7], [g8, g9], [g10, g11], [g12, g13], [g14, g15]
        4 pipeline model-parallel groups:
            [g0, g4, g8, g12], [g1, g5, g9, g13], [g2, g6, g10, g14], [g3, g7, g11, g15]
    Note that for efficiency, the caller should make sure adjacent ranks
    are on the same DGX box. For example if we are using 2 DGX-1 boxes
    with a total of 16 GPUs, rank 0 to 7 belong to the first box and
    ranks 8 to 15 belong to the second box.
    """
    # Get world size and rank. Ensure some consistencies.
    assert torch.distributed.is_initialized()
    assert default_backend is None or default_backend in ("nccl", "ucc")
    assert p2p_backend is None or p2p_backend in ("nccl", "ucc")
    if "ucc" in (default_backend, p2p_backend):
        check_torch_ucc_availability()
        warnings.warn("`ucc` backend support is experimental", ExperimentalWarning)
    if default_backend == "ucc":
        warnings.warn("The UCC's functionalit as `default_backend` is not well verified", ExperimentalWarning)

    world_size: int = torch.distributed.get_world_size()
    tensor_model_parallel_size: int = min(tensor_model_parallel_size_, world_size)
    pipeline_model_parallel_size: int = min(pipeline_model_parallel_size_, world_size)
    if world_size % (tensor_model_parallel_size * pipeline_model_parallel_size) != 0:
        raise RuntimeError(
            f"`world_size` ({world_size}) is not divisible by tensor_model_parallel_size ({tensor_model_parallel_size}) x pipeline_model_parallel_size ({pipeline_model_parallel_size})"
        )
    data_parallel_size: int = world_size // (
        tensor_model_parallel_size * pipeline_model_parallel_size
    )
    if torch.distributed.get_rank() == 0:
        _logger.info(
            "> initializing tensor model parallel with size {}".format(
                tensor_model_parallel_size
            )
        )
        _logger.info(
            "> initializing pipeline model parallel with size {}".format(
                pipeline_model_parallel_size
            )
        )
        _logger.info(
            "> initializing data parallel with size {}".format(data_parallel_size)
        )

    num_tensor_model_parallel_groups: int = world_size // tensor_model_parallel_size
    num_pipeline_model_parallel_groups: int = world_size // pipeline_model_parallel_size
    num_data_parallel_groups: int = world_size // data_parallel_size

    if virtual_pipeline_model_parallel_size_ is not None:
        # assert pipeline_model_parallel_size_ > 2, (
        #     "pipeline-model-parallel size should be greater than 2 with "
        #     "interleaved schedule"
        # )
        global _VIRTUAL_PIPELINE_MODEL_PARALLEL_RANK
        global _VIRTUAL_PIPELINE_MODEL_PARALLEL_WORLD_SIZE
        _VIRTUAL_PIPELINE_MODEL_PARALLEL_RANK = 0
        _VIRTUAL_PIPELINE_MODEL_PARALLEL_WORLD_SIZE = (
            virtual_pipeline_model_parallel_size_
        )

    if pipeline_model_parallel_split_rank_ is not None:
        global _PIPELINE_MODEL_PARALLEL_SPLIT_RANK
        _PIPELINE_MODEL_PARALLEL_SPLIT_RANK = pipeline_model_parallel_split_rank_

    rank = torch.distributed.get_rank()

    # Build the data-parallel groups.
    global _DATA_PARALLEL_GROUP
    assert _DATA_PARALLEL_GROUP is None, "data parallel group is already initialized"
    all_data_parallel_group_ranks = []
    for i in range(pipeline_model_parallel_size):
        start_rank = i * num_pipeline_model_parallel_groups
        end_rank = (i + 1) * num_pipeline_model_parallel_groups
        for j in range(tensor_model_parallel_size):
            ranks = range(start_rank + j, end_rank, tensor_model_parallel_size)
            all_data_parallel_group_ranks.append(list(ranks))
            group = torch.distributed.new_group(ranks, backend=default_backend)
            if rank in ranks:
                _DATA_PARALLEL_GROUP = group

    # Build the model-parallel groups.
    global _MODEL_PARALLEL_GROUP
    assert _MODEL_PARALLEL_GROUP is None, "model parallel group is already initialized"
    for i in range(data_parallel_size):
        ranks = [
            data_parallel_group_ranks[i]
            for data_parallel_group_ranks in all_data_parallel_group_ranks
        ]
        group = torch.distributed.new_group(ranks, backend=default_backend)
        if rank in ranks:
            _MODEL_PARALLEL_GROUP = group

    # Build the tensor model-parallel groups.
    global _TENSOR_MODEL_PARALLEL_GROUP
    assert (
        _TENSOR_MODEL_PARALLEL_GROUP is None
    ), "tensor model parallel group is already initialized"
    for i in range(num_tensor_model_parallel_groups):
        ranks = list(
            range(i * tensor_model_parallel_size, (i + 1) * tensor_model_parallel_size)
        )
        group = torch.distributed.new_group(ranks, backend=default_backend)
        if rank in ranks:
            _TENSOR_MODEL_PARALLEL_GROUP = group

    # Build the pipeline model-parallel groups and embedding groups
    # (first and last rank in each pipeline model-parallel group).
    global _PIPELINE_MODEL_PARALLEL_GROUP
    global _PIPELINE_GLOBAL_RANKS
    assert (
        _PIPELINE_MODEL_PARALLEL_GROUP is None
    ), "pipeline model parallel group is already initialized"
    global _EMBEDDING_GROUP
    global _EMBEDDING_GLOBAL_RANKS
    assert _EMBEDDING_GROUP is None, "embedding group is already initialized"
    global _POSITION_EMBEDDING_GROUP
    global _POSITION_EMBEDDING_GLOBAL_RANKS
    assert (
        _POSITION_EMBEDDING_GROUP is None
    ), "position embedding group is already initialized"
    for i in range(num_pipeline_model_parallel_groups):
        ranks = range(i, world_size, num_pipeline_model_parallel_groups)
        group = torch.distributed.new_group(ranks, backend=p2p_backend)
        if rank in ranks:
            _PIPELINE_MODEL_PARALLEL_GROUP = group
            _PIPELINE_GLOBAL_RANKS = ranks
        # Setup embedding group (to exchange gradients between
        # first and last stages).
        if len(ranks) > 1:
            embedding_ranks = [ranks[0], ranks[-1]]
            position_embedding_ranks = [ranks[0]]
            if pipeline_model_parallel_split_rank_ is not None:
                if ranks[pipeline_model_parallel_split_rank_] not in embedding_ranks:
                    embedding_ranks = [
                        ranks[0],
                        ranks[pipeline_model_parallel_split_rank_],
                        ranks[-1],
                    ]
                if (
                    ranks[pipeline_model_parallel_split_rank_]
                    not in position_embedding_ranks
                ):
                    position_embedding_ranks = [
                        ranks[0],
                        ranks[pipeline_model_parallel_split_rank_],
                    ]
        else:
            embedding_ranks = ranks
            position_embedding_ranks = ranks

        group = torch.distributed.new_group(embedding_ranks, backend=default_backend)
        if rank in embedding_ranks:
            _EMBEDDING_GROUP = group
        if rank in ranks:
            _EMBEDDING_GLOBAL_RANKS = embedding_ranks

        group = torch.distributed.new_group(position_embedding_ranks, backend=default_backend)
        if rank in position_embedding_ranks:
            _POSITION_EMBEDDING_GROUP = group
        if rank in ranks:
            _POSITION_EMBEDDING_GLOBAL_RANKS = position_embedding_ranks
def initialize_model_parallel(
    tensor_model_parallel_size_: int = 1,
    pipeline_model_parallel_size_: int = 1,
    virtual_pipeline_model_parallel_size_: Optional[int] = None,
    pipeline_model_parallel_split_rank_: Optional[int] = None,
    *,
    default_backend: Optional[str] = None,
    p2p_backend: Optional[str] = None,
) -> None:
    """
    Initialize model data parallel groups.

    Arguments:
        tensor_model_parallel_size: number of GPUs used to parallelize model tensor.
        pipeline_model_parallel_size: number of GPUs used to parallelize model pipeline.
        virtual_pipeline_model_parallel_size: number of virtual stages (interleaved pipeline).
        pipeline_model_parallel_split_rank: for models with both encoder and decoder,
            rank in pipeline with split point.

    Keyword Arguments:
        default_backend: Backend of process groups except for pipeline parallel ones.
            If :obj:`None`, the backend specified in `torch.distributed.init_process_group` will be used.
        p2p_backend: Backend of process groups for pipeline model parallel.
            If :obj:`None`, the backend specified in `torch.distributed.init_process_group` will be used.

    .. note::
        `torch_ucc <https://github.com/facebookresearch/torch_ucc>`_ is
        necessary for "ucc" backend.

    Let's say we have a total of 16 GPUs denoted by g0 ... g15 and we
    use 2 GPUs to parallelize the model tensor, and 4 GPUs to parallelize
    the model pipeline. The present function will
    create 8 tensor model-parallel groups, 4 pipeline model-parallel groups
    and 8 data-parallel groups as:
        8 data_parallel groups:
            [g0, g2], [g1, g3], [g4, g6], [g5, g7], [g8, g10], [g9, g11], [g12, g14], [g13, g15]
        8 tensor model-parallel groups:
            [g0, g1], [g2, g3], [g4, g5], [g6, g7], [g8, g9], [g10, g11], [g12, g13], [g14, g15]
        4 pipeline model-parallel groups:
            [g0, g4, g8, g12], [g1, g5, g9, g13], [g2, g6, g10, g14], [g3, g7, g11, g15]
    Note that for efficiency, the caller should make sure adjacent ranks
    are on the same DGX box. For example if we are using 2 DGX-1 boxes
    with a total of 16 GPUs, rank 0 to 7 belong to the first box and
    ranks 8 to 15 belong to the second box.
    """
    # Get world size and rank. Ensure some consistencies.
    assert torch.distributed.is_initialized()
    assert default_backend is None or default_backend in ("nccl", "ucc")
    assert p2p_backend is None or p2p_backend in ("nccl", "ucc")
    if "ucc" in (default_backend, p2p_backend):
        check_torch_ucc_availability()
        warnings.warn("`ucc` backend support is experimental", ExperimentalWarning)
    if default_backend == "ucc":
        warnings.warn("The UCC's functionality as `default_backend` is not well verified", ExperimentalWarning)

    world_size: int = torch.distributed.get_world_size()
    tensor_model_parallel_size: int = min(tensor_model_parallel_size_, world_size)
    pipeline_model_parallel_size: int = min(pipeline_model_parallel_size_, world_size)
    if world_size % (tensor_model_parallel_size * pipeline_model_parallel_size) != 0:
        raise RuntimeError(
            f"`world_size` ({world_size}) is not divisible by tensor_model_parallel_size ({tensor_model_parallel_size}) x pipeline_model_parallel_size ({pipeline_model_parallel_size})"
        )
    data_parallel_size: int = world_size // (
        tensor_model_parallel_size * pipeline_model_parallel_size
    )
    if torch.distributed.get_rank() == 0:
        _logger.info(
            "> initializing tensor model parallel with size {}".format(
                tensor_model_parallel_size
            )
        )
        _logger.info(
            "> initializing pipeline model parallel with size {}".format(
                pipeline_model_parallel_size
            )
        )
        _logger.info(
            "> initializing data parallel with size {}".format(data_parallel_size)
        )

    num_tensor_model_parallel_groups: int = world_size // tensor_model_parallel_size
    num_pipeline_model_parallel_groups: int = world_size // pipeline_model_parallel_size
    num_data_parallel_groups: int = world_size // data_parallel_size

    if virtual_pipeline_model_parallel_size_ is not None:
        # assert pipeline_model_parallel_size_ > 2, (
        #     "pipeline-model-parallel size should be greater than 2 with "
        #     "interleaved schedule"
        # )
        global _VIRTUAL_PIPELINE_MODEL_PARALLEL_RANK
        global _VIRTUAL_PIPELINE_MODEL_PARALLEL_WORLD_SIZE
        _VIRTUAL_PIPELINE_MODEL_PARALLEL_RANK = 0
        _VIRTUAL_PIPELINE_MODEL_PARALLEL_WORLD_SIZE = (
            virtual_pipeline_model_parallel_size_
        )

    if pipeline_model_parallel_split_rank_ is not None:
        global _PIPELINE_MODEL_PARALLEL_SPLIT_RANK
        _PIPELINE_MODEL_PARALLEL_SPLIT_RANK = pipeline_model_parallel_split_rank_

    rank = torch.distributed.get_rank()

    # Build the data-parallel groups.
    global _DATA_PARALLEL_GROUP
    assert _DATA_PARALLEL_GROUP is None, "data parallel group is already initialized"
    all_data_parallel_group_ranks = []
    for i in range(pipeline_model_parallel_size):
        start_rank = i * num_pipeline_model_parallel_groups
        end_rank = (i + 1) * num_pipeline_model_parallel_groups
        for j in range(tensor_model_parallel_size):
            ranks = range(start_rank + j, end_rank, tensor_model_parallel_size)
            all_data_parallel_group_ranks.append(list(ranks))
            group = torch.distributed.new_group(ranks, backend=default_backend)
            if rank in ranks:
                _DATA_PARALLEL_GROUP = group

    # Build the model-parallel groups.
    global _MODEL_PARALLEL_GROUP
    assert _MODEL_PARALLEL_GROUP is None, "model parallel group is already initialized"
    for i in range(data_parallel_size):
        ranks = [
            data_parallel_group_ranks[i]
            for data_parallel_group_ranks in all_data_parallel_group_ranks
        ]
        group = torch.distributed.new_group(ranks, backend=default_backend)
        if rank in ranks:
            _MODEL_PARALLEL_GROUP = group

    # Build the tensor model-parallel groups.
    global _TENSOR_MODEL_PARALLEL_GROUP
    assert (
        _TENSOR_MODEL_PARALLEL_GROUP is None
    ), "tensor model parallel group is already initialized"
    for i in range(num_tensor_model_parallel_groups):
        ranks = list(
            range(i * tensor_model_parallel_size, (i + 1) * tensor_model_parallel_size)
        )
        group = torch.distributed.new_group(ranks, backend=default_backend)
        if rank in ranks:
            _TENSOR_MODEL_PARALLEL_GROUP = group

    # Build the pipeline model-parallel groups and embedding groups
    # (first and last rank in each pipeline model-parallel group).
    global _PIPELINE_MODEL_PARALLEL_GROUP
    global _PIPELINE_GLOBAL_RANKS
    assert (
        _PIPELINE_MODEL_PARALLEL_GROUP is None
    ), "pipeline model parallel group is already initialized"
    global _EMBEDDING_GROUP
    global _EMBEDDING_GLOBAL_RANKS
    assert _EMBEDDING_GROUP is None, "embedding group is already initialized"
    global _POSITION_EMBEDDING_GROUP
    global _POSITION_EMBEDDING_GLOBAL_RANKS
    assert (
        _POSITION_EMBEDDING_GROUP is None
    ), "position embedding group is already initialized"
    for i in range(num_pipeline_model_parallel_groups):
        ranks = range(i, world_size, num_pipeline_model_parallel_groups)
        group = torch.distributed.new_group(ranks, backend=p2p_backend)
        if rank in ranks:
            _PIPELINE_MODEL_PARALLEL_GROUP = group
            _PIPELINE_GLOBAL_RANKS = ranks
        # Setup embedding group (to exchange gradients between
        # first and last stages).
        if len(ranks) > 1:
            embedding_ranks = [ranks[0], ranks[-1]]
            position_embedding_ranks = [ranks[0]]
            if pipeline_model_parallel_split_rank_ is not None:
                if ranks[pipeline_model_parallel_split_rank_] not in embedding_ranks:
                    embedding_ranks = [
                        ranks[0],
                        ranks[pipeline_model_parallel_split_rank_],
                        ranks[-1],
                    ]
                if (
                    ranks[pipeline_model_parallel_split_rank_]
                    not in position_embedding_ranks
                ):
                    position_embedding_ranks = [
                        ranks[0],
                        ranks[pipeline_model_parallel_split_rank_],
                    ]
        else:
            embedding_ranks = ranks
            position_embedding_ranks = ranks

        group = torch.distributed.new_group(embedding_ranks, backend=default_backend)
        if rank in embedding_ranks:
            _EMBEDDING_GROUP = group
        if rank in ranks:
            _EMBEDDING_GLOBAL_RANKS = embedding_ranks

        group = torch.distributed.new_group(position_embedding_ranks, backend=default_backend)
        if rank in position_embedding_ranks:
            _POSITION_EMBEDDING_GROUP = group
        if rank in ranks:
            _POSITION_EMBEDDING_GLOBAL_RANKS = position_embedding_ranks
40,856
def inference_pipeline(batch_size, process_fn=None, transform_fns=None, length_bucket_width=None, length_fn=None, num_threads=None, prefetch_buffer_size=None): """Transformation that applies dataset operations for inference. Example: >>> dataset = dataset.apply(opennmt.data.inference_pipeline(...)) Args: batch_size: The batch size to use. process_fn: The processing function to apply on each element. transform_fns: list of transformation functions (only map for inference) to apply on the batch. More generic than `process_fn`. length_bucket_width: The width of the length buckets to select batch candidates from. If set, this means the inference pipeline will be reordered based on the examples length, the application is then responsible to restore the predictions in order. An "index" key will be inserted in the examples dictionary. length_fn: A function mapping features to a sequence length. num_threads: The number of elements processed in parallel. prefetch_buffer_size: The number of batches to prefetch asynchronously. If ``None``, use an automatically tuned value. Returns: A ``tf.data.Dataset`` transformation. Raises: ValueError: if :obj:`length_bucket_width` is set but not :obj:`length_fn`. ValueError: if :obj:`length_bucket_width` is set but the dataset does not output a dictionary structure. """ def _inject_index(index, x): x["index"] = index return x def _pipeline(dataset): if process_fn is not None: dataset = dataset.map(process_fn, num_parallel_calls=num_threads) for transform_fn in transform_fns: dataset = dataset.apply(transform_fn) if length_bucket_width is not None and length_bucket_width > 0: if length_fn is None: raise ValueError("length_fn is required when reordering by length") if not isinstance(_get_output_shapes(dataset), dict): raise ValueError("Reordering by length expects dataset elements to be Python dicts") dataset = dataset.enumerate() dataset = dataset.map(_inject_index) dataset = dataset.apply(batch_sequence_dataset( batch_size, length_bucket_width=length_bucket_width, length_fn=length_fn)) else: dataset = dataset.apply(batch_dataset(batch_size)) dataset = dataset.prefetch(prefetch_buffer_size) return dataset return _pipeline
def inference_pipeline(batch_size, process_fn=None, transform_fns=None, length_bucket_width=None, length_fn=None, num_threads=None, prefetch_buffer_size=None): """Transformation that applies dataset operations for inference. Example: >>> dataset = dataset.apply(opennmt.data.inference_pipeline(...)) Args: batch_size: The batch size to use. process_fn: The processing function to apply on each element. transform_fns: list of transformation functions (only map for inference) to apply on the dataset. More generic than `process_fn`. length_bucket_width: The width of the length buckets to select batch candidates from. If set, this means the inference pipeline will be reordered based on the examples length, the application is then responsible to restore the predictions in order. An "index" key will be inserted in the examples dictionary. length_fn: A function mapping features to a sequence length. num_threads: The number of elements processed in parallel. prefetch_buffer_size: The number of batches to prefetch asynchronously. If ``None``, use an automatically tuned value. Returns: A ``tf.data.Dataset`` transformation. Raises: ValueError: if :obj:`length_bucket_width` is set but not :obj:`length_fn`. ValueError: if :obj:`length_bucket_width` is set but the dataset does not output a dictionary structure. """ def _inject_index(index, x): x["index"] = index return x def _pipeline(dataset): if process_fn is not None: dataset = dataset.map(process_fn, num_parallel_calls=num_threads) for transform_fn in transform_fns: dataset = dataset.apply(transform_fn) if length_bucket_width is not None and length_bucket_width > 0: if length_fn is None: raise ValueError("length_fn is required when reordering by length") if not isinstance(_get_output_shapes(dataset), dict): raise ValueError("Reordering by length expects dataset elements to be Python dicts") dataset = dataset.enumerate() dataset = dataset.map(_inject_index) dataset = dataset.apply(batch_sequence_dataset( batch_size, length_bucket_width=length_bucket_width, length_fn=length_fn)) else: dataset = dataset.apply(batch_dataset(batch_size)) dataset = dataset.prefetch(prefetch_buffer_size) return dataset return _pipeline
8,434
def test_regression_658():
    # see https://github.com/astropy/specutils/issues/658 for issue context
    obs = SkyCoord(0 * u.m, 0 * u.m, 0 * u.m, representation_type='cartesian')

    coord = SpectralCoord([1, 2, 3] * u.micron, observer=obs)

    # coord.target = SkyCoord.from_name('m31')  # <- original issue, but below is the same but requires no remote data access
    coord.target = SkyCoord(ra=10.68470833*u.deg, dec=41.26875*u.deg)
def test_spectral_coord_from_sky_coord_without_distance():
    # see https://github.com/astropy/specutils/issues/658 for issue context
    obs = SkyCoord(0 * u.m, 0 * u.m, 0 * u.m, representation_type='cartesian')

    coord = SpectralCoord([1, 2, 3] * u.micron, observer=obs)

    # coord.target = SkyCoord.from_name('m31')  # <- original issue, but below is the same but requires no remote data access
    coord.target = SkyCoord(ra=10.68470833*u.deg, dec=41.26875*u.deg)
54,827
def chop_in_blocks_multi(m, idtodelete):
    """
    Splits an array of (symmetric) matrices each into 3 blocks, A, B, C
    Blocks A and C are diagonal blocks and B is the offdiagonal block
    idtodelete specifies which indices go into C.
    """
    A = np.copy(m)
    A = np.delete(A, idtodelete, axis=1)
    A = np.delete(A, idtodelete, axis=2)
    B = np.delete(m[:, :, idtodelete], idtodelete, axis=1)
    C = m[:, idtodelete, :][:, :, idtodelete]
    return (A, B, C)
def chop_in_blocks_multi(m, idtodelete):
    """
    Splits an array of (symmetric) matrices each into 3 blocks (``A``, ``B``, ``C``).

    Blocks ``A`` and ``C`` are diagonal blocks and ``B`` is the offdiagonal block.

    Args:
        m (ndarray): array of matrices
        id_to_delete (ndarray): array for the indices that go into ``C``

    Returns:
        tuple: tuple of the ``A``, ``B`` and ``C`` matrices
    """
    A = np.copy(m)
    A = np.delete(A, idtodelete, axis=1)
    A = np.delete(A, idtodelete, axis=2)
    B = np.delete(m[:, :, idtodelete], idtodelete, axis=1)
    C = m[:, idtodelete, :][:, :, idtodelete]
    return (A, B, C)
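A quick illustration of the block shapes returned by `chop_in_blocks_multi`; the input shape and indices are chosen purely for the example and are not part of the record.

# Illustrative only: block shapes for a stack of two 4x4 matrices.
import numpy as np

m = np.arange(2 * 4 * 4).reshape(2, 4, 4)
A, B, C = chop_in_blocks_multi(m, idtodelete=[1, 3])
print(A.shape, B.shape, C.shape)  # (2, 2, 2) (2, 2, 2) (2, 2, 2)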
57,764
def main() -> None: """main function, parses params and runs command functions """ params = demisto.params() command = demisto.command() args = demisto.args() demisto.debug(f'Command being called is {command}') try: client = Client( app_id=params.get('app_id', ''), subscription_id=params.get('subscription_id', ''), resource_group_name=params.get('resource_group_name', ''), verify=not params.get('insecure', False), proxy=params.get('proxy', False), ) commands = { 'azure-sql-servers-list': azure_sql_servers_list_command, 'azure-sql-db-list': azure_sql_db_list_command, 'azure-sql-db-audit-policy-list': azure_sql_db_audit_policy_list_command, 'azure-sql-db-audit-policy-create-update': azure_sql_db_audit_policy_create_update_command, 'azure-sql-db-threat-policy-get': azure_sql_db_threat_policy_get_command, 'azure-sql-db-threat-policy-create-update': azure_sql_db_threat_policy_create_update_command, 'azure-sql-auth-start': start_auth, 'azure-sql-auth-complete': complete_auth, 'azure-sql-auth-reset': reset_auth, } if command == 'test-module': return_error("Please run `!azure-sql-auth-start` and `!azure-sql-auth-complete` to log in." " For more details press the (?) button.") if command == 'azure-sql-auth-test': return_results(test_connection(client)) else: return_results(commands[command](client, **args)) # Log exceptions and return errors except Exception as e: demisto.error(traceback.format_exc()) # print the traceback return_error(f'Failed to execute {demisto.command()} command.\nError:\n{str(e)}')
def main() -> None: """main function, parses params and runs command functions """ params = demisto.params() command = demisto.command() args = demisto.args() demisto.debug(f'Command being called is {command}') try: client = Client( app_id=params.get('app_id', ''), subscription_id=params.get('subscription_id', ''), resource_group_name=params.get('resource_group_name', ''), verify=not params.get('insecure', False), proxy=params.get('proxy', False), ) commands = { 'azure-sql-servers-list': azure_sql_servers_list_command, 'azure-sql-db-list': azure_sql_db_list_command, 'azure-sql-db-audit-policy-list': azure_sql_db_audit_policy_list_command, 'azure-sql-db-audit-policy-create-update': azure_sql_db_audit_policy_create_update_command, 'azure-sql-db-threat-policy-get': azure_sql_db_threat_policy_get_command, 'azure-sql-db-threat-policy-create-update': azure_sql_db_threat_policy_create_update_command, 'azure-sql-auth-start': start_auth, 'azure-sql-auth-complete': complete_auth, 'azure-sql-auth-reset': reset_auth, } if command == 'test-module': return_error("Please run `!azure-sql-auth-start` and `!azure-sql-auth-complete` to log in. You can validate the connection by running `!azure-sql-auth-test`\n" " For more details press the (?) button.") if command == 'azure-sql-auth-test': return_results(test_connection(client)) else: return_results(commands[command](client, **args)) # Log exceptions and return errors except Exception as e: demisto.error(traceback.format_exc()) # print the traceback return_error(f'Failed to execute {demisto.command()} command.\nError:\n{str(e)}')
30,851
def hash_multiple(value, fields_to_hash, to_hash=False): if isinstance(value, list): if not value: return [] else: return list(map(lambda x: hash_multiple(x, fields_to_hash, to_hash), value)) if isinstance(value, dict): for k, v in value.items(): _hash = to_hash or is_key_match_fields_to_hash(k, fields_to_hash) value[k] = hash_multiple(v, fields_to_hash, _hash) return value else: try: if isinstance(value, (int, float, bool)): to_hash = False if not isinstance(value, str): value = str(value) except Exception: value = "" if to_hash and value: return hash_value(value) else: return value
def hash_multiple(value, fields_to_hash, to_hash=False): if isinstance(value, list): return list(map(lambda x: hash_multiple(x, fields_to_hash, to_hash), value)) if isinstance(value, dict): for k, v in value.items(): _hash = to_hash or is_key_match_fields_to_hash(k, fields_to_hash) value[k] = hash_multiple(v, fields_to_hash, _hash) return value else: try: if isinstance(value, (int, float, bool)): to_hash = False if not isinstance(value, str): value = str(value) except Exception: value = "" if to_hash and value: return hash_value(value) else: return value
6,844
def update_assignments(old, new, doctype): old_assignments = frappe.parse_json(frappe.db.get_value(doctype, old, '_assign')) or [] new_assignments = frappe.parse_json(frappe.db.get_value(doctype, new, '_assign')) or [] common_assignments = list(set(old_assignments).intersection(new_assignments)) for user in common_assignments: # delete todos linked to old doc todos = frappe.db.get_all('ToDo', { 'owner': user, 'reference_type': doctype, 'reference_name': old, }, ['name', 'description'] ) for todo in todos: frappe.delete_doc('ToDo', todo.name) unique_assignments = list(set(old_assignments + new_assignments)) frappe.db.set_value(doctype, new, '_assign', json.dumps(unique_assignments))
def update_assignments(old, new, doctype): old_assignments = frappe.parse_json(frappe.db.get_value(doctype, old, '_assign')) or [] new_assignments = frappe.parse_json(frappe.db.get_value(doctype, new, '_assign')) or [] common_assignments = list(set(old_assignments).intersection(new_assignments)) for user in common_assignments: # delete todos linked to old doc todos = frappe.db.get_all('ToDo', { 'owner': user, 'reference_type': doctype, 'reference_name': old, }, ['name', 'description'] ) for todo in todos: frappe.delete_doc('ToDo', todo.name) unique_assignments = list(set(old_assignments + new_assignments)) frappe.db.set_value(doctype, new, '_assign', frappe.as_json(unique_assignments), indent=0)
22,716
def main(): print('Gather runtime data') try: subprocess.check_output(['choco', '--version']) except subprocess.CalledProcessError: raise RuntimeError('Error: Chocolatey (https://chocolatey.org/) needs' 'to be installed to run this script.') script_path = os.path.realpath(__file__) repo_path = os.path.dirname(os.path.dirname(script_path)) build_path = os.path.join(repo_path, 'windows-installer', 'build') venv_path = os.path.join(build_path, 'venv-config') venv_python = os.path.join(venv_path, 'Scripts', 'python.exe') installer_cfg_path = os.path.join(build_path, 'installer.cfg') wheels_path = os.path.join(build_path, 'wheels') certbot_version = subprocess.check_output([sys.executable, '-c', 'import certbot; print(certbot.__version__)'], universal_newlines=True, cwd=repo_path).strip() certbot_packages = ['acme', '.'] certbot_packages.extend([name for name in os.listdir(repo_path) if name.startswith('certbot-dns-')]) print('Copy assets') os.makedirs(build_path, exist_ok=True) shutil.copy(os.path.join(repo_path, 'windows-installer', 'certbot.ico'), build_path) shutil.copy(os.path.join(repo_path, 'windows-installer', 'run.py'), build_path) print('Prepare pynsist config') with open(os.path.join(installer_cfg_path), 'w') as file_h: file_h.write("""\ [Application] name=Certbot version={certbot_version} icon=certbot.ico publisher=Electronic Frontier Fundation script=run.py [Build] directory=nsis installer_name=certbot-{certbot_version}-win32_install.exe [Python] version=3.7.0 [Include] local_wheels=wheels\*.whl [Command certbot] entry_point=certbot.main:main """.format(certbot_version=certbot_version)) print('Prepare build environment') subprocess.check_call([sys.executable, '-m', 'venv', '--clear', venv_path]) subprocess.check_call(['choco', 'upgrade', '-y', 'nsis']) subprocess.check_call([venv_python, '-m', 'pip', 'install', '--upgrade', 'pip']) shutil.rmtree(wheels_path, ignore_errors=True) os.makedirs(wheels_path, exist_ok=True) subprocess.check_call([venv_python, '-m', 'pip', 'install', 'wheel', 'pynsist']) print('Compile wheels') wheels_project = [os.path.join(repo_path, package) for package in certbot_packages] command = [venv_python, '-m', 'pip', 'wheel', '-w', wheels_path] command.extend(wheels_project) subprocess.check_call(command) print('Build the installer') subprocess.check_call([os.path.join(venv_path, 'Scripts', 'pynsist.exe'), installer_cfg_path]) print('Done')
def main(): print('Gather runtime data') try: subprocess.check_output(['choco', '--version']) except subprocess.CalledProcessError: raise RuntimeError('Error: Chocolatey (https://chocolatey.org/) needs' 'to be installed to run this script.') script_path = os.path.realpath(__file__) repo_path = os.path.dirname(os.path.dirname(script_path)) build_path = os.path.join(repo_path, 'windows-installer', 'build') venv_path = os.path.join(build_path, 'venv-config') venv_python = os.path.join(venv_path, 'Scripts', 'python.exe') installer_cfg_path = os.path.join(build_path, 'installer.cfg') wheels_path = os.path.join(build_path, 'wheels') certbot_version = subprocess.check_output([sys.executable, '-c', 'import certbot; print(certbot.__version__)'], universal_newlines=True, cwd=repo_path).strip() certbot_packages = ['acme', '.'] certbot_packages.extend([name for name in os.listdir(repo_path) if name.startswith('certbot-dns-')]) print('Copy assets') os.makedirs(build_path, exist_ok=True) shutil.copy(os.path.join(repo_path, 'windows-installer', 'certbot.ico'), build_path) shutil.copy(os.path.join(repo_path, 'windows-installer', 'run.py'), build_path) print('Prepare pynsist config') with open(os.path.join(installer_cfg_path), 'w') as file_h: file_h.write("""\ [Application] name=Certbot version={certbot_version} icon=certbot.ico publisher=Electronic Frontier Foundation script=run.py [Build] directory=nsis installer_name=certbot-{certbot_version}-win32_install.exe [Python] version=3.7.0 [Include] local_wheels=wheels\*.whl [Command certbot] entry_point=certbot.main:main """.format(certbot_version=certbot_version)) print('Prepare build environment') subprocess.check_call([sys.executable, '-m', 'venv', '--clear', venv_path]) subprocess.check_call(['choco', 'upgrade', '-y', 'nsis']) subprocess.check_call([venv_python, '-m', 'pip', 'install', '--upgrade', 'pip']) shutil.rmtree(wheels_path, ignore_errors=True) os.makedirs(wheels_path, exist_ok=True) subprocess.check_call([venv_python, '-m', 'pip', 'install', 'wheel', 'pynsist']) print('Compile wheels') wheels_project = [os.path.join(repo_path, package) for package in certbot_packages] command = [venv_python, '-m', 'pip', 'wheel', '-w', wheels_path] command.extend(wheels_project) subprocess.check_call(command) print('Build the installer') subprocess.check_call([os.path.join(venv_path, 'Scripts', 'pynsist.exe'), installer_cfg_path]) print('Done')
47,162
def main(): args = parse_args() # Initialize the accelerator. We will let the accelerator handle device placement for us in this example. accelerator = Accelerator() # Make one log on every process with the configuration for debugging. logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO, ) logger.info(accelerator.state) # Setup logging, we only want one process per machine to log things on the screen. # accelerator.is_local_main_process is only True for one process per machine. logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR) if accelerator.is_local_main_process: datasets.utils.logging.set_verbosity_warning() transformers.utils.logging.set_verbosity_info() else: datasets.utils.logging.set_verbosity_error() transformers.utils.logging.set_verbosity_error() # If passed along, set the training seed now. if args.seed is not None: set_seed(args.seed) # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below) # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/ # (the dataset will be downloaded automatically from the datasets Hub). # # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called # 'text' is found. You can easily tweak this behavior (see below). # # In distributed training, the load_dataset function guarantee that only one local process can concurrently # download the dataset. if args.dataset_name is not None: # Downloading and loading a dataset from the hub. raw_datasets = load_dataset(args.dataset_name, args.dataset_config_name) if "validation" not in raw_datasets.keys(): raw_datasets["validation"] = load_dataset( args.dataset_name, args.dataset_config_name, split=f"train[:{args.validation_split_percentage}%]", ) raw_datasets["train"] = load_dataset( args.dataset_name, args.dataset_config_name, split=f"train[{args.validation_split_percentage}%:]", ) else: data_files = {} if args.train_file is not None: data_files["train"] = args.train_file if args.validation_file is not None: data_files["validation"] = args.validation_file extension = args.train_file.split(".")[-1] if extension == "txt": extension = "text" raw_datasets = load_dataset(extension, data_files=data_files) # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at # https://huggingface.co/docs/datasets/loading_datasets.html. # Load pretrained model and tokenizer # # In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. if args.config_name: config = AutoConfig.from_pretrained(args.config_name) elif args.model_name_or_path: config = AutoConfig.from_pretrained(args.model_name_or_path) else: config = CONFIG_MAPPING[args.model_type]() logger.warning("You are instantiating a new config instance from scratch.") if args.tokenizer_name: tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name, use_fast=not args.use_slow_tokenizer) elif args.model_name_or_path: tokenizer = AutoTokenizer.from_pretrained(args.model_name_or_path, use_fast=not args.use_slow_tokenizer) else: raise ValueError( "You are instantiating a new tokenizer from scratch. This is not supported by this script." "You can do it from another script, save it, and load it from here, using --tokenizer_name." 
) if args.model_name_or_path: model = AutoModelForCausalLM.from_pretrained( args.model_name_or_path, from_tf=bool(".ckpt" in args.model_name_or_path), config=config, ) else: logger.info("Training new model from scratch") model = AutoModelForCausalLM.from_config(config) model.resize_token_embeddings(len(tokenizer)) # Preprocessing the datasets. # First we tokenize all the texts. column_names = raw_datasets["train"].column_names text_column_name = "text" if "text" in column_names else column_names[0] def tokenize_function(examples): return tokenizer(examples[text_column_name]) tokenized_datasets = raw_datasets.map( tokenize_function, batched=True, num_proc=args.preprocessing_num_workers, remove_columns=column_names, load_from_cache_file=not args.overwrite_cache, desc="Running tokenizer on dataset", ) if args.block_size is None: block_size = tokenizer.model_max_length if block_size > 1024: logger.warning( f"The tokenizer picked seems to have a very large `model_max_length` ({tokenizer.model_max_length}). " "Picking 1024 instead. You can change that default value by passing --block_size xxx." ) block_size = 1024 else: if args.block_size > tokenizer.model_max_length: logger.warning( f"The block_size passed ({args.block_size}) is larger than the maximum length for the model" f"({tokenizer.model_max_length}). Using block_size={tokenizer.model_max_length}." ) block_size = min(args.block_size, tokenizer.model_max_length) # Main data processing function that will concatenate all texts from our dataset and generate chunks of block_size. def group_texts(examples): # Concatenate all texts. concatenated_examples = {k: sum(examples[k], []) for k in examples.keys()} total_length = len(concatenated_examples[list(examples.keys())[0]]) # We drop the small remainder, we could add padding if the model supported it instead of this drop, you can # customize this part to your needs. total_length = (total_length // block_size) * block_size # Split by chunks of max_len. result = { k: [t[i : i + block_size] for i in range(0, total_length, block_size)] for k, t in concatenated_examples.items() } result["labels"] = result["input_ids"].copy() return result # Note that with `batched=True`, this map processes 1,000 texts together, so group_texts throws away a remainder # for each of those groups of 1,000 texts. You can adjust that batch_size here but a higher value might be slower # to preprocess. # # To speed up this part, we use multiprocessing. See the documentation of the map method for more information: # https://huggingface.co/docs/datasets/package_reference/main_classes.html#datasets.Dataset.map lm_datasets = tokenized_datasets.map( group_texts, batched=True, num_proc=args.preprocessing_num_workers, load_from_cache_file=not args.overwrite_cache, desc="Running tokenizer on concatenated texts", # not sure if it's right ) train_dataset = lm_datasets["train"] eval_dataset = lm_datasets["validation"] # Log a few random samples from the training set: for index in random.sample(range(len(train_dataset)), 3): logger.info(f"Sample {index} of the training set: {train_dataset[index]}.") # DataLoaders creation: train_dataloader = DataLoader( train_dataset, shuffle=True, collate_fn=default_data_collator, batch_size=args.per_device_train_batch_size ) eval_dataloader = DataLoader( eval_dataset, collate_fn=default_data_collator, batch_size=args.per_device_eval_batch_size ) # Optimizer # Split weights in two groups, one with weight decay and the other not. 
no_decay = ["bias", "LayerNorm.weight"] optimizer_grouped_parameters = [ { "params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)], "weight_decay": args.weight_decay, }, { "params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], "weight_decay": 0.0, }, ] optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate) # Prepare everything with our `accelerator`. model, optimizer, train_dataloader, eval_dataloader = accelerator.prepare( model, optimizer, train_dataloader, eval_dataloader ) # Note -> the training dataloader needs to be prepared before we grab his length below (cause its length will be # shorter in multiprocess) # Scheduler and math around the number of training steps. num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps) if args.max_train_steps is None: args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch else: args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch) lr_scheduler = get_scheduler( name=args.lr_scheduler_type, optimizer=optimizer, num_warmup_steps=args.num_warmup_steps, num_training_steps=args.max_train_steps, ) # Train! total_batch_size = args.per_device_train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps logger.info("***** Running training *****") logger.info(f" Num examples = {len(train_dataset)}") logger.info(f" Num Epochs = {args.num_train_epochs}") logger.info(f" Instantaneous batch size per device = {args.per_device_train_batch_size}") logger.info(f" Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}") logger.info(f" Gradient Accumulation steps = {args.gradient_accumulation_steps}") logger.info(f" Total optimization steps = {args.max_train_steps}") # Only show the progress bar once on each machine. progress_bar = tqdm(range(args.max_train_steps), disable=not accelerator.is_local_main_process) completed_steps = 0 for epoch in range(args.num_train_epochs): model.train() for step, batch in enumerate(train_dataloader): outputs = model(**batch) loss = outputs.loss loss = loss / args.gradient_accumulation_steps accelerator.backward(loss) if step % args.gradient_accumulation_steps == 0 or step == len(train_dataloader) - 1: optimizer.step() lr_scheduler.step() optimizer.zero_grad() progress_bar.update(1) completed_steps += 1 if completed_steps >= args.max_train_steps: break model.eval() losses = [] for step, batch in enumerate(eval_dataloader): with torch.no_grad(): outputs = model(**batch) loss = outputs.loss losses.append(accelerator.gather(loss.repeat(args.per_device_eval_batch_size))) losses = torch.cat(losses) losses = losses[: len(eval_dataset)] try: perplexity = math.exp(torch.mean(losses)) except OverflowError: perplexity = float("inf") logger.info(f"epoch {epoch}: perplexity: {perplexity}") if args.output_dir is not None: accelerator.wait_for_everyone() unwrapped_model = accelerator.unwrap_model(model) unwrapped_model.save_pretrained(args.output_dir, save_function=accelerator.save)
def main(): args = parse_args() # Initialize the accelerator. We will let the accelerator handle device placement for us in this example. accelerator = Accelerator() # Make one log on every process with the configuration for debugging. logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO, ) logger.info(accelerator.state) # Setup logging, we only want one process per machine to log things on the screen. # accelerator.is_local_main_process is only True for one process per machine. logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR) if accelerator.is_local_main_process: datasets.utils.logging.set_verbosity_warning() transformers.utils.logging.set_verbosity_info() else: datasets.utils.logging.set_verbosity_error() transformers.utils.logging.set_verbosity_error() # If passed along, set the training seed now. if args.seed is not None: set_seed(args.seed) # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below) # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/ # (the dataset will be downloaded automatically from the datasets Hub). # # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called # 'text' is found. You can easily tweak this behavior (see below). # # In distributed training, the load_dataset function guarantee that only one local process can concurrently # download the dataset. if args.dataset_name is not None: # Downloading and loading a dataset from the hub. raw_datasets = load_dataset(args.dataset_name, args.dataset_config_name) if "validation" not in raw_datasets.keys(): raw_datasets["validation"] = load_dataset( args.dataset_name, args.dataset_config_name, split=f"train[:{args.validation_split_percentage}%]", ) raw_datasets["train"] = load_dataset( args.dataset_name, args.dataset_config_name, split=f"train[{args.validation_split_percentage}%:]", ) else: data_files = {} if args.train_file is not None: data_files["train"] = args.train_file if args.validation_file is not None: data_files["validation"] = args.validation_file extension = args.train_file.split(".")[-1] if extension == "txt": extension = "text" raw_datasets = load_dataset(extension, data_files=data_files) # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at # https://huggingface.co/docs/datasets/loading_datasets.html. # Load pretrained model and tokenizer # # In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. if args.config_name: config = AutoConfig.from_pretrained(args.config_name) elif args.model_name_or_path: config = AutoConfig.from_pretrained(args.model_name_or_path) else: config = CONFIG_MAPPING[args.model_type]() logger.warning("You are instantiating a new config instance from scratch.") if args.tokenizer_name: tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name, use_fast=not args.use_slow_tokenizer) elif args.model_name_or_path: tokenizer = AutoTokenizer.from_pretrained(args.model_name_or_path, use_fast=not args.use_slow_tokenizer) else: raise ValueError( "You are instantiating a new tokenizer from scratch. This is not supported by this script." "You can do it from another script, save it, and load it from here, using --tokenizer_name." 
) if args.model_name_or_path: model = AutoModelForCausalLM.from_pretrained( args.model_name_or_path, from_tf=bool(".ckpt" in args.model_name_or_path), config=config, ) else: logger.info("Training new model from scratch") model = AutoModelForCausalLM.from_config(config) model.resize_token_embeddings(len(tokenizer)) # Preprocessing the datasets. # First we tokenize all the texts. column_names = raw_datasets["train"].column_names text_column_name = "text" if "text" in column_names else column_names[0] def tokenize_function(examples): return tokenizer(examples[text_column_name]) tokenized_datasets = raw_datasets.map( tokenize_function, batched=True, num_proc=args.preprocessing_num_workers, remove_columns=column_names, load_from_cache_file=not args.overwrite_cache, desc="Running tokenizer on dataset", ) if args.block_size is None: block_size = tokenizer.model_max_length if block_size > 1024: logger.warning( f"The tokenizer picked seems to have a very large `model_max_length` ({tokenizer.model_max_length}). " "Picking 1024 instead. You can change that default value by passing --block_size xxx." ) block_size = 1024 else: if args.block_size > tokenizer.model_max_length: logger.warning( f"The block_size passed ({args.block_size}) is larger than the maximum length for the model" f"({tokenizer.model_max_length}). Using block_size={tokenizer.model_max_length}." ) block_size = min(args.block_size, tokenizer.model_max_length) # Main data processing function that will concatenate all texts from our dataset and generate chunks of block_size. def group_texts(examples): # Concatenate all texts. concatenated_examples = {k: sum(examples[k], []) for k in examples.keys()} total_length = len(concatenated_examples[list(examples.keys())[0]]) # We drop the small remainder, we could add padding if the model supported it instead of this drop, you can # customize this part to your needs. total_length = (total_length // block_size) * block_size # Split by chunks of max_len. result = { k: [t[i : i + block_size] for i in range(0, total_length, block_size)] for k, t in concatenated_examples.items() } result["labels"] = result["input_ids"].copy() return result # Note that with `batched=True`, this map processes 1,000 texts together, so group_texts throws away a remainder # for each of those groups of 1,000 texts. You can adjust that batch_size here but a higher value might be slower # to preprocess. # # To speed up this part, we use multiprocessing. See the documentation of the map method for more information: # https://huggingface.co/docs/datasets/package_reference/main_classes.html#datasets.Dataset.map lm_datasets = tokenized_datasets.map( group_texts, batched=True, num_proc=args.preprocessing_num_workers, load_from_cache_file=not args.overwrite_cache, desc=f"Grouping texts in chunks of {max_seq_length}" ) train_dataset = lm_datasets["train"] eval_dataset = lm_datasets["validation"] # Log a few random samples from the training set: for index in random.sample(range(len(train_dataset)), 3): logger.info(f"Sample {index} of the training set: {train_dataset[index]}.") # DataLoaders creation: train_dataloader = DataLoader( train_dataset, shuffle=True, collate_fn=default_data_collator, batch_size=args.per_device_train_batch_size ) eval_dataloader = DataLoader( eval_dataset, collate_fn=default_data_collator, batch_size=args.per_device_eval_batch_size ) # Optimizer # Split weights in two groups, one with weight decay and the other not. 
no_decay = ["bias", "LayerNorm.weight"] optimizer_grouped_parameters = [ { "params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)], "weight_decay": args.weight_decay, }, { "params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], "weight_decay": 0.0, }, ] optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate) # Prepare everything with our `accelerator`. model, optimizer, train_dataloader, eval_dataloader = accelerator.prepare( model, optimizer, train_dataloader, eval_dataloader ) # Note -> the training dataloader needs to be prepared before we grab his length below (cause its length will be # shorter in multiprocess) # Scheduler and math around the number of training steps. num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps) if args.max_train_steps is None: args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch else: args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch) lr_scheduler = get_scheduler( name=args.lr_scheduler_type, optimizer=optimizer, num_warmup_steps=args.num_warmup_steps, num_training_steps=args.max_train_steps, ) # Train! total_batch_size = args.per_device_train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps logger.info("***** Running training *****") logger.info(f" Num examples = {len(train_dataset)}") logger.info(f" Num Epochs = {args.num_train_epochs}") logger.info(f" Instantaneous batch size per device = {args.per_device_train_batch_size}") logger.info(f" Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}") logger.info(f" Gradient Accumulation steps = {args.gradient_accumulation_steps}") logger.info(f" Total optimization steps = {args.max_train_steps}") # Only show the progress bar once on each machine. progress_bar = tqdm(range(args.max_train_steps), disable=not accelerator.is_local_main_process) completed_steps = 0 for epoch in range(args.num_train_epochs): model.train() for step, batch in enumerate(train_dataloader): outputs = model(**batch) loss = outputs.loss loss = loss / args.gradient_accumulation_steps accelerator.backward(loss) if step % args.gradient_accumulation_steps == 0 or step == len(train_dataloader) - 1: optimizer.step() lr_scheduler.step() optimizer.zero_grad() progress_bar.update(1) completed_steps += 1 if completed_steps >= args.max_train_steps: break model.eval() losses = [] for step, batch in enumerate(eval_dataloader): with torch.no_grad(): outputs = model(**batch) loss = outputs.loss losses.append(accelerator.gather(loss.repeat(args.per_device_eval_batch_size))) losses = torch.cat(losses) losses = losses[: len(eval_dataset)] try: perplexity = math.exp(torch.mean(losses)) except OverflowError: perplexity = float("inf") logger.info(f"epoch {epoch}: perplexity: {perplexity}") if args.output_dir is not None: accelerator.wait_for_everyone() unwrapped_model = accelerator.unwrap_model(model) unwrapped_model.save_pretrained(args.output_dir, save_function=accelerator.save)
58,190
def parse_pyformat_insertion_values(text: str) -> str: """Parses the text after 'VALUES' in an insertion query and return a pair of balanced parentheses with any text in between. Args: text: the VALUES clause of an insertion query, should have no leading whitespace """ open = 0 in_quotes = False for i in range(len(text)): if not in_quotes: if text[i] == "(": open += 1 elif text[i] == ")": open -= 1 elif text[i] == "'": in_quotes = True elif text[i] == "'": in_quotes = False if open == 0: return text[: i + 1] elif open < 0: raise ValueError("Parser finds more closing brackets than opening brackets") raise ValueError("Parser cannot find balanced parentheses.")
def parse_pyformat_insertion_values(text: str) -> str: """Parses the text after 'VALUES' in an insertion query and return a pair of balanced parentheses with any text in between. Args: text: the VALUES clause of an insertion query, should have no leading whitespace """ open = 0 in_quotes = False for i in range(len(text)): if not in_quotes: if text[i] == "(": open += 1 elif text[i] == ")": open -= 1 if text[i] == "'": in_quotes = not in_quotes if open == 0: return text[: i + 1] elif open < 0: raise ValueError("Parser finds more closing brackets than opening brackets") raise ValueError("Parser cannot find balanced parentheses.")
36,982
def test_image_array_old_wandb_mp_warning(test_settings, capsys, monkeypatch): monkeypatch.setattr(wandb.util, "_get_max_cli_version", lambda: "0.10.33") with mock.patch.dict("os.environ", WANDB_REQUIRE_SERVICE="True"): with wandb.init(settings=test_settings) as run: wb_image = [wandb.Image(image) for _ in range(5)] run._init_pid += 1 run.log({"logged_images": wb_image}) outerr = capsys.readouterr() assert ( "Trying to log a sequence of Image(s) from multiple processes might cause for data loss. Please upgrade your wandb server" in outerr.err )
def test_image_array_old_wandb_mp_warning(test_settings, capsys, monkeypatch): monkeypatch.setattr(wandb.util, "_get_max_cli_version", lambda: "0.10.33") with mock.patch.dict("os.environ", WANDB_REQUIRE_SERVICE="true"): with wandb.init(settings=test_settings) as run: wb_image = [wandb.Image(image) for _ in range(5)] run._init_pid += 1 run.log({"logged_images": wb_image}) outerr = capsys.readouterr() assert ( "Trying to log a sequence of Image(s) from multiple processes might cause for data loss. Please upgrade your wandb server" in outerr.err )
40,202
def join_lines_to_polylines(lines):
    """Join polylines from lines. The polylines stop at points connecting more than two lines.

    Parameters
    ----------
    lines : list
        List of lines as tuples of their extremity coordinates.

    Returns
    -------
    polylines: list
        The polylines. If the polyline is closed, the two extremities are the same.

    """
    # create graph from line extremities
    network = Network.from_lines([(line[0], line[-1]) for line in lines])

    polylines = []
    edges_to_visit = list(network.edges())

    # initiate a polyline from an unvisited edge
    while len(edges_to_visit) > 0:
        polyline = list(edges_to_visit.pop())

        # get adjacent edges until the polyline is closed...
        while polyline[0] != polyline[-1]:
            # ... or until both ends are non-two-valent vertices
            if len(network.vertex_neighbors(polyline[-1])) != 2:
                polyline = list(reversed(polyline))
                if len(network.vertex_neighbors(polyline[-1])) != 2:
                    break
            # add next edge
            polyline.append([nbr for nbr in network.vertex_neighbors(polyline[-1]) if nbr != polyline[-2]][0])

        # delete polyline edges from the list of unvisited edges
        for u, v in pairwise(polyline):
            if (u, v) in edges_to_visit:
                edges_to_visit.remove((u, v))
            elif (v, u) in edges_to_visit:
                edges_to_visit.remove((v, u))

        polylines.append(polyline)

    return [[network.vertex_coordinates(vkey) for vkey in polyline]for polyline in polylines]
def join_lines_to_polylines(lines):
    """Join polylines from lines. The polylines stop at points connecting more than two lines.

    Parameters
    ----------
    lines : list
        List of lines as tuples of their extremity coordinates.

    Returns
    -------
    polylines: list
        The polylines. If the polyline is closed, the two extremities are the same.

    """
    # create graph from line extremities
    network = Network.from_lines([(line[0], line[-1]) for line in lines])

    polylines = []
    edges_to_visit = list(network.edges())

    # initiate a polyline from an unvisited edge
    while len(edges_to_visit) > 0:
        polyline = list(edges_to_visit.pop())

        # get adjacent edges until the polyline is closed...
        while polyline[0] != polyline[-1]:
            # ... or until both ends are non-two-valent vertices
            if len(network.vertex_neighbors(polyline[-1])) != 2:
                polyline = list(reversed(polyline))
                if len(network.vertex_neighbors(polyline[-1])) != 2:
                    break
            # add next edge
            polyline.append([nbr for nbr in network.vertex_neighbors(polyline[-1]) if nbr != polyline[-2]][0])

        # delete polyline edges from the list of unvisited edges
        for u, v in pairwise(polyline):
            if (u, v) in edges_to_visit:
                edges_to_visit.remove((u, v))
            elif (v, u) in edges_to_visit:
                edges_to_visit.remove((v, u))

        polylines.append(polyline)

    return [[network.vertex_coordinates(vkey) for vkey in polyline] for polyline in polylines]
25,986
def load_arguments(self, _): # Model imports StorageAccountTypes = self.get_models('StorageAccountTypes') DiskStorageAccountTypes = self.get_models('DiskStorageAccountTypes,', operation_group='disks') SnapshotStorageAccountTypes = self.get_models('SnapshotStorageAccountTypes', operation_group='snapshots') UpgradeMode, CachingTypes, OperatingSystemTypes = self.get_models('UpgradeMode', 'CachingTypes', 'OperatingSystemTypes') HyperVGenerationTypes, HyperVGeneration = self.get_models('HyperVGenerationTypes', 'HyperVGeneration') DedicatedHostLicenseTypes = self.get_models('DedicatedHostLicenseTypes') OrchestrationServiceNames, OrchestrationServiceStateAction = self.get_models('OrchestrationServiceNames', 'OrchestrationServiceStateAction', operation_group='virtual_machine_scale_sets') RebootSetting, VMGuestPatchClassificationWindows, VMGuestPatchClassificationLinux = self.get_models('VMGuestPatchRebootSetting', 'VMGuestPatchClassificationWindows', 'VMGuestPatchClassificationLinux') GallerySharingPermissionTypes = self.get_models('GallerySharingPermissionTypes', operation_group='shared_galleries') ReplicationMode = self.get_models('ReplicationMode', operation_group='gallery_image_versions') # REUSABLE ARGUMENT DEFINITIONS name_arg_type = CLIArgumentType(options_list=['--name', '-n'], metavar='NAME') multi_ids_type = CLIArgumentType(nargs='+') existing_vm_name = CLIArgumentType(overrides=name_arg_type, configured_default='vm', help="The name of the Virtual Machine. You can configure the default using `az configure --defaults vm=<name>`", completer=get_resource_name_completion_list('Microsoft.Compute/virtualMachines'), id_part='name') existing_disk_name = CLIArgumentType(overrides=name_arg_type, help='The name of the managed disk', completer=get_resource_name_completion_list('Microsoft.Compute/disks'), id_part='name') existing_snapshot_name = CLIArgumentType(overrides=name_arg_type, help='The name of the snapshot', completer=get_resource_name_completion_list('Microsoft.Compute/snapshots'), id_part='name') vmss_name_type = CLIArgumentType(name_arg_type, configured_default='vmss', completer=get_resource_name_completion_list('Microsoft.Compute/virtualMachineScaleSets'), help="Scale set name. You can configure the default using `az configure --defaults vmss=<name>`", id_part='name') extension_instance_name_type = CLIArgumentType(help="Name of extension instance, which can be customized. Default: name of the extension.") image_template_name_type = CLIArgumentType(overrides=name_arg_type, id_part='name') disk_encryption_set_name = CLIArgumentType(overrides=name_arg_type, help='Name of disk encryption set.', id_part='name') # StorageAccountTypes renamed to DiskStorageAccountTypes in 2018_06_01 of azure-mgmt-compute DiskStorageAccountTypes = DiskStorageAccountTypes or StorageAccountTypes if DiskStorageAccountTypes: disk_sku = CLIArgumentType(arg_type=get_enum_type(DiskStorageAccountTypes)) else: # StorageAccountTypes introduced in api version 2016_04_30_preview of Resource.MGMT.Compute package.. # However, 2017-03-09-profile targets version 2016-03-30 of compute package. disk_sku = CLIArgumentType(arg_type=get_enum_type(['Premium_LRS', 'Standard_LRS'])) if SnapshotStorageAccountTypes: snapshot_sku = CLIArgumentType(arg_type=get_enum_type(SnapshotStorageAccountTypes)) else: # SnapshotStorageAccountTypes introduced in api version 2018_04_01 of Resource.MGMT.Compute package.. # However, 2017-03-09-profile targets version 2016-03-30 of compute package. 
snapshot_sku = CLIArgumentType(arg_type=get_enum_type(['Premium_LRS', 'Standard_LRS'])) # special case for `network nic scale-set list` command alias with self.argument_context('network nic scale-set list') as c: c.argument('virtual_machine_scale_set_name', options_list=['--vmss-name'], completer=get_resource_name_completion_list('Microsoft.Compute/virtualMachineScaleSets'), id_part='name') HyperVGenerationTypes = HyperVGenerationTypes or HyperVGeneration if HyperVGenerationTypes: hyper_v_gen_sku = CLIArgumentType(arg_type=get_enum_type(HyperVGenerationTypes, default="V1")) else: hyper_v_gen_sku = CLIArgumentType(arg_type=get_enum_type(["V1", "V2"], default="V1")) ultra_ssd_enabled_type = CLIArgumentType( arg_type=get_three_state_flag(), min_api='2018-06-01', help='Enables or disables the capability to have 1 or more managed data disks with UltraSSD_LRS storage account') scale_in_policy_type = CLIArgumentType( nargs='+', arg_type=get_enum_type(self.get_models('VirtualMachineScaleSetScaleInRules')), help='Specify the scale-in policy (space delimited) that decides which virtual machines are chosen for removal when a Virtual Machine Scale Set is scaled-in.' ) edge_zone_type = CLIArgumentType( help='The name of edge zone.', min_api='2020-12-01', is_preview=True ) t_shared_to = self.get_models('SharedToValues', operation_group='shared_galleries') shared_to_type = CLIArgumentType( arg_type=get_enum_type(t_shared_to), help='The query parameter to decide what shared galleries to fetch when doing listing operations. ' 'If not specified, list by subscription id.' ) # region MixedScopes for scope in ['vm', 'disk', 'snapshot', 'image', 'sig']: with self.argument_context(scope) as c: c.argument('tags', tags_type) for scope in ['disk', 'snapshot']: with self.argument_context(scope) as c: c.ignore('source_blob_uri', 'source_disk', 'source_snapshot') c.argument('source_storage_account_id', help='used when source blob is in a different subscription') c.argument('size_gb', options_list=['--size-gb', '-z'], help='size in GB. Max size: 4095 GB (certain preview disks can be larger).', type=int) c.argument('duration_in_seconds', help='Time duration in seconds until the SAS access expires', type=int) if self.supported_api_version(min_api='2018-09-30', operation_group='disks'): c.argument('access_level', arg_type=get_enum_type(['Read', 'Write']), default='Read', help='access level') c.argument('for_upload', arg_type=get_three_state_flag(), help='Create the {0} for uploading blobs later on through storage commands. Run "az {0} grant-access --access-level Write" to retrieve the {0}\'s SAS token.'.format(scope)) c.argument('hyper_v_generation', arg_type=hyper_v_gen_sku, help='The hypervisor generation of the Virtual Machine. Applicable to OS disks only.') else: c.ignore('access_level', 'for_upload', 'hyper_v_generation') c.argument('encryption_type', min_api='2019-07-01', arg_type=get_enum_type(self.get_models('EncryptionType')), help='Encryption type. EncryptionAtRestWithPlatformKey: Disk is encrypted with XStore managed key at rest. It is the default encryption type. EncryptionAtRestWithCustomerKey: Disk is encrypted with Customer managed key at rest.') c.argument('disk_encryption_set', min_api='2019-07-01', help='Name or ID of disk encryption set that is used to encrypt the disk.') c.argument('location', help='Location. Values from: `az account list-locations`. You can configure the default location using `az configure --defaults location=<location>`. 
If location is not specified and no default location specified, location will be automatically set as same as the resource group.') operation_group = 'disks' if scope == 'disk' else 'snapshots' c.argument('network_access_policy', min_api='2020-05-01', help='Policy for accessing the disk via network.', arg_type=get_enum_type(self.get_models('NetworkAccessPolicy', operation_group=operation_group))) c.argument('disk_access', min_api='2020-05-01', help='Name or ID of the disk access resource for using private endpoints on disks.') c.argument('enable_bursting', arg_type=get_three_state_flag(), help='Enable bursting beyond the provisioned performance target of the disk. Bursting is disabled by default, and it does not apply to Ultra disks.') for scope in ['disk create', 'snapshot create']: with self.argument_context(scope) as c: c.argument('source', help='source to create the disk/snapshot from, including unmanaged blob uri, managed disk id or name, or snapshot id or name') # endregion # region Disks with self.argument_context('disk') as c: c.argument('zone', zone_type, min_api='2017-03-30', options_list=['--zone']) # TODO: --size-gb currently has claimed -z. We can do a breaking change later if we want to. c.argument('disk_name', existing_disk_name, completer=get_resource_name_completion_list('Microsoft.Compute/disks')) c.argument('name', arg_type=name_arg_type) c.argument('sku', arg_type=disk_sku, help='Underlying storage SKU') c.argument('os_type', arg_type=get_enum_type(OperatingSystemTypes), help='The Operating System type of the Disk.') c.argument('disk_iops_read_write', type=int, min_api='2018-06-01', help='The number of IOPS allowed for this disk. Only settable for UltraSSD disks. One operation can transfer between 4k and 256k bytes') c.argument('disk_mbps_read_write', type=int, min_api='2018-06-01', help="The bandwidth allowed for this disk. Only settable for UltraSSD disks. MBps means millions of bytes per second with ISO notation of powers of 10") c.argument('upload_size_bytes', type=int, min_api='2019-03-01', help='The size (in bytes) of the contents of the upload including the VHD footer. Min value: 20972032. Max value: 35183298347520') c.argument('max_shares', type=int, help='The maximum number of VMs that can attach to the disk at the same time. Value greater than one indicates a disk that can be mounted on multiple VMs at the same time') c.argument('disk_iops_read_only', type=int, help='The total number of IOPS that will be allowed across all VMs mounting the shared disk as ReadOnly. One operation can transfer between 4k and 256k bytes') c.argument('disk_mbps_read_only', type=int, help='The total throughput (MBps) that will be allowed across all VMs mounting the shared disk as ReadOnly. MBps means millions of bytes per second - MB here uses the ISO notation, of powers of 10') c.argument('image_reference', help='ID or URN (publisher:offer:sku:version) of the image from which to create a disk') c.argument('image_reference_lun', type=int, help='If the disk is created from an image\'s data disk, this is an index that indicates which of the data disks in the image to use. For OS disks, this field is null') c.argument('gallery_image_reference', help='ID of the shared galley image version from which to create a disk') c.argument('gallery_image_reference_lun', type=int, help='If the disk is created from an image\'s data disk, this is an index that indicates which of the data disks in the image to use. 
For OS disks, this field is null') c.argument('logical_sector_size', type=int, help='Logical sector size in bytes for Ultra disks. Supported values are 512 ad 4096. 4096 is the default.') c.argument('tier', help='Performance tier of the disk (e.g, P4, S10) as described here: https://azure.microsoft.com/pricing/details/managed-disks/. Does not apply to Ultra disks.') c.argument('edge_zone', edge_zone_type) c.argument('security_type', choices=['TrustedLaunch'], help='The security type of the VM. Applicable for OS disks only.', min_api='2020-12-01') c.argument('support_hibernation', arg_type=get_three_state_flag(), help='Indicate the OS on a disk supports hibernation.', min_api='2020-12-01') # endregion # region Snapshots with self.argument_context('snapshot', resource_type=ResourceType.MGMT_COMPUTE, operation_group='snapshots') as c: c.argument('snapshot_name', existing_snapshot_name, id_part='name', completer=get_resource_name_completion_list('Microsoft.Compute/snapshots')) c.argument('name', arg_type=name_arg_type) c.argument('sku', arg_type=snapshot_sku) c.argument('incremental', arg_type=get_three_state_flag(), min_api='2019-03-01', help='Whether a snapshot is incremental. Incremental snapshots on the same disk occupy less space than full snapshots and can be diffed') c.argument('edge_zone', edge_zone_type) c.argument('copy_start', arg_type=get_three_state_flag(), min_api='2021-04-01', help='Create snapshot by using a deep copy process, where the resource creation is considered complete only after all data has been copied from the source.') # endregion # region Images with self.argument_context('image') as c: c.argument('os_type', arg_type=get_enum_type(['Windows', 'Linux'])) c.argument('image_name', arg_type=name_arg_type, id_part='name', completer=get_resource_name_completion_list('Microsoft.Compute/images')) c.argument('tags', tags_type) with self.argument_context('image create') as c: # here we collpase all difference image sources to under 2 common arguments --os-disk-source --data-disk-sources c.argument('name', arg_type=name_arg_type, help='new image name') c.argument('source', help='OS disk source from the same region, including a virtual machine ID or name, OS disk blob URI, managed OS disk ID or name, or OS snapshot ID or name') c.argument('data_disk_sources', nargs='+', help='Space-separated list of data disk sources, including unmanaged blob URI, managed disk ID or name, or snapshot ID or name') c.argument('zone_resilient', min_api='2017-12-01', arg_type=get_three_state_flag(), help='Specifies whether an image is zone resilient or not. ' 'Default is false. Zone resilient images can be created only in regions that provide Zone Redundant Storage') c.argument('storage_sku', arg_type=disk_sku, help='The SKU of the storage account with which to create the VM image. 
Unused if source VM is specified.') c.argument('os_disk_caching', arg_type=get_enum_type(CachingTypes), help="Storage caching type for the image's OS disk.") c.argument('data_disk_caching', arg_type=get_enum_type(CachingTypes), help="Storage caching type for the image's data disk.") c.argument('hyper_v_generation', arg_type=hyper_v_gen_sku, min_api="2019-03-01", help='The hypervisor generation of the Virtual Machine created from the image.') c.ignore('source_virtual_machine', 'os_blob_uri', 'os_disk', 'os_snapshot', 'data_blob_uris', 'data_disks', 'data_snapshots') c.argument('edge_zone', edge_zone_type, ) # endregion # region Image Templates with self.argument_context('image builder') as c: ib_output_name_help = "Name of the image builder run output." c.argument('location', get_location_type(self.cli_ctx)) c.argument('scripts', nargs='+', help="Space-separated list of shell or powershell scripts to customize the image with. Each script must be a publicly accessible URL." " Infers type of script from file extension ('.sh' or'.ps1') or from source type. More more customizer options and flexibility, see: 'az image template customizer add'") c.argument('source', options_list=["--image-source", "-i"], help="The base image to customize. Must be a valid platform image URN, platform image alias, Red Hat ISO image URI, managed image name/ID, or shared image version ID.") c.argument('image_template_name', image_template_name_type, help="The name of the image template.") c.argument('checksum', help="The SHA256 checksum of the Red Hat ISO image") c.argument('managed_image_destinations', nargs='+', help='Managed image output distributor information. Space-separated list of key-value pairs. E.g "image_1=westus2 image_2=westus". Each key is the name or resource ID of the managed image to be created. Each value is the location of the image.') c.argument('shared_image_destinations', nargs='+', help='Shared image gallery (sig) output distributor information. Space-separated list of key-value pairs. E.g "my_gallery_1/image_def_1=eastus,westus my_gallery_2/image_def_2=uksouth,canadaeast,francesouth." ' 'Each key is the sig image definition ID or sig gallery name and sig image definition delimited by a "/". Each value is a comma-delimited list of replica locations.') c.argument('output_name', help=ib_output_name_help) c.ignore('destinations_lists', 'scripts_list', 'source_dict') with self.argument_context('image builder create') as c: ib_source_type = CLIArgumentType(arg_group="Image Source") ib_customizer_type = CLIArgumentType(arg_group="Customizer") ib_cutput_type = CLIArgumentType(arg_group="Output") c.argument('build_timeout', type=int, help="The Maximum duration to wait while building the image template, in minutes. Default is 60.") c.argument('image_template', help='Local path or URL to an image template file. When using --image-template, all other parameters are ignored except -g and -n. Reference: https://docs.microsoft.com/azure/virtual-machines/linux/image-builder-json') c.argument('identity', nargs='+', help='List of user assigned identities (name or ID, space delimited) of the image template.') # VM profile c.argument('vm_size', help='Size of the virtual machine used to build, customize and capture images. Omit or specify empty string to use the default (Standard_D1_v2)') c.argument('os_disk_size', type=int, help='Size of the OS disk in GB. Omit or specify 0 to use Azure\'s default OS disk size') c.argument('vnet', help='Name of VNET to deploy the build virtual machine. 
You should only specify it when subnet is a name') c.argument('subnet', help='Name or ID of subnet to deploy the build virtual machine') # Image Source Arguments c.argument('source', arg_type=ib_source_type) c.argument('checksum', arg_type=ib_source_type) c.argument('', arg_type=ib_source_type) # Image Customizer Arguments c.argument('scripts', arg_type=ib_customizer_type) c.argument('', arg_type=ib_customizer_type) c.argument('', arg_type=ib_customizer_type) # Image Output Arguments c.argument('managed_image_destinations', arg_type=ib_cutput_type) c.argument('shared_image_destinations', arg_type=ib_cutput_type) c.argument('output_name', arg_type=ib_cutput_type) with self.argument_context('image builder output') as c: ib_sig_regions_help = "Space-separated list of regions to replicate the image version into." ib_img_location_help = "Location where the customized image will be created." c.argument('gallery_image_definition', arg_group="Shared Image Gallery", help="Name or ID of the existing SIG image definition to create the customized image version with.") c.argument('gallery_name', arg_group="Shared Image Gallery", help="Shared image gallery name, if image definition name and not ID was provided.") c.argument('gallery_replication_regions', arg_group="Shared Image Gallery", nargs='+', help=ib_sig_regions_help) c.argument('managed_image', arg_group="Managed Image", help="Name or ID of the customized managed image to be created.") c.argument('managed_image_location', arg_group="Managed Image", help=ib_img_location_help) with self.argument_context('image builder output add') as c: ib_artifact_tags_help = "Tags that will be applied to the output artifact once it has been created by the distributor. " + tags_type.settings['help'] ib_artifact_tags_type = CLIArgumentType(overrides=tags_type, help=ib_artifact_tags_help, options_list=["--artifact-tags"]) ib_default_loc_help = " Defaults to resource group's location." c.argument('output_name', help=ib_output_name_help + " Defaults to the name of the managed image or sig image definition.") c.argument('gallery_replication_regions', arg_group="Shared Image Gallery", nargs='+', help=ib_sig_regions_help + ib_default_loc_help) c.argument('managed_image_location', arg_group="Managed Image", help=ib_img_location_help + ib_default_loc_help) c.argument('is_vhd', arg_group="VHD", help="The output is a VHD distributor.", action='store_true') c.argument('tags', arg_type=ib_artifact_tags_type) c.ignore('location') with self.argument_context('image builder customizer') as c: ib_win_restart_type = CLIArgumentType(arg_group="Windows Restart") ib_win_update_type = CLIArgumentType(arg_group="Windows Update") ib_script_type = CLIArgumentType(arg_group="Shell and Powershell") ib_powershell_type = CLIArgumentType(arg_group="Powershell") ib_file_customizer_type = CLIArgumentType(arg_group="File") c.argument('customizer_name', help="Name of the customizer.") c.argument('customizer_type', options_list=['--type', '-t'], help="Type of customizer to be added to the image template.", arg_type=get_enum_type(ScriptType)) # Script Args c.argument('script_url', arg_type=ib_script_type, help="URL of script to customize the image with. 
The URL must be publicly accessible.") c.argument('inline_script', arg_type=ib_script_type, nargs='+', help="Space-separated list of inline script lines to customize the image with.") # Powershell Specific Args c.argument('valid_exit_codes', options_list=['--exit-codes', '-e'], arg_type=ib_powershell_type, nargs='+', help="Space-separated list of valid exit codes, as integers") # Windows Restart Specific Args c.argument('restart_command', arg_type=ib_win_restart_type, help="Command to execute the restart operation.") c.argument('restart_check_command', arg_type=ib_win_restart_type, help="Command to verify that restart succeeded.") c.argument('restart_timeout', arg_type=ib_win_restart_type, help="Restart timeout specified as a string consisting of a magnitude and unit, e.g. '5m' (5 minutes) or '2h' (2 hours)", default="5m") # Windows Update Specific Args c.argument('search_criteria', arg_type=ib_win_update_type, help='Criteria to search updates. Omit or specify empty string to use the default (search all). Refer to above link for examples and detailed description of this field.') c.argument('filters', arg_type=ib_win_update_type, nargs='+', help='Space delimited filters to select updates to apply. Omit or specify empty array to use the default (no filter)') c.argument('update_limit', arg_type=ib_win_update_type, help='Maximum number of updates to apply at a time. Omit or specify 0 to use the default (1000)') # File Args c.argument('file_source', arg_type=ib_file_customizer_type, help="The URI of the file to be downloaded into the image. It can be a github link, SAS URI for Azure Storage, etc.") c.argument('dest_path', arg_type=ib_file_customizer_type, help="The absolute destination path where the file specified in --file-source will be downloaded to in the image") # endregion # region AvailabilitySets with self.argument_context('vm availability-set') as c: c.argument('availability_set_name', name_arg_type, id_part='name', completer=get_resource_name_completion_list('Microsoft.Compute/availabilitySets'), help='Name of the availability set') with self.argument_context('vm availability-set create') as c: c.argument('availability_set_name', name_arg_type, validator=get_default_location_from_resource_group, help='Name of the availability set') c.argument('platform_update_domain_count', type=int, help='Update Domain count. 
If unspecified, the server will pick the most optimal number like 5.') c.argument('platform_fault_domain_count', type=int, help='Fault Domain count.') c.argument('validate', help='Generate and validate the ARM template without creating any resources.', action='store_true') c.argument('unmanaged', action='store_true', min_api='2016-04-30-preview', help='contained VMs should use unmanaged disks') with self.argument_context('vm availability-set update') as c: if self.supported_api_version(max_api='2016-04-30-preview', operation_group='virtual_machines'): c.argument('name', name_arg_type, id_part='name', completer=get_resource_name_completion_list('Microsoft.Compute/availabilitySets'), help='Name of the availability set') c.argument('availability_set_name', options_list=['--availability-set-name']) # endregion # region VirtualMachines with self.argument_context('vm') as c: c.argument('vm_name', existing_vm_name) c.argument('size', completer=get_vm_size_completion_list) c.argument('name', arg_type=name_arg_type) c.argument('zone', zone_type, min_api='2017-03-30') c.argument('caching', help='Disk caching policy', arg_type=get_enum_type(CachingTypes)) c.argument('nsg', help='The name to use when creating a new Network Security Group (default) or referencing an existing one. Can also reference an existing NSG by ID or specify "" for none.', arg_group='Network') c.argument('nsg_rule', help='NSG rule to create when creating a new NSG. Defaults to open ports for allowing RDP on Windows and allowing SSH on Linux.', arg_group='Network', arg_type=get_enum_type(['RDP', 'SSH'])) c.argument('application_security_groups', min_api='2017-09-01', nargs='+', options_list=['--asgs'], help='Space-separated list of existing application security groups to associate with the VM.', arg_group='Network') c.argument('workspace', is_preview=True, arg_group='Monitor', help='Name or ID of Log Analytics Workspace. If you specify the workspace through its name, the workspace should be in the same resource group with the vm, otherwise a new workspace will be created.') with self.argument_context('vm capture') as c: c.argument('overwrite', action='store_true') with self.argument_context('vm update') as c: c.argument('os_disk', min_api='2017-12-01', help="Managed OS disk ID or name to swap to") c.argument('write_accelerator', nargs='*', min_api='2017-12-01', help="enable/disable disk write accelerator. Use singular value 'true/false' to apply across, or specify individual disks, e.g.'os=true 1=true 2=true' for os disk and data disks with lun of 1 & 2") c.argument('disk_caching', nargs='*', help="Use singular value to apply across, or specify individual disks, e.g. 'os=ReadWrite 0=None 1=ReadOnly' should enable update os disk and 2 data disks") c.argument('ultra_ssd_enabled', ultra_ssd_enabled_type) c.argument('enable_secure_boot', arg_type=get_three_state_flag(), min_api='2020-12-01', help='Enable secure boot.') c.argument('enable_vtpm', arg_type=get_three_state_flag(), min_api='2020-12-01', help='Enable vTPM.') with self.argument_context('vm create') as c: c.argument('name', name_arg_type, validator=_resource_not_exists(self.cli_ctx, 'Microsoft.Compute/virtualMachines')) c.argument('vm_name', name_arg_type, id_part=None, help='Name of the virtual machine.', completer=None) c.argument('os_disk_size_gb', type=int, help='the size of the os disk in GB', arg_group='Storage') c.argument('availability_set', help='Name or ID of an existing availability set to add the VM to. 
None by default.')
        c.argument('vmss', help='Name or ID of an existing virtual machine scale set that the virtual machine should be assigned to. None by default.')
        c.argument('nsg', help='The name to use when creating a new Network Security Group (default) or referencing an existing one. Can also reference an existing NSG by ID or specify "" for none (\'""\' in Azure CLI using PowerShell or --% operator).', arg_group='Network')
        c.argument('nsg_rule', help='NSG rule to create when creating a new NSG. Defaults to open ports for allowing RDP on Windows and allowing SSH on Linux. NONE represents no NSG rule', arg_group='Network', arg_type=get_enum_type(['RDP', 'SSH', 'NONE']))
        c.argument('application_security_groups', resource_type=ResourceType.MGMT_NETWORK, min_api='2017-09-01', nargs='+', options_list=['--asgs'], help='Space-separated list of existing application security groups to associate with the VM.', arg_group='Network', validator=validate_asg_names_or_ids)
        c.argument('boot_diagnostics_storage', help='pre-existing storage account name or its blob uri to capture boot diagnostics. Its sku should be one of Standard_GRS, Standard_LRS and Standard_RAGRS')
        c.argument('accelerated_networking', resource_type=ResourceType.MGMT_NETWORK, min_api='2016-09-01', arg_type=get_three_state_flag(), arg_group='Network', help="enable accelerated networking. Unless specified, CLI will enable it based on machine image and size")
        if self.supported_api_version(min_api='2019-03-01', resource_type=ResourceType.MGMT_COMPUTE):
            VirtualMachineEvictionPolicyTypes = self.get_models('VirtualMachineEvictionPolicyTypes', resource_type=ResourceType.MGMT_COMPUTE)
            c.argument('eviction_policy', resource_type=ResourceType.MGMT_COMPUTE, min_api='2019-03-01', arg_type=get_enum_type(VirtualMachineEvictionPolicyTypes, default=None), help="The eviction policy for the Spot priority virtual machine. Default eviction policy is Deallocate for a Spot priority virtual machine")
        c.argument('enable_agent', arg_type=get_three_state_flag(), min_api='2018-06-01', help='Indicates whether virtual machine agent should be provisioned on the virtual machine. When this property is not specified, default behavior is to set it to true. This will ensure that VM Agent is installed on the VM so that extensions can be added to the VM later')
        c.argument('enable_auto_update', arg_type=get_three_state_flag(), min_api='2020-06-01', help='Indicate whether Automatic Updates is enabled for the Windows virtual machine')
        c.argument('patch_mode', arg_type=get_enum_type(['AutomaticByOS', 'AutomaticByPlatform', 'Manual', 'ImageDefault']), min_api='2020-12-01', help='Mode of in-guest patching to IaaS virtual machine. Allowed values for Windows VM: AutomaticByOS, AutomaticByPlatform, Manual. Allowed values for Linux VM: AutomaticByPlatform, ImageDefault. Manual - You control the application of patches to a virtual machine. You do this by applying patches manually inside the VM. In this mode, automatic updates are disabled; the parameter --enable-auto-update must be false. AutomaticByOS - The virtual machine will automatically be updated by the OS. The parameter --enable-auto-update must be true. AutomaticByPlatform - The virtual machine will automatically be updated by the platform. ImageDefault - The virtual machine\'s default patching configuration is used. The parameters --enable-agent and --enable-auto-update must be true')
        c.argument('ssh_key_name', help='Use it as public key in virtual machine. 
It should be an existing SSH key resource in Azure.') c.argument('enable_hotpatching', arg_type=get_three_state_flag(), help='Patch VMs without requiring a reboot. --enable-agent must be set and --patch-mode must be set to AutomaticByPlatform', min_api='2020-12-01') c.argument('platform_fault_domain', min_api='2020-06-01', help='Specify the scale set logical fault domain into which the virtual machine will be created. By default, the virtual machine will be automatically assigned to a fault domain that best maintains balance across available fault domains. This is applicable only if the virtualMachineScaleSet property of this virtual machine is set. The virtual machine scale set that is referenced, must have platform fault domain count. This property cannot be updated once the virtual machine is created. Fault domain assignment can be viewed in the virtual machine instance view') c.argument('count', type=int, is_preview=True, help='Number of virtual machines to create. Value range is [2, 250], inclusive. Don\'t specify this parameter if you want to create a normal single VM. The VMs are created in parallel. The output of this command is an array of VMs instead of one single VM. Each VM has its own public IP, NIC. VNET and NSG are shared. It is recommended that no existing public IP, NIC, VNET and NSG are in resource group. When --count is specified, --attach-data-disks, --attach-os-disk, --boot-diagnostics-storage, --computer-name, --host, --host-group, --nics, --os-disk-name, --private-ip-address, --public-ip-address, --public-ip-address-dns-name, --storage-account, --storage-container-name, --subnet, --use-unmanaged-disk, --vnet-name are not allowed.') c.argument('security_type', arg_type=get_enum_type(['TrustedLaunch']), min_api='2020-12-01', help='Specify if the VM is Trusted Launch enabled. See https://docs.microsoft.com/azure/virtual-machines/trusted-launch.') c.argument('enable_secure_boot', arg_type=get_three_state_flag(), min_api='2020-12-01', help='Enable secure boot. It is part of trusted launch.') c.argument('enable_vtpm', arg_type=get_three_state_flag(), min_api='2020-12-01', help='Enable vTPM. It is part of trusted launch.') c.argument('user_data', help='UserData for the VM. It can be passed in as file or string.', completer=FilesCompleter(), type=file_type, min_api='2021-03-01') with self.argument_context('vm create', arg_group='Storage') as c: c.argument('attach_os_disk', help='Attach an existing OS disk to the VM. Can use the name or ID of a managed disk or the URI to an unmanaged disk VHD.') c.argument('attach_data_disks', nargs='+', help='Attach existing data disks to the VM. Can use the name or ID of a managed disk or the URI to an unmanaged disk VHD.') with self.argument_context('vm create', arg_group='Dedicated Host', min_api='2019-03-01') as c: c.argument('dedicated_host_group', options_list=['--host-group'], is_preview=True, help="Name or ID of the dedicated host group that the VM will reside in. --host and --host-group can't be used together.") c.argument('dedicated_host', options_list=['--host'], is_preview=True, help="ID of the dedicated host that the VM will reside in. --host and --host-group can't be used together.") with self.argument_context('vm update', arg_group='Dedicated Host', min_api='2019-03-01') as c: c.argument('dedicated_host_group', options_list=['--host-group'], is_preview=True, help="Name or ID of the dedicated host group that the VM will reside in. --host and --host-group can't be used together. 
You should deallocate the VM before update, and start the VM after update. Please check out help for more examples.") c.argument('dedicated_host', options_list=['--host'], is_preview=True, help="ID of the dedicated host that the VM will reside in. --host and --host-group can't be used together. You should deallocate the VM before update, and start the VM after update. Please check out help for more examples.") with self.argument_context('vm open-port') as c: c.argument('vm_name', name_arg_type, help='The name of the virtual machine to open inbound traffic on.') c.argument('network_security_group_name', options_list=('--nsg-name',), help='The name of the network security group to create if one does not exist. Ignored if an NSG already exists.', validator=validate_nsg_name) c.argument('apply_to_subnet', help='Allow inbound traffic on the subnet instead of the NIC', action='store_true') c.argument('port', help="The port or port range (ex: 80-100) to open inbound traffic to. Use '*' to allow traffic to all ports. Use comma separated values to specify more than one port or port range.") c.argument('priority', help='Rule priority, between 100 (highest priority) and 4096 (lowest priority). Must be unique for each rule in the collection.', type=int) for scope in ['vm show', 'vm list']: with self.argument_context(scope) as c: c.argument('show_details', action='store_true', options_list=['--show-details', '-d'], help='show public ip address, FQDN, and power states. command will run slow') for scope in ['vm show', 'vmss show']: with self.argument_context(scope) as c: c.argument('include_user_data', action='store_true', options_list=['--include-user-data', '-u'], help='Include the user data properties in the query result.', min_api='2021-03-01') for scope in ['vm get-instance-view', 'vm wait', 'vmss wait']: with self.argument_context(scope) as c: c.ignore('include_user_data') with self.argument_context('vm diagnostics') as c: c.argument('vm_name', arg_type=existing_vm_name, options_list=['--vm-name']) with self.argument_context('vm diagnostics set') as c: c.argument('storage_account', completer=get_resource_name_completion_list('Microsoft.Storage/storageAccounts')) with self.argument_context('vm install-patches') as c: c.argument('maximum_duration', type=str, help='Specify the maximum amount of time that the operation will run. It must be an ISO 8601-compliant duration string such as PT4H (4 hours)') c.argument('reboot_setting', arg_type=get_enum_type(RebootSetting), help='Define when it is acceptable to reboot a VM during a software update operation.') c.argument('classifications_to_include_win', nargs='+', arg_type=get_enum_type(VMGuestPatchClassificationWindows), help='Space-separated list of classifications to include for Windows VM.') c.argument('classifications_to_include_linux', nargs='+', arg_type=get_enum_type(VMGuestPatchClassificationLinux), help='Space-separated list of classifications to include for Linux VM.') c.argument('kb_numbers_to_include', nargs='+', help='Space-separated list of KBs to include in the patch operation. Applicable to Windows VM only') c.argument('kb_numbers_to_exclude', nargs='+', help='Space-separated list of KBs to exclude in the patch operation. Applicable to Windows VM only') c.argument('exclude_kbs_requiring_reboot', arg_type=get_three_state_flag(), help="Filter out KBs that don't have a reboot behavior of 'NeverReboots' when this is set. 
Applicable to Windows VM only") c.argument('package_name_masks_to_include', nargs='+', help='Space-separated list of packages to include in the patch operation. Format: packageName_packageVersion. Applicable to Linux VM only') c.argument('package_name_masks_to_exclude', nargs='+', help='Space-separated list of packages to exclude in the patch operation. Format: packageName_packageVersion. Applicable to Linux VM only') with self.argument_context('vm disk') as c: c.argument('vm_name', options_list=['--vm-name'], id_part=None, completer=get_resource_name_completion_list('Microsoft.Compute/virtualMachines')) c.argument('new', action='store_true', help='create a new disk') c.argument('sku', arg_type=disk_sku, help='Underlying storage SKU') c.argument('size_gb', options_list=['--size-gb', '-z'], help='size in GB. Max size: 4095 GB (certain preview disks can be larger).', type=int) c.argument('lun', type=int, help='0-based logical unit number (LUN). Max value depends on the Virtual Machine size.') with self.argument_context('vm disk attach') as c: c.argument('enable_write_accelerator', min_api='2017-12-01', action='store_true', help='enable write accelerator') c.argument('disk', options_list=['--name', '-n', c.deprecate(target='--disk', redirect='--name', hide=True)], help="The name or ID of the managed disk", validator=validate_vm_disk, id_part='name', completer=get_resource_name_completion_list('Microsoft.Compute/disks')) with self.argument_context('vm disk detach') as c: c.argument('disk_name', arg_type=name_arg_type, help='The data disk name.') with self.argument_context('vm encryption enable') as c: c.argument('encrypt_format_all', action='store_true', help='Encrypts-formats data disks instead of encrypting them. Encrypt-formatting is a lot faster than in-place encryption but wipes out the partition getting encrypt-formatted.') # Place aad arguments in their own group aad_arguments = 'Azure Active Directory' c.argument('aad_client_id', arg_group=aad_arguments) c.argument('aad_client_secret', arg_group=aad_arguments) c.argument('aad_client_cert_thumbprint', arg_group=aad_arguments) with self.argument_context('vm extension') as c: c.argument('vm_extension_name', name_arg_type, completer=get_resource_name_completion_list('Microsoft.Compute/virtualMachines/extensions'), help='Name of the extension.', id_part='child_name_1') c.argument('vm_name', arg_type=existing_vm_name, options_list=['--vm-name'], id_part='name') c.argument('expand', deprecate_info=c.deprecate(expiration='3.0.0', hide=True)) with self.argument_context('vm extension list') as c: c.argument('vm_name', arg_type=existing_vm_name, options_list=['--vm-name'], id_part=None) with self.argument_context('vm secret') as c: c.argument('secrets', multi_ids_type, options_list=['--secrets', '-s'], help='Space-separated list of key vault secret URIs. Perhaps, produced by \'az keyvault secret list-versions --vault-name vaultname -n cert1 --query "[?attributes.enabled].id" -o tsv\'') c.argument('keyvault', help='Name or ID of the key vault.', validator=validate_keyvault) c.argument('certificate', help='key vault certificate name or its full secret URL') c.argument('certificate_store', help='Windows certificate store names. 
Default: My') with self.argument_context('vm secret list') as c: c.argument('vm_name', arg_type=existing_vm_name, id_part=None) with self.argument_context('vm image') as c: c.argument('publisher_name', options_list=['--publisher', '-p'], help='image publisher') c.argument('publisher', options_list=['--publisher', '-p'], help='image publisher') c.argument('offer', options_list=['--offer', '-f'], help='image offer') c.argument('plan', help='image billing plan') c.argument('sku', options_list=['--sku', '-s'], help='image sku') c.argument('version', help="image sku's version") c.argument('urn', help="URN, in format of 'publisher:offer:sku:version' or 'publisher:offer:sku:edge_zone:version'. If specified, other argument values can be omitted") with self.argument_context('vm image list') as c: c.argument('image_location', get_location_type(self.cli_ctx)) c.argument('edge_zone', edge_zone_type) with self.argument_context('vm image list-offers') as c: c.argument('edge_zone', edge_zone_type) with self.argument_context('vm image list-skus') as c: c.argument('edge_zone', edge_zone_type) with self.argument_context('vm image list-publishers') as c: c.argument('edge_zone', edge_zone_type) with self.argument_context('vm image show') as c: c.argument('skus', options_list=['--sku', '-s']) c.argument('edge_zone', edge_zone_type) with self.argument_context('vm image terms') as c: c.argument('urn', help='URN, in the format of \'publisher:offer:sku:version\'. If specified, other argument values can be omitted') c.argument('publisher', help='Image publisher') c.argument('offer', help='Image offer') c.argument('plan', help='Image billing plan') with self.argument_context('vm nic') as c: c.argument('vm_name', existing_vm_name, options_list=['--vm-name'], id_part=None) c.argument('nics', nargs='+', help='Names or IDs of NICs.', validator=validate_vm_nics) c.argument('primary_nic', help='Name or ID of the primary NIC. If missing, the first NIC in the list will be the primary.') with self.argument_context('vm nic show') as c: c.argument('nic', help='NIC name or ID.', validator=validate_vm_nic) with self.argument_context('vm unmanaged-disk') as c: c.argument('new', action='store_true', help='Create a new disk.') c.argument('lun', type=int, help='0-based logical unit number (LUN). Max value depends on the Virtual Machine size.') c.argument('vhd_uri', help="Virtual hard disk URI. For example: https://mystorage.blob.core.windows.net/vhds/d1.vhd") with self.argument_context('vm unmanaged-disk attach') as c: c.argument('disk_name', options_list=['--name', '-n'], help='The data disk name.') c.argument('size_gb', options_list=['--size-gb', '-z'], help='size in GB. 
Max size: 4095 GB (certain preview disks can be larger).', type=int) with self.argument_context('vm unmanaged-disk detach') as c: c.argument('disk_name', options_list=['--name', '-n'], help='The data disk name.') for scope in ['vm unmanaged-disk attach', 'vm unmanaged-disk detach']: with self.argument_context(scope) as c: c.argument('vm_name', arg_type=existing_vm_name, options_list=['--vm-name'], id_part=None) with self.argument_context('vm unmanaged-disk list') as c: c.argument('vm_name', options_list=['--vm-name', '--name', '-n'], arg_type=existing_vm_name, id_part=None) with self.argument_context('vm user') as c: c.argument('username', options_list=['--username', '-u'], help='The user name') c.argument('password', options_list=['--password', '-p'], help='The user password') with self.argument_context('vm list-skus') as c: c.argument('size', options_list=['--size', '-s'], help="size name, partial name is accepted") c.argument('zone', options_list=['--zone', '-z'], arg_type=get_three_state_flag(), help="show skus supporting availability zones") c.argument('show_all', options_list=['--all'], arg_type=get_three_state_flag(), help="show all information including vm sizes not available under the current subscription") c.argument('resource_type', options_list=['--resource-type', '-r'], help='resource types e.g. "availabilitySets", "snapshots", "disks", etc') with self.argument_context('vm restart') as c: c.argument('force', action='store_true', help='Force the VM to restart by redeploying it. Use if the VM is unresponsive.') with self.argument_context('vm host') as c: c.argument('host_group_name', options_list=['--host-group'], id_part='name', help="Name of the Dedicated Host Group") c.argument('host_name', name_arg_type, id_part='child_name_1', help="Name of the Dedicated Host") c.ignore('expand') with self.argument_context('vm host create') as c: c.argument('platform_fault_domain', options_list=['--platform-fault-domain', '-d'], type=int, help="Fault domain of the host within a group. Allowed values: 0, 1, 2") c.argument('auto_replace_on_failure', options_list=['--auto-replace'], arg_type=get_three_state_flag(), help="Replace the host automatically if a failure occurs") c.argument('license_type', arg_type=get_enum_type(DedicatedHostLicenseTypes), help="The software license type that will be applied to the VMs deployed on the dedicated host.") c.argument('sku', help="SKU of the dedicated host. Available SKUs: https://azure.microsoft.com/pricing/details/virtual-machines/dedicated-host/") with self.argument_context('vm host list') as c: c.argument('host_group_name', id_part=None) with self.argument_context('vm host group') as c: c.argument('host_group_name', name_arg_type, id_part='name', help="Name of the Dedicated Host Group") c.argument('automatic_placement', arg_type=get_three_state_flag(), min_api='2020-06-01', help='Specify whether virtual machines or virtual machine scale sets can be placed automatically ' 'on the dedicated host group. Automatic placement means resources are allocated on dedicated ' 'hosts, that are chosen by Azure, under the dedicated host group. 
The value is defaulted to ' 'false when not provided.') with self.argument_context('vm host group create') as c: c.argument('platform_fault_domain_count', options_list=["--platform-fault-domain-count", "-c"], type=int, help="Number of fault domains that the host group can span.") c.argument('zones', zone_type) for scope in ["vm host", "vm host group"]: with self.argument_context("{} create".format(scope)) as c: location_type = get_location_type(self.cli_ctx) custom_location_msg = " Otherwise, location will default to the resource group's location" custom_location_type = CLIArgumentType(overrides=location_type, help=location_type.settings["help"] + custom_location_msg) c.argument('location', arg_type=custom_location_type) # endregion # region VMSS scaleset_name_aliases = ['vm_scale_set_name', 'virtual_machine_scale_set_name', 'name'] with self.argument_context('vmss') as c: c.argument('zones', zones_type, min_api='2017-03-30') c.argument('instance_id', id_part='child_name_1') c.argument('instance_ids', multi_ids_type, help='Space-separated list of IDs (ex: 1 2 3 ...) or * for all instances. If not provided, the action will be applied on the scaleset itself') c.argument('tags', tags_type) c.argument('caching', help='Disk caching policy', arg_type=get_enum_type(CachingTypes)) for dest in scaleset_name_aliases: c.argument(dest, vmss_name_type) c.argument('host_group', min_api='2020-06-01', help='Name or ID of dedicated host group that the virtual machine scale set resides in') for scope in ['vmss deallocate', 'vmss delete-instances', 'vmss restart', 'vmss start', 'vmss stop', 'vmss show', 'vmss update-instances', 'vmss simulate-eviction']: with self.argument_context(scope) as c: for dest in scaleset_name_aliases: c.argument(dest, vmss_name_type, id_part=None) # due to instance-ids parameter with self.argument_context('vmss create', operation_group='virtual_machine_scale_sets') as c: VirtualMachineEvictionPolicyTypes = self.get_models('VirtualMachineEvictionPolicyTypes', resource_type=ResourceType.MGMT_COMPUTE) c.argument('name', name_arg_type) c.argument('nat_backend_port', default=None, help='Backend port to open with NAT rules. Defaults to 22 on Linux and 3389 on Windows.') c.argument('single_placement_group', arg_type=get_three_state_flag(), help="Limit the scale set to a single placement group." " See https://docs.microsoft.com/azure/virtual-machine-scale-sets/virtual-machine-scale-sets-placement-groups for details.") c.argument('platform_fault_domain_count', type=int, help='Fault Domain count for each placement group in the availability zone', min_api='2017-12-01') c.argument('vmss_name', name_arg_type, id_part=None, help='Name of the virtual machine scale set.') c.argument('instance_count', help='Number of VMs in the scale set.', type=int) c.argument('disable_overprovision', help='Overprovision option (see https://azure.microsoft.com/documentation/articles/virtual-machine-scale-sets-overview/ for details).', action='store_true') c.argument('upgrade_policy_mode', help=None, arg_type=get_enum_type(UpgradeMode)) c.argument('health_probe', help='Probe name from the existing load balancer, mainly used for rolling upgrade or automatic repairs') c.argument('vm_sku', help='Size of VMs in the scale set. Default to "Standard_DS1_v2". 
See https://azure.microsoft.com/pricing/details/virtual-machines/ for size info.') c.argument('nsg', help='Name or ID of an existing Network Security Group.', arg_group='Network') c.argument('eviction_policy', resource_type=ResourceType.MGMT_COMPUTE, min_api='2017-12-01', arg_type=get_enum_type(VirtualMachineEvictionPolicyTypes, default=None), help="The eviction policy for virtual machines in a Spot priority scale set. Default eviction policy is Deallocate for a Spot priority scale set") c.argument('application_security_groups', resource_type=ResourceType.MGMT_COMPUTE, min_api='2018-06-01', nargs='+', options_list=['--asgs'], help='Space-separated list of existing application security groups to associate with the VM.', arg_group='Network', validator=validate_asg_names_or_ids) c.argument('computer_name_prefix', help='Computer name prefix for all of the virtual machines in the scale set. Computer name prefixes must be 1 to 15 characters long') c.argument('orchestration_mode', help='Choose how virtual machines are managed by the scale set. In Uniform mode, you define a virtual machine model and Azure will generate identical instances based on that model. In Flexible mode, you manually create and add a virtual machine of any configuration to the scale set or generate identical instances based on virtual machine model defined for the scale set.', arg_type=get_enum_type(['Uniform', 'Flexible'])) c.argument('scale_in_policy', scale_in_policy_type) c.argument('automatic_repairs_grace_period', min_api='2018-10-01', help='The amount of time (in minutes, between 30 and 90) for which automatic repairs are suspended due to a state change on VM.') c.argument('user_data', help='UserData for the virtual machines in the scale set. It can be passed in as file or string.', completer=FilesCompleter(), type=file_type, min_api='2021-03-01') c.argument('network_api_version', min_api='2021-03-01', help="Specify the Microsoft.Network API version used when creating networking resources in the Network " "Interface Configurations for Virtual Machine Scale Set with orchestration mode 'Flexible'. Default " "value is 2020-11-01.") c.argument('enable_spot_restore', arg_type=get_three_state_flag(), min_api='2021-04-01', help='Enable the Spot-Try-Restore feature where evicted VMSS SPOT instances will be tried to be restored opportunistically based on capacity availability and pricing constraints') c.argument('spot_restore_timeout', min_api='2021-04-01', help='Timeout value expressed as an ISO 8601 time duration after which the platform will not try to restore the VMSS SPOT instances') c.argument('enable_agent', arg_type=get_three_state_flag(), min_api='2018-06-01', help='Indicates whether virtual machine agent should be provisioned on the virtual machine. When this property is not specified, default behavior is to set it to true. This will ensure that VM Agent is installed on the VM so that extensions can be added to the VM later') c.argument('enable_auto_update', arg_type=get_three_state_flag(), min_api='2020-06-01', help='Indicate whether Automatic Updates is enabled for the Windows virtual machine') c.argument('patch_mode', arg_type=get_enum_type(['AutomaticByOS', 'AutomaticByPlatform', 'Manual', 'ImageDefault']), min_api='2020-12-01', help='Mode of in-guest patching to IaaS virtual machine. Allowed values for Windows VM: AutomaticByOS, AutomaticByPlatform, Manual. Allowed values for Linux VM: AutomaticByPlatform, ImageDefault. Manual - You control the application of patches to a virtual machine. 
You do this by applying patches manually inside the VM. In this mode, automatic updates are disabled; the parameter --enable-auto-update must be false. AutomaticByOS - The virtual machine will automatically be updated by the OS. The parameter --enable-auto-update must be true. AutomaticByPlatform - The virtual machine will automatically be updated by the platform. ImageDefault - The virtual machine\'s default patching configuration is used. The parameters --enable-agent and --enable-auto-update must be true')

    with self.argument_context('vmss create', arg_group='Network Balancer') as c:
        LoadBalancerSkuName = self.get_models('LoadBalancerSkuName', resource_type=ResourceType.MGMT_NETWORK)
        c.argument('application_gateway', help='Name to use when creating a new application gateway (default) or referencing an existing one. Can also reference an existing application gateway by ID or specify "" for none.', options_list=['--app-gateway'])
        c.argument('app_gateway_capacity', help='The number of instances to use when creating a new application gateway.')
        c.argument('app_gateway_sku', help='SKU when creating a new application gateway.')
        c.argument('app_gateway_subnet_address_prefix', help='The subnet IP address prefix to use when creating a new application gateway in CIDR format.')
        c.argument('backend_pool_name', help='Name to use for the backend pool when creating a new load balancer or application gateway.')
        c.argument('backend_port', help='When creating a new load balancer, backend port to open with NAT rules (Defaults to 22 on Linux and 3389 on Windows). When creating an application gateway, the backend port to use for the backend HTTP settings.', type=int)
        c.argument('load_balancer', help='Name to use when creating a new load balancer (default) or referencing an existing one. Can also reference an existing load balancer by ID or specify "" for none.', options_list=['--load-balancer', '--lb'])
        c.argument('load_balancer_sku', resource_type=ResourceType.MGMT_NETWORK, min_api='2017-08-01', options_list=['--lb-sku'], arg_type=get_enum_type(LoadBalancerSkuName), help="Sku of the Load Balancer to create. Defaults to 'Standard' when single placement group is turned off; otherwise, defaults to 'Basic'. The public IP is supported to be created on edge zone only when it is 'Standard'")
        c.argument('nat_pool_name', help='Name to use for the NAT pool when creating a new load balancer.', options_list=['--lb-nat-pool-name', '--nat-pool-name'])

    with self.argument_context('vmss create', min_api='2017-03-30', arg_group='Network') as c:
        c.argument('public_ip_per_vm', action='store_true', help="Each VM instance will have a public ip. For security, you can use '--nsg' to apply appropriate rules")
        c.argument('vm_domain_name', help="domain name of VM instances, once configured, the FQDN is `vm<vm-index>.<vm-domain-name>.<..rest..>`")
        c.argument('dns_servers', nargs='+', help="space-separated IP addresses of DNS servers, e.g. 10.0.0.5 10.0.0.6")
        c.argument('accelerated_networking', arg_type=get_three_state_flag(), help="enable accelerated networking. 
Unless specified, CLI will enable it based on machine image and size") with self.argument_context('vmss update') as c: protection_policy_type = CLIArgumentType(overrides=get_three_state_flag(), arg_group="Protection Policy", min_api='2019-03-01') c.argument('protect_from_scale_in', arg_type=protection_policy_type, help="Protect the VM instance from scale-in operations.") c.argument('protect_from_scale_set_actions', arg_type=protection_policy_type, help="Protect the VM instance from scale set actions (including scale-in).") c.argument('enable_terminate_notification', min_api='2019-03-01', arg_type=get_three_state_flag(), help='Enable terminate notification') c.argument('ultra_ssd_enabled', ultra_ssd_enabled_type) c.argument('scale_in_policy', scale_in_policy_type) c.argument('user_data', help='UserData for the virtual machines in the scale set. It can be passed in as file or string. If empty string is passed in, the existing value will be deleted.', completer=FilesCompleter(), type=file_type, min_api='2021-03-01') c.argument('enable_spot_restore', arg_type=get_three_state_flag(), min_api='2021-04-01', help='Enable the Spot-Try-Restore feature where evicted VMSS SPOT instances will be tried to be restored opportunistically based on capacity availability and pricing constraints') c.argument('spot_restore_timeout', min_api='2021-04-01', help='Timeout value expressed as an ISO 8601 time duration after which the platform will not try to restore the VMSS SPOT instances') with self.argument_context('vmss update', min_api='2018-10-01', arg_group='Automatic Repairs') as c: c.argument('enable_automatic_repairs', arg_type=get_three_state_flag(), help='Enable automatic repairs') c.argument( 'automatic_repairs_grace_period', help='The amount of time (in minutes, between 30 and 90) for which automatic repairs are suspended due to a state change on VM.' ) for scope in ['vmss create', 'vmss update']: with self.argument_context(scope) as c: c.argument('terminate_notification_time', min_api='2019-03-01', help='Length of time (in minutes, between 5 and 15) a notification to be sent to the VM on the instance metadata server till the VM gets deleted') c.argument('max_batch_instance_percent', type=int, min_api='2020-12-01', help='The maximum percent of total virtual machine instances that will be upgraded simultaneously by the rolling upgrade in one batch. Default: 20%') c.argument('max_unhealthy_instance_percent', type=int, min_api='2020-12-01', help='The maximum percentage of the total virtual machine instances in the scale set that can be simultaneously unhealthy. Default: 20%') c.argument('max_unhealthy_upgraded_instance_percent', type=int, min_api='2020-12-01', help='The maximum percentage of upgraded virtual machine instances that can be found to be in an unhealthy state. Default: 20%') c.argument('pause_time_between_batches', min_api='2020-12-01', help='The wait time between completing the update for all virtual machines in one batch and starting the next batch. 
Default: 0 seconds') c.argument('enable_cross_zone_upgrade', arg_type=get_three_state_flag(), min_api='2020-12-01', help='Set this Boolean property will allow VMSS to ignore AZ boundaries when constructing upgrade batches, and only consider Update Domain and maxBatchInstancePercent to determine the batch size') c.argument('prioritize_unhealthy_instances', arg_type=get_three_state_flag(), min_api='2020-12-01', help='Set this Boolean property will lead to all unhealthy instances in a scale set getting upgraded before any healthy instances') for scope, help_prefix in [('vmss update', 'Update the'), ('vmss wait', 'Wait on the')]: with self.argument_context(scope) as c: c.argument('instance_id', id_part='child_name_1', help="{0} VM instance with this ID. If missing, {0} VMSS.".format(help_prefix)) for scope in ['vmss update-instances', 'vmss delete-instances']: with self.argument_context(scope) as c: c.argument('instance_ids', multi_ids_type, help='Space-separated list of IDs (ex: 1 2 3 ...) or * for all instances.') with self.argument_context('vmss diagnostics') as c: c.argument('vmss_name', id_part=None, help='Scale set name') with self.argument_context('vmss disk') as c: options_list = ['--vmss-name'] + [c.deprecate(target=opt, redirect='--vmss-name', hide=True)for opt in name_arg_type.settings['options_list']] new_vmss_name_type = CLIArgumentType(overrides=vmss_name_type, options_list=options_list) c.argument('lun', type=int, help='0-based logical unit number (LUN). Max value depends on the Virtual Machine instance size.') c.argument('size_gb', options_list=['--size-gb', '-z'], help='size in GB. Max size: 4095 GB (certain preview disks can be larger).', type=int) c.argument('vmss_name', new_vmss_name_type, completer=get_resource_name_completion_list('Microsoft.Compute/virtualMachineScaleSets')) c.argument('disk', validator=validate_vmss_disk, help='existing disk name or ID to attach or detach from VM instances', min_api='2017-12-01', completer=get_resource_name_completion_list('Microsoft.Compute/disks')) c.argument('instance_id', help='Scale set VM instance id', min_api='2017-12-01') c.argument('sku', arg_type=disk_sku, help='Underlying storage SKU') with self.argument_context('vmss encryption') as c: c.argument('vmss_name', vmss_name_type, completer=get_resource_name_completion_list('Microsoft.Compute/virtualMachineScaleSets')) with self.argument_context('vmss extension') as c: c.argument('extension_name', name_arg_type, help='Name of the extension.') c.argument('vmss_name', vmss_name_type, options_list=['--vmss-name'], id_part=None) with self.argument_context('vmss nic') as c: c.argument('virtual_machine_scale_set_name', options_list=['--vmss-name'], help='Scale set name.', completer=get_resource_name_completion_list('Microsoft.Compute/virtualMachineScaleSets'), id_part='name') c.argument('virtualmachine_index', options_list=['--instance-id'], id_part='child_name_1') c.argument('network_interface_name', options_list=['--name', '-n'], metavar='NIC_NAME', help='The network interface (NIC).', completer=get_resource_name_completion_list('Microsoft.Network/networkInterfaces'), id_part='child_name_2') with self.argument_context('vmss nic list') as c: c.argument('virtual_machine_scale_set_name', arg_type=vmss_name_type, options_list=['--vmss-name'], id_part=None) with self.argument_context('vmss set-orchestration-service-state') as c: c.argument('service_name', arg_type=get_enum_type(OrchestrationServiceNames), help='The name of the orchestration service.') c.argument('action', 
arg_type=get_enum_type(OrchestrationServiceStateAction), help='The action to be performed.') # endregion # region VM & VMSS Shared for scope in ['vm', 'vmss']: with self.argument_context(scope) as c: c.argument('no_auto_upgrade', options_list=['--no-auto-upgrade-minor-version', c.deprecate(target='--no-auto-upgrade', redirect='--no-auto-upgrade-minor-version')], arg_type=get_three_state_flag(), help='If set, the extension service will not automatically pick or upgrade to the latest minor version, even if the extension is redeployed.') with self.argument_context('{} run-command'.format(scope)) as c: c.argument('command_id', completer=get_vm_run_command_completion_list, help="The command id. Use 'az {} run-command list' to get the list".format(scope)) if scope == 'vmss': c.argument('vmss_name', vmss_name_type) with self.argument_context('{} run-command invoke'.format(scope)) as c: c.argument('parameters', nargs='+', help="space-separated parameters in the format of '[name=]value'") c.argument('scripts', nargs='+', help="Space-separated script lines. Use @{file} to load script from a file") with self.argument_context('{} stop'.format(scope)) as c: c.argument('skip_shutdown', action='store_true', help='Skip shutdown and power-off immediately.', min_api='2019-03-01') for scope in ['vm identity assign', 'vmss identity assign']: with self.argument_context(scope) as c: c.argument('assign_identity', options_list=['--identities'], nargs='*', help="Space-separated identities to assign. Use '{0}' to refer to the system assigned identity. Default: '{0}'".format(MSI_LOCAL_ID)) c.argument('vm_name', existing_vm_name) c.argument('vmss_name', vmss_name_type) for scope in ['vm identity remove', 'vmss identity remove']: with self.argument_context(scope) as c: c.argument('identities', nargs='+', help="Space-separated identities to remove. Use '{0}' to refer to the system assigned identity. Default: '{0}'".format(MSI_LOCAL_ID)) c.argument('vm_name', existing_vm_name) c.argument('vmss_name', vmss_name_type) for scope in ['vm identity show', 'vmss identity show']: with self.argument_context(scope) as c: c.argument('vm_name', existing_vm_name) c.argument('vmss_name', vmss_name_type) for scope in ['vm create', 'vmss create']: with self.argument_context(scope) as c: c.argument('location', get_location_type(self.cli_ctx), help='Location in which to create VM and related resources. If default location is not configured, will default to the resource group\'s location') c.argument('tags', tags_type) c.argument('no_wait', help='Do not wait for the long-running operation to finish.') c.argument('validate', options_list=['--validate'], help='Generate and validate the ARM template without creating any resources.', action='store_true') c.argument('size', help='The VM size to be created. 
See https://azure.microsoft.com/pricing/details/virtual-machines/ for size info.') c.argument('image', completer=get_urn_aliases_completion_list) c.argument('custom_data', help='Custom init script file or text (cloud-init, cloud-config, etc..)', completer=FilesCompleter(), type=file_type) c.argument('secrets', multi_ids_type, help='One or many Key Vault secrets as JSON strings or files via `@{path}` containing `[{ "sourceVault": { "id": "value" }, "vaultCertificates": [{ "certificateUrl": "value", "certificateStore": "cert store name (only on windows)"}] }]`', type=file_type, completer=FilesCompleter()) c.argument('assign_identity', nargs='*', arg_group='Managed Service Identity', help="accept system or user assigned identities separated by spaces. Use '[system]' to refer system assigned identity, or a resource id to refer user assigned identity. Check out help for more examples") c.ignore('aux_subscriptions') c.argument('edge_zone', edge_zone_type) with self.argument_context(scope, arg_group='Authentication') as c: c.argument('generate_ssh_keys', action='store_true', help='Generate SSH public and private key files if missing. The keys will be stored in the ~/.ssh directory') c.argument('admin_username', help='Username for the VM. Default value is current username of OS. If the default value is system reserved, then default value will be set to azureuser. Please refer to https://docs.microsoft.com/rest/api/compute/virtualmachines/createorupdate#osprofile to get a full list of reserved values.') c.argument('admin_password', help="Password for the VM if authentication type is 'Password'.") c.argument('ssh_key_value', options_list=['--ssh-key-values'], completer=FilesCompleter(), type=file_type, nargs='+') c.argument('ssh_dest_key_path', help='Destination file path on the VM for the SSH key. If the file already exists, the specified key(s) are appended to the file. Destination path for SSH public keys is currently limited to its default value "/home/username/.ssh/authorized_keys" due to a known issue in Linux provisioning agent.') c.argument('authentication_type', help='Type of authentication to use with the VM. Defaults to password for Windows and SSH public key for Linux. "all" enables both ssh and password authentication. ', arg_type=get_enum_type(['ssh', 'password', 'all'])) with self.argument_context(scope, arg_group='Storage') as c: if DiskStorageAccountTypes: allowed_values = ", ".join([sku.value for sku in DiskStorageAccountTypes]) else: allowed_values = ", ".join(['Premium_LRS', 'Standard_LRS']) usage = 'Usage: [--storage-sku SKU | --storage-sku ID=SKU ID=SKU ID=SKU...], where each ID is "os" or a 0-indexed lun.' allowed_values = 'Allowed values: {}.'.format(allowed_values) storage_sku_help = 'The SKU of the storage account with which to persist VM. Use a singular sku that would be applied across all disks, ' \ 'or specify individual disks. {} {}'.format(usage, allowed_values) c.argument('os_disk_name', help='The name of the new VM OS disk.') c.argument('os_type', help='Type of OS installed on a custom VHD. Do not use when specifying an URN or URN alias.', arg_type=get_enum_type(['windows', 'linux'])) c.argument('storage_account', help="Only applicable when used with `--use-unmanaged-disk`. The name to use when creating a new storage account or referencing an existing one. 
If omitted, an appropriate storage account in the same resource group and location will be used, or a new one will be created.")
            c.argument('storage_sku', nargs='+', help=storage_sku_help)
            c.argument('storage_container_name', help="Only applicable when used with `--use-unmanaged-disk`. Name of the storage container for the VM OS disk. Default: vhds")
            c.ignore('os_publisher', 'os_offer', 'os_sku', 'os_version', 'storage_profile')
            c.argument('use_unmanaged_disk', action='store_true', help='Do not use managed disk to persist VM')
            c.argument('os_disk_size_gb', type=int, help='OS disk size in GB to create.')
            c.argument('data_disk_sizes_gb', nargs='+', type=int, help='space-separated empty managed data disk sizes in GB to create')
            c.ignore('disk_info', 'storage_account_type', 'public_ip_address_type', 'nsg_type', 'nic_type', 'vnet_type', 'load_balancer_type', 'app_gateway_type')
            c.argument('os_caching', options_list=[self.deprecate(target='--storage-caching', redirect='--os-disk-caching', hide=True), '--os-disk-caching'], help='Storage caching type for the VM OS disk. Default: ReadWrite', arg_type=get_enum_type(CachingTypes))
            c.argument('data_caching', options_list=['--data-disk-caching'], nargs='+', help="storage caching type for data disk(s), including 'None', 'ReadOnly', 'ReadWrite', etc. Use a singular value to apply on all disks, or use `<lun>=<value1> <lun>=<value2>` to configure individual disk")
            c.argument('ultra_ssd_enabled', ultra_ssd_enabled_type)
            c.argument('ephemeral_os_disk', arg_type=get_three_state_flag(), min_api='2018-06-01', help='Allows you to create an OS disk directly on the host node, providing local disk performance and faster VM/VMSS reimage time.', is_preview=True)
            c.argument('os_disk_encryption_set', min_api='2019-07-01', help='Name or ID of disk encryption set for OS disk.')
            c.argument('data_disk_encryption_sets', nargs='+', min_api='2019-07-01', help='Names or IDs (space delimited) of disk encryption sets for data disks.')
            c.argument('data_disk_iops', min_api='2019-07-01', nargs='+', type=int, help='Specify the Read-Write IOPS (space delimited) for the managed disk. Should be used only when StorageAccountType is UltraSSD_LRS. If not specified, a default value would be assigned based on diskSizeGB.')
            c.argument('data_disk_mbps', min_api='2019-07-01', nargs='+', type=int, help='Specify the bandwidth in MB per second (space delimited) for the managed disk. Should be used only when StorageAccountType is UltraSSD_LRS. If not specified, a default value would be assigned based on diskSizeGB.')
            c.argument('specialized', arg_type=get_three_state_flag(), help='Indicate whether the source image is specialized.')
            c.argument('encryption_at_host', arg_type=get_three_state_flag(), help='Enable Host Encryption for the VM or VMSS. 
This will enable the encryption for all the disks including Resource/Temp disk at host itself.') c.argument('os_disk_delete_option', arg_type=get_enum_type(self.get_models('DiskDeleteOptionTypes')), min_api='2021-03-01', help='Specify the behavior of the managed disk when the VM gets deleted i.e whether the managed disk is deleted or detached.') c.argument('data_disk_delete_option', options_list=['--data-disk-delete-option', self.deprecate(target='--data-delete-option', redirect='--data-disk-delete-option', hide=True)], nargs='+', min_api='2021-03-01', help='Specify whether data disk should be deleted or detached upon VM deletion.') with self.argument_context(scope, arg_group='Network') as c: c.argument('vnet_name', help='Name of the virtual network when creating a new one or referencing an existing one.') c.argument('vnet_address_prefix', help='The IP address prefix to use when creating a new VNet in CIDR format.') c.argument('subnet', help='The name of the subnet when creating a new VNet or referencing an existing one. Can also reference an existing subnet by ID. If both vnet-name and subnet are omitted, an appropriate VNet and subnet will be selected automatically, or a new one will be created.') c.argument('subnet_address_prefix', help='The subnet IP address prefix to use when creating a new VNet in CIDR format.') c.argument('nics', nargs='+', help='Names or IDs of existing NICs to attach to the VM. The first NIC will be designated as primary. If omitted, a new NIC will be created. If an existing NIC is specified, do not specify subnet, VNet, public IP or NSG.') c.argument('private_ip_address', help='Static private IP address (e.g. 10.0.0.5).') c.argument('public_ip_address', help='Name of the public IP address when creating one (default) or referencing an existing one. Can also reference an existing public IP by ID or specify "" for None (\'""\' in Azure CLI using PowerShell or --% operator).') c.argument('public_ip_address_allocation', help=None, default=None, arg_type=get_enum_type(['dynamic', 'static'])) c.argument('public_ip_address_dns_name', help='Globally unique DNS name for a newly created public IP.') if self.supported_api_version(min_api='2017-08-01', resource_type=ResourceType.MGMT_NETWORK): PublicIPAddressSkuName = self.get_models('PublicIPAddressSkuName', resource_type=ResourceType.MGMT_NETWORK) c.argument('public_ip_sku', help='Public IP SKU. It is set to Basic by default. The public IP is supported to be created on edge zone only when it is \'Standard\'', default=None, arg_type=get_enum_type(PublicIPAddressSkuName)) c.argument('nic_delete_option', nargs='+', min_api='2021-03-01', help='Specify what happens to the network interface when the VM is deleted. Use a singular ' 'value to apply on all resources, or use <Name>=<Value> to configure ' 'the delete behavior for individual resources. 
Possible options are Delete and Detach.')

        with self.argument_context(scope, arg_group='Marketplace Image Plan') as c:
            c.argument('plan_name', help='plan name')
            c.argument('plan_product', help='plan product')
            c.argument('plan_publisher', help='plan publisher')
            c.argument('plan_promotion_code', help='plan promotion code')

    for scope in ['vm create', 'vmss create', 'vm identity assign', 'vmss identity assign']:
        with self.argument_context(scope) as c:
            arg_group = 'Managed Service Identity' if scope.split()[-1] == 'create' else None
            c.argument('identity_scope', options_list=['--scope'], arg_group=arg_group, help="Scope that the system assigned identity can access")
            c.argument('identity_role', options_list=['--role'], arg_group=arg_group, help="Role name or id the system assigned identity will have")
            c.ignore('identity_role_id')

    with self.argument_context('vm auto-shutdown') as c:
        c.argument('off', action='store_true', help='Turn off auto-shutdown for VM. Configuration will be cleared.')
        c.argument('email', help='The email recipient to send notifications to (can be a list of semi-colon separated email addresses)')
        c.argument('time', help='The UTC time of day the schedule will occur every day. Format: hhmm. Example: 1730')
        c.argument('webhook', help='The webhook URL to which the notification will be sent')
        c.argument('location', validator=get_default_location_from_resource_group)

    for scope in ['vm diagnostics', 'vmss diagnostics']:
        with self.argument_context(scope) as c:
            c.argument('version', help='version of the diagnostics extension. Will use the latest if not specified')
            c.argument('settings', help='json string or a file path, which defines data to be collected.', type=validate_file_or_dict, completer=FilesCompleter())
            c.argument('protected_settings', help='json string or a file path containing private configurations such as storage account keys, etc.', type=validate_file_or_dict, completer=FilesCompleter())
            c.argument('is_windows_os', action='store_true', help='for Windows VMs')

    for scope in ['vm encryption', 'vmss encryption']:
        with self.argument_context(scope) as c:
            c.argument('volume_type', help='Type of volume that the encryption operation is performed on', arg_type=get_enum_type(['DATA', 'OS', 'ALL']))
            c.argument('force', action='store_true', help='continue by ignoring client side validation errors')
            c.argument('disk_encryption_keyvault', help='Name or ID of the key vault where the generated encryption key will be placed.')
            c.argument('key_encryption_key', help='Key vault key name or URL used to encrypt the disk encryption key.')
            c.argument('key_encryption_keyvault', help='Name or ID of the key vault containing the key encryption key used to encrypt the disk encryption key. If missing, CLI will use `--disk-encryption-keyvault`.')

    for scope in ['vm extension', 'vmss extension']:
        with self.argument_context(scope) as c:
            c.argument('publisher', help='The name of the extension publisher.')
            c.argument('settings', type=validate_file_or_dict, help='Extension settings in JSON format. A JSON file path is also accepted.')
            c.argument('protected_settings', type=validate_file_or_dict, help='Protected settings in JSON format for sensitive information like credentials. A JSON file path is also accepted.')
            c.argument('version', help='The version of the extension. 
To pin extension version to this value, please specify --no-auto-upgrade-minor-version.') c.argument('enable_auto_upgrade', arg_type=get_three_state_flag(), help='Indicate the extension should be automatically upgraded by the platform if there is a newer version of the extension available.') with self.argument_context('vm extension set') as c: c.argument('vm_extension_name', name_arg_type, completer=get_resource_name_completion_list('Microsoft.Compute/virtualMachines/extensions'), help='Name of the extension.', id_part=None) c.argument('force_update', action='store_true', help='force to update even if the extension configuration has not changed.') c.argument('extension_instance_name', extension_instance_name_type) with self.argument_context('vmss extension set', min_api='2017-12-01') as c: c.argument('force_update', action='store_true', help='force to update even if the extension configuration has not changed.') c.argument('extension_instance_name', extension_instance_name_type) c.argument('provision_after_extensions', nargs='+', help='Space-separated list of extension names after which this extension should be provisioned. These extensions must already be set on the vm.') for scope in ['vm extension image', 'vmss extension image']: with self.argument_context(scope) as c: c.argument('image_location', options_list=['--location', '-l'], help='Image location.') c.argument('name', help='Image name', id_part=None) c.argument('publisher_name', options_list=['--publisher', '-p'], help='Image publisher name') c.argument('type', options_list=['--name', '-n'], help='Name of the extension') c.argument('latest', action='store_true', help='Show the latest version only.') c.argument('version', help='Extension version') c.argument('orderby', help="the $orderby odata query option") c.argument('top', help='the $top odata query option') for scope in ['vm create', 'vm update', 'vmss create', 'vmss update']: with self.argument_context(scope) as c: license_msg = "Specifies that the Windows image or disk was licensed on-premises. " \ "To enable Azure Hybrid Benefit for Windows Server, use 'Windows_Server'. " \ "To enable Multitenant Hosting Rights for Windows 10, use 'Windows_Client'. " \ "For more information see the Azure Windows VM online docs." c.argument('license_type', help=license_msg, arg_type=get_enum_type(['Windows_Server', 'Windows_Client', 'RHEL_BYOS', 'SLES_BYOS', 'None'])) c.argument('priority', resource_type=ResourceType.MGMT_COMPUTE, min_api='2019-03-01', arg_type=get_enum_type(self.get_models('VirtualMachinePriorityTypes'), default=None), help="Priority. Use 'Spot' to run short-lived workloads in a cost-effective way. 'Low' enum will be deprecated in the future. Please use 'Spot' to deploy Azure spot VM and/or VMSS. Default to Regular.") c.argument('max_price', min_api='2019-03-01', type=float, is_preview=True, help='The maximum price (in US Dollars) you are willing to pay for a Spot VM/VMSS. -1 indicates that the Spot VM/VMSS should not be evicted for price reasons') c.argument('capacity_reservation_group', options_list=['--capacity-reservation-group', '--crg'], help='The ID or name of the capacity reservation group that is used to allocate. Pass in "None" to disassociate the capacity reservation group. 
Please note that if you want to delete a VM/VMSS that has been associated with capacity reservation group, you need to disassociate the capacity reservation group first.', min_api='2021-04-01', is_preview=True) with self.argument_context('vm update') as c: c.argument('license_type', help=license_msg, arg_type=get_enum_type( ['Windows_Server', 'Windows_Client', 'RHEL_BYOS', 'SLES_BYOS', 'RHEL_ELS_6', 'None'])) c.argument('user_data', help='UserData for the VM. It can be passed in as file or string. If empty string is passed in, the existing value will be deleted.', completer=FilesCompleter(), type=file_type, min_api='2021-03-01') with self.argument_context('vmss create') as c: c.argument('priority', resource_type=ResourceType.MGMT_COMPUTE, min_api='2017-12-01', arg_type=get_enum_type(self.get_models('VirtualMachinePriorityTypes'), default=None), help="Priority. Use 'Spot' to run short-lived workloads in a cost-effective way. 'Low' enum will be deprecated in the future. Please use 'Spot' to deploy Azure spot VM and/or VMSS. Default to Regular.") with self.argument_context('sig') as c: c.argument('gallery_name', options_list=['--gallery-name', '-r'], help='gallery name') c.argument('gallery_image_name', options_list=['--gallery-image-definition', '-i'], help='gallery image definition') c.argument('gallery_image_version', options_list=['--gallery-image-version', '-e'], help='gallery image version') for scope in ['sig show', 'sig image-definition show', 'sig image-definition delete']: with self.argument_context(scope) as c: c.argument('gallery_name', options_list=['--gallery-name', '-r'], id_part='name', help='gallery name') c.argument('gallery_image_name', options_list=['--gallery-image-definition', '-i'], id_part='child_name_1', help='gallery image definition') with self.argument_context('sig list-shared') as c: c.argument('location', arg_type=get_location_type(self.cli_ctx)) c.argument('shared_to', shared_to_type) with self.argument_context('sig show-shared') as c: c.argument('location', arg_type=get_location_type(self.cli_ctx), id_part='name') c.argument('gallery_unique_name', type=str, help='The unique name of the Shared Gallery.', id_part='child_name_1') for scope in ['sig share add', 'sig share remove']: with self.argument_context(scope) as c: c.argument('gallery_name', type=str, help='The name of the Shared Image Gallery.', id_part='name') c.argument('subscription_ids', nargs='+', help='A list of subscription ids to share the gallery.') c.argument('tenant_ids', nargs='+', help='A list of tenant ids to share the gallery.') with self.argument_context('sig share add') as c: c.argument('op_type', default='Add', deprecate_info=c.deprecate(hide=True), help='distinguish add operation and remove operation') with self.argument_context('sig share remove') as c: c.argument('op_type', default='Remove', deprecate_info=c.deprecate(hide=True), help='distinguish add operation and remove operation') with self.argument_context('sig share reset') as c: c.argument('gallery_name', type=str, help='The name of the Shared Image Gallery.', id_part='name') with self.argument_context('sig image-definition create') as c: c.argument('offer', options_list=['--offer', '-f'], help='image offer') c.argument('sku', options_list=['--sku', '-s'], help='image sku') c.argument('publisher', options_list=['--publisher', '-p'], help='image publisher') c.argument('os_type', arg_type=get_enum_type(['Windows', 'Linux']), help='the type of the OS that is included in the disk if creating a VM from user-image or a specialized VHD') 
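# --- Illustrative sketch (not part of the CLI source) -----------------------
# The --user-data argument registered above accepts either a file path or a
# literal string, and an empty string means "clear the existing value". The
# helper below is a hypothetical illustration of how such an input could be
# normalized before being sent to the service; it does not reproduce the
# CLI's actual handling, and the base64 step is an assumption for clarity.
import base64
import os


def _normalize_user_data(value):
    """Return base64-encoded user data, or None when the caller passed ''."""
    if value == '':
        return None  # empty string: the existing value should be deleted
    if os.path.isfile(value):
        with open(value, 'rb') as f:
            raw = f.read()
    else:
        raw = value.encode('utf-8')
    return base64.b64encode(raw).decode('utf-8')
# -----------------------------------------------------------------------------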
c.argument('os_state', arg_type=get_enum_type(self.get_models('OperatingSystemStateTypes')), help="This property allows the user to specify whether the virtual machines created under this image are 'Generalized' or 'Specialized'.") c.argument('hyper_v_generation', arg_type=get_enum_type(self.get_models('HyperVGenerationTypes')), help='The hypervisor generation of the Virtual Machine. Applicable to OS disks only.') c.argument('minimum_cpu_core', type=int, arg_group='Recommendation', help='minimum cpu cores') c.argument('maximum_cpu_core', type=int, arg_group='Recommendation', help='maximum cpu cores') c.argument('minimum_memory', type=int, arg_group='Recommendation', help='minimum memory in MB') c.argument('maximum_memory', type=int, arg_group='Recommendation', help='maximum memory in MB') c.argument('plan_publisher', help='plan publisher', arg_group='Purchase plan') c.argument('plan_name', help='plan name', arg_group='Purchase plan') c.argument('plan_product', help='plan product', arg_group='Purchase plan') c.argument('eula', help='The Eula agreement for the gallery image') c.argument('privacy_statement_uri', help='The privacy statement uri') c.argument('release_note_uri', help='The release note uri') c.argument('end_of_life_date', help="the end of life date, e.g. '2020-12-31'") c.argument('disallowed_disk_types', nargs='*', help='disk types which would not work with the image, e.g., Standard_LRS') c.argument('features', help='A list of gallery image features. E.g. "IsSecureBootSupported=true IsMeasuredBootSupported=false"') with self.argument_context('sig image-definition list-shared') as c: c.argument('location', arg_type=get_location_type(self.cli_ctx), id_part='name') c.argument('gallery_unique_name', type=str, help='The unique name of the Shared Gallery.', id_part='child_name_1') c.argument('shared_to', shared_to_type) with self.argument_context('sig image-definition show-shared') as c: c.argument('location', arg_type=get_location_type(self.cli_ctx), id_part='name') c.argument('gallery_unique_name', type=str, help='The unique name of the Shared Gallery.', id_part='child_name_1') c.argument('gallery_image_name', options_list=['--gallery-image-definition', '-i'], type=str, help='The name ' 'of the Shared Gallery Image Definition from which the Image Versions are to be listed.', id_part='child_name_2') with self.argument_context('sig create') as c: c.argument('description', help='the description of the gallery') c.argument('permissions', arg_type=get_enum_type(GallerySharingPermissionTypes), arg_group='Sharing Profile', min_api='2020-09-30', is_experimental=True, help='This property allows you to specify the permission of sharing gallery.') c.argument('soft_delete', arg_type=get_three_state_flag(), min_api='2021-03-01', is_preview=True, help='Enable soft-deletion for resources in this gallery, ' 'allowing them to be recovered within retention time.') with self.argument_context('sig update') as c: c.ignore('gallery') c.argument('permissions', arg_type=get_enum_type(GallerySharingPermissionTypes), arg_group='Sharing Profile', min_api='2020-09-30', is_experimental=True, help='This property allows you to specify the permission of sharing gallery.') c.argument('soft_delete', arg_type=get_three_state_flag(), min_api='2021-03-01', is_preview=True, help='Enable soft-deletion for resources in this gallery, ' 'allowing them to be recovered within retention time.') with self.argument_context('sig image-definition create') as c: c.argument('description', help='the description of the gallery image 
definition') with self.argument_context('sig image-definition update') as c: c.ignore('gallery_image') with self.argument_context('sig image-version') as c: deprecated_option = c.deprecate(target='--gallery-image-version-name', redirect='--gallery-image-version', hide=True, expiration="3.0.0") c.argument('gallery_image_version_name', options_list=['--gallery-image-version', '-e', deprecated_option], help='Gallery image version in semantic version pattern. The allowed characters are digit and period. Digits must be within the range of a 32-bit integer, e.g. `<MajorVersion>.<MinorVersion>.<Patch>`') with self.argument_context('sig image-version create', resource_type=ResourceType.MGMT_COMPUTE, operation_group='gallery_image_versions') as c: c.argument('gallery_image_version', options_list=['--gallery-image-version', '-e'], help='Gallery image version in semantic version pattern. The allowed characters are digit and period. Digits must be within the range of a 32-bit integer, e.g. `<MajorVersion>.<MinorVersion>.<Patch>`') c.argument('description', help='the description of the gallery image version') c.argument('managed_image', help='image name(if in the same resource group) or resource id') c.argument('os_snapshot', help='Name or ID of OS disk snapshot') c.argument('data_snapshots', nargs='+', help='Names or IDs (space-delimited) of data disk snapshots') c.argument('data_snapshot_luns', nargs='+', help='Logical unit numbers (space-delimited) of data disk snapshots') c.argument('exclude_from_latest', arg_type=get_three_state_flag(), help='The flag means that if it is set to true, people deploying VMs with version omitted will not use this version.') c.argument('version', help='image version') c.argument('end_of_life_date', help="the end of life date, e.g. '2020-12-31'") c.argument('storage_account_type', help="The default storage account type to be used per region. To set regional storage account types, use --target-regions", arg_type=get_enum_type(["Standard_LRS", "Standard_ZRS", "Premium_LRS"]), min_api='2019-03-01') c.argument('target_region_encryption', nargs='+', help='Space-separated list of customer managed keys for encrypting the OS and data disks in the gallery artifact for each region. Format for each region: `<os_des>,<lun1>,<lun1_des>,<lun2>,<lun2_des>`. Use "null" as a placeholder.') c.argument('os_vhd_uri', help='Source VHD URI of OS disk') c.argument('os_vhd_storage_account', help='Name or ID of storage account of source VHD URI of OS disk') c.argument('data_vhds_uris', nargs='+', help='Source VHD URIs (space-delimited) of data disks') c.argument('data_vhds_luns', nargs='+', help='Logical unit numbers (space-delimited) of source VHD URIs of data disks') c.argument('data_vhds_storage_accounts', options_list=['--data-vhds-storage-accounts', '--data-vhds-sa'], nargs='+', help='Names or IDs (space-delimited) of storage accounts of source VHD URIs of data disks') c.argument('replication_mode', min_api='2021-07-01', arg_type=get_enum_type(ReplicationMode), help='Optional parameter which specifies the mode to be used for replication. 
This property is not updatable.') with self.argument_context('sig image-version list-shared') as c: c.argument('location', arg_type=get_location_type(self.cli_ctx), id_part='name') c.argument('gallery_unique_name', type=str, help='The unique name of the Shared Gallery.', id_part='child_name_1') c.argument('gallery_image_name', options_list=['--gallery-image-definition', '-i'], type=str, help='The name ' 'of the Shared Gallery Image Definition from which the Image Versions are to be listed.', id_part='child_name_2') c.argument('shared_to', shared_to_type) with self.argument_context('sig image-version show') as c: c.argument('expand', help="The expand expression to apply on the operation, e.g. 'ReplicationStatus'") with self.argument_context('sig image-version show-shared') as c: c.argument('location', arg_type=get_location_type(self.cli_ctx), id_part='name') c.argument('gallery_unique_name', type=str, help='The unique name of the Shared Gallery.', id_part='child_name_1') c.argument('gallery_image_name', options_list=['--gallery-image-definition', '-i'], type=str, help='The name ' 'of the Shared Gallery Image Definition from which the Image Versions are to be listed.', id_part='child_name_2') c.argument('gallery_image_version_name', options_list=['--gallery-image-version', '-e'], type=str, help='The ' 'name of the gallery image version to be created. Needs to follow semantic version name pattern: ' 'The allowed characters are digit and period. Digits must be within the range of a 32-bit integer. ' 'Format: <MajorVersion>.<MinorVersion>.<Patch>', id_part='child_name_3') for scope in ['sig image-version create', 'sig image-version update']: with self.argument_context(scope) as c: c.argument('target_regions', nargs='*', validator=process_gallery_image_version_namespace, help='Space-separated list of regions and their replica counts. Use `<region>[=<replica count>][=<storage account type>]` to optionally set the replica count and/or storage account type for each region. ' 'If a replica count is not specified, the default replica count will be used. If a storage account type is not specified, the default storage account type will be used') c.argument('replica_count', help='The default number of replicas to be created per region. To set regional replication counts, use --target-regions', type=int) # endregion # region Proximity Placement Group with self.argument_context('ppg', min_api='2018-04-01') as c: c.argument('proximity_placement_group_name', arg_type=name_arg_type, help="The name of the proximity placement group.") with self.argument_context('ppg create', min_api='2018-04-01') as c: c.argument('ppg_type', options_list=['--type', '-t'], help="The type of the proximity placement group. 
Allowed values: Standard.") c.argument('tags', tags_type) with self.argument_context('ppg show', min_api='2019-07-01') as c: c.argument('include_colocation_status', action='store_true', help='Enable fetching the colocation status of all the resources in the proximity placement group.') for scope, item in [('vm create', 'VM'), ('vmss create', 'VMSS'), ('vm availability-set create', 'availability set'), ('vm update', 'VM'), ('vmss update', 'VMSS'), ('vm availability-set update', 'availability set')]: with self.argument_context(scope, min_api='2018-04-01') as c: c.argument('proximity_placement_group', options_list=['--ppg'], help="The name or ID of the proximity placement group the {} should be associated with.".format(item), validator=_validate_proximity_placement_group) # only availability set does not have a command level validator, so this should be added. # endregion # region VM Monitor with self.argument_context('vm monitor log show') as c: c.argument('analytics_query', options_list=['--analytics-query', '-q'], help="Query to execute over Log Analytics data.") c.argument('timespan', help="Timespan over which to query. Defaults to querying all available data.") with self.argument_context('vm monitor metrics') as c: c.argument('metricnamespace', options_list=['--namespace'], help='Namespace to query metric definitions for.') with self.argument_context('vm monitor metrics tail') as c: from azure.mgmt.monitor.models import AggregationType c.extra('resource_group_name', required=True) c.argument('resource', arg_type=existing_vm_name, help='Name or ID of a virtual machine', validator=validate_vm_name_for_monitor_metrics, id_part=None) c.argument('metadata', action='store_true') c.argument('dimension', nargs='*', validator=validate_metric_dimension) c.argument('aggregation', arg_type=get_enum_type(t for t in AggregationType if t.name != 'none'), nargs='*') c.argument('metrics', nargs='*') c.argument('orderby', help='Aggregation to use for sorting results and the direction of the sort. Only one order can be specificed. Examples: sum asc') c.argument('top', help='Max number of records to retrieve. Valid only if --filter used.') c.argument('filters', options_list=['--filter']) c.argument('metric_namespace', options_list=['--namespace']) with self.argument_context('vm monitor metrics tail', arg_group='Time') as c: c.argument('start_time', arg_type=get_datetime_type(help='Start time of the query.')) c.argument('end_time', arg_type=get_datetime_type(help='End time of the query. Defaults to the current time.')) c.argument('offset', type=get_period_type(as_timedelta=True)) c.argument('interval', arg_group='Time', type=get_period_type()) with self.argument_context('vm monitor metrics list-definitions') as c: c.extra('resource_group_name', required=True) c.argument('resource_uri', arg_type=existing_vm_name, help='Name or ID of a virtual machine', validator=validate_vm_name_for_monitor_metrics, id_part=None) # endregion # region disk encryption set with self.argument_context('disk-encryption-set') as c: c.argument('disk_encryption_set_name', disk_encryption_set_name) c.argument('key_url', help='URL pointing to a key or secret in KeyVault.') c.argument('source_vault', help='Name or ID of the KeyVault containing the key or secret.') c.argument('encryption_type', arg_type=get_enum_type(['EncryptionAtRestWithPlatformKey', 'EncryptionAtRestWithCustomerKey', 'EncryptionAtRestWithPlatformAndCustomerKeys']), help='The type of key used to encrypt the data of the disk. 
EncryptionAtRestWithPlatformKey: Disk is encrypted at rest with Platform managed key. It is the default encryption type. EncryptionAtRestWithCustomerKey: Disk is encrypted at rest with Customer managed key that can be changed and revoked by a customer. EncryptionAtRestWithPlatformAndCustomerKeys: Disk is encrypted at rest with 2 layers of encryption. One of the keys is Customer managed and the other key is Platform managed.') c.argument('location', validator=get_default_location_from_resource_group) c.argument('tags', tags_type) c.argument('enable_auto_key_rotation', arg_type=get_three_state_flag(), min_api='2020-12-01', options_list=['--enable-auto-key-rotation', '--auto-rotation'], help='Enable automatic rotation of keys.') # endregion # region DiskAccess with self.argument_context('disk-access', resource_type=ResourceType.MGMT_COMPUTE, operation_group='disk_accesses') as c: c.argument('disk_access_name', arg_type=name_arg_type, help='Name of the disk access resource.', id_part='name') c.argument('location', validator=get_default_location_from_resource_group) c.argument('tags', tags_type) # endRegion with self.argument_context('capacity reservation group') as c: c.argument('location', arg_type=get_location_type(self.cli_ctx), validator=get_default_location_from_resource_group) c.argument('capacity_reservation_group_name', options_list=['--capacity-reservation-group', '-n'], help='The name of the capacity reservation group.') c.argument('tags', tags_type) with self.argument_context('capacity reservation group create') as c: c.argument('zones', zones_type, help='Availability Zones to use for this capacity reservation group. If not provided, the group supports only regional resources in the region. If provided, enforces each capacity reservation in the group to be in one of the zones.') with self.argument_context('capacity reservation group show') as c: c.argument('instance_view', action='store_true', options_list=['--instance-view', '-i'], help='Retrieve the list of instance views of the capacity reservations under the capacity reservation group which is a snapshot of the runtime properties of a capacity reservation that is managed by the platform and can change outside of control plane operations.') with self.argument_context('capacity reservation group list') as c: c.argument('vm_instance', action='store_true', help='Retrieve the Virtual Machine Instance which are associated to capacity reservation group in the response.') c.argument('vmss_instance', action='store_true', help='Retrieve the ScaleSet VM Instance which are associated to capacity reservation group in the response.') with self.argument_context('capacity reservation') as c: c.argument('location', arg_type=get_location_type(self.cli_ctx), validator=get_default_location_from_resource_group) c.argument('capacity_reservation_group_name', options_list=['--capacity-reservation-group', '-c'], help='The name of the capacity reservation group.') c.argument('capacity_reservation_name', options_list=['--capacity-reservation-name', '-n'], help='The name of the capacity reservation.') c.argument('capacity', type=int, help='Specify the number of virtual machines in the scale set.') c.argument('tags', tags_type) with self.argument_context('capacity reservation create') as c: c.argument('zone', zone_type, help='Availability Zone to use for this capacity reservation. The zone has to be single value and also should be part for the list of zones specified during the capacity reservation group creation. 
If not provided, the reservation supports only non-zonal deployments. If provided, enforces VM/VMSS using this capacity reservation to be in same zone.') c.argument('sku_name', options_list=['--sku', '-s'], required=True, help='The SKU of the resource for which capacity needs be reserved. Currently VM Skus with the capability called "CapacityReservationSupported" set to true are supported. Refer to List Microsoft.Compute SKUs in a region (https://docs.microsoft.com/rest/api/compute/resourceskus/list) for supported values.') with self.argument_context('capacity reservation show') as c: c.argument('instance_view', action='store_true', options_list=['--instance-view', '-i'], help='Retrieve a snapshot of the runtime properties of the capacity reservation that is managed by the platform and can change outside of control plane operations.')
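# --- Illustrative sketch (not part of the CLI source) -----------------------
# The --target-regions help above documents entries of the form
# `<region>[=<replica count>][=<storage account type>]`. The helper below is a
# hypothetical illustration of parsing that shape; the real validator
# (process_gallery_image_version_namespace) is more involved and is not
# reproduced here.
def _parse_target_region(entry, default_replicas=1, default_sku=None):
    """Split an 'eastus=2=standard_zrs' style entry into (region, count, sku)."""
    parts = entry.split('=')
    region = parts[0]
    replicas = int(parts[1]) if len(parts) > 1 and parts[1] else default_replicas
    sku = parts[2] if len(parts) > 2 and parts[2] else default_sku
    return region, replicas, sku


# Example: ['westus', 'eastus=2', 'northeurope=3=standard_lrs'] maps to
# [('westus', 1, None), ('eastus', 2, None), ('northeurope', 3, 'standard_lrs')]
# -----------------------------------------------------------------------------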
def load_arguments(self, _): # Model imports StorageAccountTypes = self.get_models('StorageAccountTypes') DiskStorageAccountTypes = self.get_models('DiskStorageAccountTypes', operation_group='disks') SnapshotStorageAccountTypes = self.get_models('SnapshotStorageAccountTypes', operation_group='snapshots') UpgradeMode, CachingTypes, OperatingSystemTypes = self.get_models('UpgradeMode', 'CachingTypes', 'OperatingSystemTypes') HyperVGenerationTypes, HyperVGeneration = self.get_models('HyperVGenerationTypes', 'HyperVGeneration') DedicatedHostLicenseTypes = self.get_models('DedicatedHostLicenseTypes') OrchestrationServiceNames, OrchestrationServiceStateAction = self.get_models('OrchestrationServiceNames', 'OrchestrationServiceStateAction', operation_group='virtual_machine_scale_sets') RebootSetting, VMGuestPatchClassificationWindows, VMGuestPatchClassificationLinux = self.get_models('VMGuestPatchRebootSetting', 'VMGuestPatchClassificationWindows', 'VMGuestPatchClassificationLinux') GallerySharingPermissionTypes = self.get_models('GallerySharingPermissionTypes', operation_group='shared_galleries') ReplicationMode = self.get_models('ReplicationMode', operation_group='gallery_image_versions') # REUSABLE ARGUMENT DEFINITIONS name_arg_type = CLIArgumentType(options_list=['--name', '-n'], metavar='NAME') multi_ids_type = CLIArgumentType(nargs='+') existing_vm_name = CLIArgumentType(overrides=name_arg_type, configured_default='vm', help="The name of the Virtual Machine. You can configure the default using `az configure --defaults vm=<name>`", completer=get_resource_name_completion_list('Microsoft.Compute/virtualMachines'), id_part='name') existing_disk_name = CLIArgumentType(overrides=name_arg_type, help='The name of the managed disk', completer=get_resource_name_completion_list('Microsoft.Compute/disks'), id_part='name') existing_snapshot_name = CLIArgumentType(overrides=name_arg_type, help='The name of the snapshot', completer=get_resource_name_completion_list('Microsoft.Compute/snapshots'), id_part='name') vmss_name_type = CLIArgumentType(name_arg_type, configured_default='vmss', completer=get_resource_name_completion_list('Microsoft.Compute/virtualMachineScaleSets'), help="Scale set name. You can configure the default using `az configure --defaults vmss=<name>`", id_part='name') extension_instance_name_type = CLIArgumentType(help="Name of extension instance, which can be customized. Default: name of the extension.") image_template_name_type = CLIArgumentType(overrides=name_arg_type, id_part='name') disk_encryption_set_name = CLIArgumentType(overrides=name_arg_type, help='Name of disk encryption set.', id_part='name') # StorageAccountTypes renamed to DiskStorageAccountTypes in 2018_06_01 of azure-mgmt-compute DiskStorageAccountTypes = DiskStorageAccountTypes or StorageAccountTypes if DiskStorageAccountTypes: disk_sku = CLIArgumentType(arg_type=get_enum_type(DiskStorageAccountTypes)) else: # StorageAccountTypes introduced in api version 2016_04_30_preview of Resource.MGMT.Compute package. # However, 2017-03-09-profile targets version 2016-03-30 of compute package. disk_sku = CLIArgumentType(arg_type=get_enum_type(['Premium_LRS', 'Standard_LRS'])) if SnapshotStorageAccountTypes: snapshot_sku = CLIArgumentType(arg_type=get_enum_type(SnapshotStorageAccountTypes)) else: # SnapshotStorageAccountTypes introduced in api version 2018_04_01 of Resource.MGMT.Compute package. # However, 2017-03-09-profile targets version 2016-03-30 of compute package. 
snapshot_sku = CLIArgumentType(arg_type=get_enum_type(['Premium_LRS', 'Standard_LRS'])) # special case for `network nic scale-set list` command alias with self.argument_context('network nic scale-set list') as c: c.argument('virtual_machine_scale_set_name', options_list=['--vmss-name'], completer=get_resource_name_completion_list('Microsoft.Compute/virtualMachineScaleSets'), id_part='name') HyperVGenerationTypes = HyperVGenerationTypes or HyperVGeneration if HyperVGenerationTypes: hyper_v_gen_sku = CLIArgumentType(arg_type=get_enum_type(HyperVGenerationTypes, default="V1")) else: hyper_v_gen_sku = CLIArgumentType(arg_type=get_enum_type(["V1", "V2"], default="V1")) ultra_ssd_enabled_type = CLIArgumentType( arg_type=get_three_state_flag(), min_api='2018-06-01', help='Enables or disables the capability to have 1 or more managed data disks with UltraSSD_LRS storage account') scale_in_policy_type = CLIArgumentType( nargs='+', arg_type=get_enum_type(self.get_models('VirtualMachineScaleSetScaleInRules')), help='Specify the scale-in policy (space delimited) that decides which virtual machines are chosen for removal when a Virtual Machine Scale Set is scaled-in.' ) edge_zone_type = CLIArgumentType( help='The name of edge zone.', min_api='2020-12-01', is_preview=True ) t_shared_to = self.get_models('SharedToValues', operation_group='shared_galleries') shared_to_type = CLIArgumentType( arg_type=get_enum_type(t_shared_to), help='The query parameter to decide what shared galleries to fetch when doing listing operations. ' 'If not specified, list by subscription id.' ) # region MixedScopes for scope in ['vm', 'disk', 'snapshot', 'image', 'sig']: with self.argument_context(scope) as c: c.argument('tags', tags_type) for scope in ['disk', 'snapshot']: with self.argument_context(scope) as c: c.ignore('source_blob_uri', 'source_disk', 'source_snapshot') c.argument('source_storage_account_id', help='used when source blob is in a different subscription') c.argument('size_gb', options_list=['--size-gb', '-z'], help='size in GB. Max size: 4095 GB (certain preview disks can be larger).', type=int) c.argument('duration_in_seconds', help='Time duration in seconds until the SAS access expires', type=int) if self.supported_api_version(min_api='2018-09-30', operation_group='disks'): c.argument('access_level', arg_type=get_enum_type(['Read', 'Write']), default='Read', help='access level') c.argument('for_upload', arg_type=get_three_state_flag(), help='Create the {0} for uploading blobs later on through storage commands. Run "az {0} grant-access --access-level Write" to retrieve the {0}\'s SAS token.'.format(scope)) c.argument('hyper_v_generation', arg_type=hyper_v_gen_sku, help='The hypervisor generation of the Virtual Machine. Applicable to OS disks only.') else: c.ignore('access_level', 'for_upload', 'hyper_v_generation') c.argument('encryption_type', min_api='2019-07-01', arg_type=get_enum_type(self.get_models('EncryptionType')), help='Encryption type. EncryptionAtRestWithPlatformKey: Disk is encrypted with XStore managed key at rest. It is the default encryption type. EncryptionAtRestWithCustomerKey: Disk is encrypted with Customer managed key at rest.') c.argument('disk_encryption_set', min_api='2019-07-01', help='Name or ID of disk encryption set that is used to encrypt the disk.') c.argument('location', help='Location. Values from: `az account list-locations`. You can configure the default location using `az configure --defaults location=<location>`. 
If location is not specified and no default location specified, location will be automatically set as same as the resource group.') operation_group = 'disks' if scope == 'disk' else 'snapshots' c.argument('network_access_policy', min_api='2020-05-01', help='Policy for accessing the disk via network.', arg_type=get_enum_type(self.get_models('NetworkAccessPolicy', operation_group=operation_group))) c.argument('disk_access', min_api='2020-05-01', help='Name or ID of the disk access resource for using private endpoints on disks.') c.argument('enable_bursting', arg_type=get_three_state_flag(), help='Enable bursting beyond the provisioned performance target of the disk. Bursting is disabled by default, and it does not apply to Ultra disks.') for scope in ['disk create', 'snapshot create']: with self.argument_context(scope) as c: c.argument('source', help='source to create the disk/snapshot from, including unmanaged blob uri, managed disk id or name, or snapshot id or name') # endregion # region Disks with self.argument_context('disk') as c: c.argument('zone', zone_type, min_api='2017-03-30', options_list=['--zone']) # TODO: --size-gb currently has claimed -z. We can do a breaking change later if we want to. c.argument('disk_name', existing_disk_name, completer=get_resource_name_completion_list('Microsoft.Compute/disks')) c.argument('name', arg_type=name_arg_type) c.argument('sku', arg_type=disk_sku, help='Underlying storage SKU') c.argument('os_type', arg_type=get_enum_type(OperatingSystemTypes), help='The Operating System type of the Disk.') c.argument('disk_iops_read_write', type=int, min_api='2018-06-01', help='The number of IOPS allowed for this disk. Only settable for UltraSSD disks. One operation can transfer between 4k and 256k bytes') c.argument('disk_mbps_read_write', type=int, min_api='2018-06-01', help="The bandwidth allowed for this disk. Only settable for UltraSSD disks. MBps means millions of bytes per second with ISO notation of powers of 10") c.argument('upload_size_bytes', type=int, min_api='2019-03-01', help='The size (in bytes) of the contents of the upload including the VHD footer. Min value: 20972032. Max value: 35183298347520') c.argument('max_shares', type=int, help='The maximum number of VMs that can attach to the disk at the same time. Value greater than one indicates a disk that can be mounted on multiple VMs at the same time') c.argument('disk_iops_read_only', type=int, help='The total number of IOPS that will be allowed across all VMs mounting the shared disk as ReadOnly. One operation can transfer between 4k and 256k bytes') c.argument('disk_mbps_read_only', type=int, help='The total throughput (MBps) that will be allowed across all VMs mounting the shared disk as ReadOnly. MBps means millions of bytes per second - MB here uses the ISO notation, of powers of 10') c.argument('image_reference', help='ID or URN (publisher:offer:sku:version) of the image from which to create a disk') c.argument('image_reference_lun', type=int, help='If the disk is created from an image\'s data disk, this is an index that indicates which of the data disks in the image to use. For OS disks, this field is null') c.argument('gallery_image_reference', help='ID of the shared galley image version from which to create a disk') c.argument('gallery_image_reference_lun', type=int, help='If the disk is created from an image\'s data disk, this is an index that indicates which of the data disks in the image to use. 
For OS disks, this field is null') c.argument('logical_sector_size', type=int, help='Logical sector size in bytes for Ultra disks. Supported values are 512 and 4096. 4096 is the default.') c.argument('tier', help='Performance tier of the disk (e.g., P4, S10) as described here: https://azure.microsoft.com/pricing/details/managed-disks/. Does not apply to Ultra disks.') c.argument('edge_zone', edge_zone_type) c.argument('security_type', choices=['TrustedLaunch'], help='The security type of the VM. Applicable for OS disks only.', min_api='2020-12-01') c.argument('support_hibernation', arg_type=get_three_state_flag(), help='Indicate the OS on a disk supports hibernation.', min_api='2020-12-01') # endregion # region Snapshots with self.argument_context('snapshot', resource_type=ResourceType.MGMT_COMPUTE, operation_group='snapshots') as c: c.argument('snapshot_name', existing_snapshot_name, id_part='name', completer=get_resource_name_completion_list('Microsoft.Compute/snapshots')) c.argument('name', arg_type=name_arg_type) c.argument('sku', arg_type=snapshot_sku) c.argument('incremental', arg_type=get_three_state_flag(), min_api='2019-03-01', help='Whether a snapshot is incremental. Incremental snapshots on the same disk occupy less space than full snapshots and can be diffed') c.argument('edge_zone', edge_zone_type) c.argument('copy_start', arg_type=get_three_state_flag(), min_api='2021-04-01', help='Create snapshot by using a deep copy process, where the resource creation is considered complete only after all data has been copied from the source.') # endregion # region Images with self.argument_context('image') as c: c.argument('os_type', arg_type=get_enum_type(['Windows', 'Linux'])) c.argument('image_name', arg_type=name_arg_type, id_part='name', completer=get_resource_name_completion_list('Microsoft.Compute/images')) c.argument('tags', tags_type) with self.argument_context('image create') as c: # here we collapse all different image sources into 2 common arguments --os-disk-source --data-disk-sources c.argument('name', arg_type=name_arg_type, help='new image name') c.argument('source', help='OS disk source from the same region, including a virtual machine ID or name, OS disk blob URI, managed OS disk ID or name, or OS snapshot ID or name') c.argument('data_disk_sources', nargs='+', help='Space-separated list of data disk sources, including unmanaged blob URI, managed disk ID or name, or snapshot ID or name') c.argument('zone_resilient', min_api='2017-12-01', arg_type=get_three_state_flag(), help='Specifies whether an image is zone resilient or not. ' 'Default is false. Zone resilient images can be created only in regions that provide Zone Redundant Storage') c.argument('storage_sku', arg_type=disk_sku, help='The SKU of the storage account with which to create the VM image. 
Unused if source VM is specified.') c.argument('os_disk_caching', arg_type=get_enum_type(CachingTypes), help="Storage caching type for the image's OS disk.") c.argument('data_disk_caching', arg_type=get_enum_type(CachingTypes), help="Storage caching type for the image's data disk.") c.argument('hyper_v_generation', arg_type=hyper_v_gen_sku, min_api="2019-03-01", help='The hypervisor generation of the Virtual Machine created from the image.') c.ignore('source_virtual_machine', 'os_blob_uri', 'os_disk', 'os_snapshot', 'data_blob_uris', 'data_disks', 'data_snapshots') c.argument('edge_zone', edge_zone_type, ) # endregion # region Image Templates with self.argument_context('image builder') as c: ib_output_name_help = "Name of the image builder run output." c.argument('location', get_location_type(self.cli_ctx)) c.argument('scripts', nargs='+', help="Space-separated list of shell or powershell scripts to customize the image with. Each script must be a publicly accessible URL." " Infers type of script from file extension ('.sh' or '.ps1') or from source type. For more customizer options and flexibility, see: 'az image template customizer add'") c.argument('source', options_list=["--image-source", "-i"], help="The base image to customize. Must be a valid platform image URN, platform image alias, Red Hat ISO image URI, managed image name/ID, or shared image version ID.") c.argument('image_template_name', image_template_name_type, help="The name of the image template.") c.argument('checksum', help="The SHA256 checksum of the Red Hat ISO image") c.argument('managed_image_destinations', nargs='+', help='Managed image output distributor information. Space-separated list of key-value pairs. E.g "image_1=westus2 image_2=westus". Each key is the name or resource ID of the managed image to be created. Each value is the location of the image.') c.argument('shared_image_destinations', nargs='+', help='Shared image gallery (sig) output distributor information. Space-separated list of key-value pairs. E.g "my_gallery_1/image_def_1=eastus,westus my_gallery_2/image_def_2=uksouth,canadaeast,francesouth." ' 'Each key is the sig image definition ID or sig gallery name and sig image definition delimited by a "/". Each value is a comma-delimited list of replica locations.') c.argument('output_name', help=ib_output_name_help) c.ignore('destinations_lists', 'scripts_list', 'source_dict') with self.argument_context('image builder create') as c: ib_source_type = CLIArgumentType(arg_group="Image Source") ib_customizer_type = CLIArgumentType(arg_group="Customizer") ib_cutput_type = CLIArgumentType(arg_group="Output") c.argument('build_timeout', type=int, help="The Maximum duration to wait while building the image template, in minutes. Default is 60.") c.argument('image_template', help='Local path or URL to an image template file. When using --image-template, all other parameters are ignored except -g and -n. Reference: https://docs.microsoft.com/azure/virtual-machines/linux/image-builder-json') c.argument('identity', nargs='+', help='List of user assigned identities (name or ID, space delimited) of the image template.') # VM profile c.argument('vm_size', help='Size of the virtual machine used to build, customize and capture images. Omit or specify empty string to use the default (Standard_D1_v2)') c.argument('os_disk_size', type=int, help='Size of the OS disk in GB. Omit or specify 0 to use Azure\'s default OS disk size') c.argument('vnet', help='Name of VNET to deploy the build virtual machine. 
You should only specify it when subnet is a name') c.argument('subnet', help='Name or ID of subnet to deploy the build virtual machine') # Image Source Arguments c.argument('source', arg_type=ib_source_type) c.argument('checksum', arg_type=ib_source_type) c.argument('', arg_type=ib_source_type) # Image Customizer Arguments c.argument('scripts', arg_type=ib_customizer_type) c.argument('', arg_type=ib_customizer_type) c.argument('', arg_type=ib_customizer_type) # Image Output Arguments c.argument('managed_image_destinations', arg_type=ib_cutput_type) c.argument('shared_image_destinations', arg_type=ib_cutput_type) c.argument('output_name', arg_type=ib_cutput_type) with self.argument_context('image builder output') as c: ib_sig_regions_help = "Space-separated list of regions to replicate the image version into." ib_img_location_help = "Location where the customized image will be created." c.argument('gallery_image_definition', arg_group="Shared Image Gallery", help="Name or ID of the existing SIG image definition to create the customized image version with.") c.argument('gallery_name', arg_group="Shared Image Gallery", help="Shared image gallery name, if image definition name and not ID was provided.") c.argument('gallery_replication_regions', arg_group="Shared Image Gallery", nargs='+', help=ib_sig_regions_help) c.argument('managed_image', arg_group="Managed Image", help="Name or ID of the customized managed image to be created.") c.argument('managed_image_location', arg_group="Managed Image", help=ib_img_location_help) with self.argument_context('image builder output add') as c: ib_artifact_tags_help = "Tags that will be applied to the output artifact once it has been created by the distributor. " + tags_type.settings['help'] ib_artifact_tags_type = CLIArgumentType(overrides=tags_type, help=ib_artifact_tags_help, options_list=["--artifact-tags"]) ib_default_loc_help = " Defaults to resource group's location." c.argument('output_name', help=ib_output_name_help + " Defaults to the name of the managed image or sig image definition.") c.argument('gallery_replication_regions', arg_group="Shared Image Gallery", nargs='+', help=ib_sig_regions_help + ib_default_loc_help) c.argument('managed_image_location', arg_group="Managed Image", help=ib_img_location_help + ib_default_loc_help) c.argument('is_vhd', arg_group="VHD", help="The output is a VHD distributor.", action='store_true') c.argument('tags', arg_type=ib_artifact_tags_type) c.ignore('location') with self.argument_context('image builder customizer') as c: ib_win_restart_type = CLIArgumentType(arg_group="Windows Restart") ib_win_update_type = CLIArgumentType(arg_group="Windows Update") ib_script_type = CLIArgumentType(arg_group="Shell and Powershell") ib_powershell_type = CLIArgumentType(arg_group="Powershell") ib_file_customizer_type = CLIArgumentType(arg_group="File") c.argument('customizer_name', help="Name of the customizer.") c.argument('customizer_type', options_list=['--type', '-t'], help="Type of customizer to be added to the image template.", arg_type=get_enum_type(ScriptType)) # Script Args c.argument('script_url', arg_type=ib_script_type, help="URL of script to customize the image with. 
The URL must be publicly accessible.") c.argument('inline_script', arg_type=ib_script_type, nargs='+', help="Space-separated list of inline script lines to customize the image with.") # Powershell Specific Args c.argument('valid_exit_codes', options_list=['--exit-codes', '-e'], arg_type=ib_powershell_type, nargs='+', help="Space-separated list of valid exit codes, as integers") # Windows Restart Specific Args c.argument('restart_command', arg_type=ib_win_restart_type, help="Command to execute the restart operation.") c.argument('restart_check_command', arg_type=ib_win_restart_type, help="Command to verify that restart succeeded.") c.argument('restart_timeout', arg_type=ib_win_restart_type, help="Restart timeout specified as a string consisting of a magnitude and unit, e.g. '5m' (5 minutes) or '2h' (2 hours)", default="5m") # Windows Update Specific Args c.argument('search_criteria', arg_type=ib_win_update_type, help='Criteria to search updates. Omit or specify empty string to use the default (search all). Refer to above link for examples and detailed description of this field.') c.argument('filters', arg_type=ib_win_update_type, nargs='+', help='Space delimited filters to select updates to apply. Omit or specify empty array to use the default (no filter)') c.argument('update_limit', arg_type=ib_win_update_type, help='Maximum number of updates to apply at a time. Omit or specify 0 to use the default (1000)') # File Args c.argument('file_source', arg_type=ib_file_customizer_type, help="The URI of the file to be downloaded into the image. It can be a github link, SAS URI for Azure Storage, etc.") c.argument('dest_path', arg_type=ib_file_customizer_type, help="The absolute destination path where the file specified in --file-source will be downloaded to in the image") # endregion # region AvailabilitySets with self.argument_context('vm availability-set') as c: c.argument('availability_set_name', name_arg_type, id_part='name', completer=get_resource_name_completion_list('Microsoft.Compute/availabilitySets'), help='Name of the availability set') with self.argument_context('vm availability-set create') as c: c.argument('availability_set_name', name_arg_type, validator=get_default_location_from_resource_group, help='Name of the availability set') c.argument('platform_update_domain_count', type=int, help='Update Domain count. 
If unspecified, the server will pick the most optimal number like 5.') c.argument('platform_fault_domain_count', type=int, help='Fault Domain count.') c.argument('validate', help='Generate and validate the ARM template without creating any resources.', action='store_true') c.argument('unmanaged', action='store_true', min_api='2016-04-30-preview', help='contained VMs should use unmanaged disks') with self.argument_context('vm availability-set update') as c: if self.supported_api_version(max_api='2016-04-30-preview', operation_group='virtual_machines'): c.argument('name', name_arg_type, id_part='name', completer=get_resource_name_completion_list('Microsoft.Compute/availabilitySets'), help='Name of the availability set') c.argument('availability_set_name', options_list=['--availability-set-name']) # endregion # region VirtualMachines with self.argument_context('vm') as c: c.argument('vm_name', existing_vm_name) c.argument('size', completer=get_vm_size_completion_list) c.argument('name', arg_type=name_arg_type) c.argument('zone', zone_type, min_api='2017-03-30') c.argument('caching', help='Disk caching policy', arg_type=get_enum_type(CachingTypes)) c.argument('nsg', help='The name to use when creating a new Network Security Group (default) or referencing an existing one. Can also reference an existing NSG by ID or specify "" for none.', arg_group='Network') c.argument('nsg_rule', help='NSG rule to create when creating a new NSG. Defaults to open ports for allowing RDP on Windows and allowing SSH on Linux.', arg_group='Network', arg_type=get_enum_type(['RDP', 'SSH'])) c.argument('application_security_groups', min_api='2017-09-01', nargs='+', options_list=['--asgs'], help='Space-separated list of existing application security groups to associate with the VM.', arg_group='Network') c.argument('workspace', is_preview=True, arg_group='Monitor', help='Name or ID of Log Analytics Workspace. If you specify the workspace through its name, the workspace should be in the same resource group with the vm, otherwise a new workspace will be created.') with self.argument_context('vm capture') as c: c.argument('overwrite', action='store_true') with self.argument_context('vm update') as c: c.argument('os_disk', min_api='2017-12-01', help="Managed OS disk ID or name to swap to") c.argument('write_accelerator', nargs='*', min_api='2017-12-01', help="enable/disable disk write accelerator. Use singular value 'true/false' to apply across, or specify individual disks, e.g.'os=true 1=true 2=true' for os disk and data disks with lun of 1 & 2") c.argument('disk_caching', nargs='*', help="Use singular value to apply across, or specify individual disks, e.g. 'os=ReadWrite 0=None 1=ReadOnly' should enable update os disk and 2 data disks") c.argument('ultra_ssd_enabled', ultra_ssd_enabled_type) c.argument('enable_secure_boot', arg_type=get_three_state_flag(), min_api='2020-12-01', help='Enable secure boot.') c.argument('enable_vtpm', arg_type=get_three_state_flag(), min_api='2020-12-01', help='Enable vTPM.') with self.argument_context('vm create') as c: c.argument('name', name_arg_type, validator=_resource_not_exists(self.cli_ctx, 'Microsoft.Compute/virtualMachines')) c.argument('vm_name', name_arg_type, id_part=None, help='Name of the virtual machine.', completer=None) c.argument('os_disk_size_gb', type=int, help='the size of the os disk in GB', arg_group='Storage') c.argument('availability_set', help='Name or ID of an existing availability set to add the VM to. 
None by default.') c.argument('vmss', help='Name or ID of an existing virtual machine scale set that the virtual machine should be assigned to. None by default.') c.argument('nsg', help='The name to use when creating a new Network Security Group (default) or referencing an existing one. Can also reference an existing NSG by ID or specify "" for none (\'""\' in Azure CLI using PowerShell or --% operator).', arg_group='Network') c.argument('nsg_rule', help='NSG rule to create when creating a new NSG. Defaults to open ports for allowing RDP on Windows and allowing SSH on Linux. NONE represents no NSG rule', arg_group='Network', arg_type=get_enum_type(['RDP', 'SSH', 'NONE'])) c.argument('application_security_groups', resource_type=ResourceType.MGMT_NETWORK, min_api='2017-09-01', nargs='+', options_list=['--asgs'], help='Space-separated list of existing application security groups to associate with the VM.', arg_group='Network', validator=validate_asg_names_or_ids) c.argument('boot_diagnostics_storage', help='pre-existing storage account name or its blob uri to capture boot diagnostics. Its sku should be one of Standard_GRS, Standard_LRS and Standard_RAGRS') c.argument('accelerated_networking', resource_type=ResourceType.MGMT_NETWORK, min_api='2016-09-01', arg_type=get_three_state_flag(), arg_group='Network', help="enable accelerated networking. Unless specified, CLI will enable it based on machine image and size") if self.supported_api_version(min_api='2019-03-01', resource_type=ResourceType.MGMT_COMPUTE): VirtualMachineEvictionPolicyTypes = self.get_models('VirtualMachineEvictionPolicyTypes', resource_type=ResourceType.MGMT_COMPUTE) c.argument('eviction_policy', resource_type=ResourceType.MGMT_COMPUTE, min_api='2019-03-01', arg_type=get_enum_type(VirtualMachineEvictionPolicyTypes, default=None), help="The eviction policy for the Spot priority virtual machine. Default eviction policy is Deallocate for a Spot priority virtual machine") c.argument('enable_agent', arg_type=get_three_state_flag(), min_api='2018-06-01', help='Indicates whether virtual machine agent should be provisioned on the virtual machine. When this property is not specified, default behavior is to set it to true. This will ensure that VM Agent is installed on the VM so that extensions can be added to the VM later') c.argument('enable_auto_update', arg_type=get_three_state_flag(), min_api='2020-06-01', help='Indicate whether Automatic Updates is enabled for the Windows virtual machine') c.argument('patch_mode', arg_type=get_enum_type(['AutomaticByOS', 'AutomaticByPlatform', 'Manual', 'ImageDefault']), min_api='2020-12-01', help='Mode of in-guest patching to IaaS virtual machine. Allowed values for Windows VM: AutomaticByOS, AutomaticByPlatform, Manual. Allowed values for Linux VM: AutomaticByPlatform, ImageDefault. Manual - You control the application of patches to a virtual machine. You do this by applying patches manually inside the VM. In this mode, automatic updates are disabled; the paramater --enable-auto-update must be false. AutomaticByOS - The virtual machine will automatically be updated by the OS. The parameter --enable-auto-update must be true. AutomaticByPlatform - the virtual machine will automatically updated by the OS. ImageDefault - The virtual machine\'s default patching configuration is used. The parameter --enable-agent and --enable-auto-update must be true') c.argument('ssh_key_name', help='Use it as public key in virtual machine. 
It should be an existing SSH key resource in Azure.') c.argument('enable_hotpatching', arg_type=get_three_state_flag(), help='Patch VMs without requiring a reboot. --enable-agent must be set and --patch-mode must be set to AutomaticByPlatform', min_api='2020-12-01') c.argument('platform_fault_domain', min_api='2020-06-01', help='Specify the scale set logical fault domain into which the virtual machine will be created. By default, the virtual machine will be automatically assigned to a fault domain that best maintains balance across available fault domains. This is applicable only if the virtualMachineScaleSet property of this virtual machine is set. The virtual machine scale set that is referenced, must have platform fault domain count. This property cannot be updated once the virtual machine is created. Fault domain assignment can be viewed in the virtual machine instance view') c.argument('count', type=int, is_preview=True, help='Number of virtual machines to create. Value range is [2, 250], inclusive. Don\'t specify this parameter if you want to create a normal single VM. The VMs are created in parallel. The output of this command is an array of VMs instead of one single VM. Each VM has its own public IP, NIC. VNET and NSG are shared. It is recommended that no existing public IP, NIC, VNET and NSG are in resource group. When --count is specified, --attach-data-disks, --attach-os-disk, --boot-diagnostics-storage, --computer-name, --host, --host-group, --nics, --os-disk-name, --private-ip-address, --public-ip-address, --public-ip-address-dns-name, --storage-account, --storage-container-name, --subnet, --use-unmanaged-disk, --vnet-name are not allowed.') c.argument('security_type', arg_type=get_enum_type(['TrustedLaunch']), min_api='2020-12-01', help='Specify if the VM is Trusted Launch enabled. See https://docs.microsoft.com/azure/virtual-machines/trusted-launch.') c.argument('enable_secure_boot', arg_type=get_three_state_flag(), min_api='2020-12-01', help='Enable secure boot. It is part of trusted launch.') c.argument('enable_vtpm', arg_type=get_three_state_flag(), min_api='2020-12-01', help='Enable vTPM. It is part of trusted launch.') c.argument('user_data', help='UserData for the VM. It can be passed in as file or string.', completer=FilesCompleter(), type=file_type, min_api='2021-03-01') with self.argument_context('vm create', arg_group='Storage') as c: c.argument('attach_os_disk', help='Attach an existing OS disk to the VM. Can use the name or ID of a managed disk or the URI to an unmanaged disk VHD.') c.argument('attach_data_disks', nargs='+', help='Attach existing data disks to the VM. Can use the name or ID of a managed disk or the URI to an unmanaged disk VHD.') with self.argument_context('vm create', arg_group='Dedicated Host', min_api='2019-03-01') as c: c.argument('dedicated_host_group', options_list=['--host-group'], is_preview=True, help="Name or ID of the dedicated host group that the VM will reside in. --host and --host-group can't be used together.") c.argument('dedicated_host', options_list=['--host'], is_preview=True, help="ID of the dedicated host that the VM will reside in. --host and --host-group can't be used together.") with self.argument_context('vm update', arg_group='Dedicated Host', min_api='2019-03-01') as c: c.argument('dedicated_host_group', options_list=['--host-group'], is_preview=True, help="Name or ID of the dedicated host group that the VM will reside in. --host and --host-group can't be used together. 
You should deallocate the VM before update, and start the VM after update. Please check out help for more examples.") c.argument('dedicated_host', options_list=['--host'], is_preview=True, help="ID of the dedicated host that the VM will reside in. --host and --host-group can't be used together. You should deallocate the VM before update, and start the VM after update. Please check out help for more examples.") with self.argument_context('vm open-port') as c: c.argument('vm_name', name_arg_type, help='The name of the virtual machine to open inbound traffic on.') c.argument('network_security_group_name', options_list=('--nsg-name',), help='The name of the network security group to create if one does not exist. Ignored if an NSG already exists.', validator=validate_nsg_name) c.argument('apply_to_subnet', help='Allow inbound traffic on the subnet instead of the NIC', action='store_true') c.argument('port', help="The port or port range (ex: 80-100) to open inbound traffic to. Use '*' to allow traffic to all ports. Use comma separated values to specify more than one port or port range.") c.argument('priority', help='Rule priority, between 100 (highest priority) and 4096 (lowest priority). Must be unique for each rule in the collection.', type=int) for scope in ['vm show', 'vm list']: with self.argument_context(scope) as c: c.argument('show_details', action='store_true', options_list=['--show-details', '-d'], help='show public ip address, FQDN, and power states. command will run slow') for scope in ['vm show', 'vmss show']: with self.argument_context(scope) as c: c.argument('include_user_data', action='store_true', options_list=['--include-user-data', '-u'], help='Include the user data properties in the query result.', min_api='2021-03-01') for scope in ['vm get-instance-view', 'vm wait', 'vmss wait']: with self.argument_context(scope) as c: c.ignore('include_user_data') with self.argument_context('vm diagnostics') as c: c.argument('vm_name', arg_type=existing_vm_name, options_list=['--vm-name']) with self.argument_context('vm diagnostics set') as c: c.argument('storage_account', completer=get_resource_name_completion_list('Microsoft.Storage/storageAccounts')) with self.argument_context('vm install-patches') as c: c.argument('maximum_duration', type=str, help='Specify the maximum amount of time that the operation will run. It must be an ISO 8601-compliant duration string such as PT4H (4 hours)') c.argument('reboot_setting', arg_type=get_enum_type(RebootSetting), help='Define when it is acceptable to reboot a VM during a software update operation.') c.argument('classifications_to_include_win', nargs='+', arg_type=get_enum_type(VMGuestPatchClassificationWindows), help='Space-separated list of classifications to include for Windows VM.') c.argument('classifications_to_include_linux', nargs='+', arg_type=get_enum_type(VMGuestPatchClassificationLinux), help='Space-separated list of classifications to include for Linux VM.') c.argument('kb_numbers_to_include', nargs='+', help='Space-separated list of KBs to include in the patch operation. Applicable to Windows VM only') c.argument('kb_numbers_to_exclude', nargs='+', help='Space-separated list of KBs to exclude in the patch operation. Applicable to Windows VM only') c.argument('exclude_kbs_requiring_reboot', arg_type=get_three_state_flag(), help="Filter out KBs that don't have a reboot behavior of 'NeverReboots' when this is set. 
Applicable to Windows VM only") c.argument('package_name_masks_to_include', nargs='+', help='Space-separated list of packages to include in the patch operation. Format: packageName_packageVersion. Applicable to Linux VM only') c.argument('package_name_masks_to_exclude', nargs='+', help='Space-separated list of packages to exclude in the patch operation. Format: packageName_packageVersion. Applicable to Linux VM only') with self.argument_context('vm disk') as c: c.argument('vm_name', options_list=['--vm-name'], id_part=None, completer=get_resource_name_completion_list('Microsoft.Compute/virtualMachines')) c.argument('new', action='store_true', help='create a new disk') c.argument('sku', arg_type=disk_sku, help='Underlying storage SKU') c.argument('size_gb', options_list=['--size-gb', '-z'], help='size in GB. Max size: 4095 GB (certain preview disks can be larger).', type=int) c.argument('lun', type=int, help='0-based logical unit number (LUN). Max value depends on the Virtual Machine size.') with self.argument_context('vm disk attach') as c: c.argument('enable_write_accelerator', min_api='2017-12-01', action='store_true', help='enable write accelerator') c.argument('disk', options_list=['--name', '-n', c.deprecate(target='--disk', redirect='--name', hide=True)], help="The name or ID of the managed disk", validator=validate_vm_disk, id_part='name', completer=get_resource_name_completion_list('Microsoft.Compute/disks')) with self.argument_context('vm disk detach') as c: c.argument('disk_name', arg_type=name_arg_type, help='The data disk name.') with self.argument_context('vm encryption enable') as c: c.argument('encrypt_format_all', action='store_true', help='Encrypts-formats data disks instead of encrypting them. Encrypt-formatting is a lot faster than in-place encryption but wipes out the partition getting encrypt-formatted.') # Place aad arguments in their own group aad_arguments = 'Azure Active Directory' c.argument('aad_client_id', arg_group=aad_arguments) c.argument('aad_client_secret', arg_group=aad_arguments) c.argument('aad_client_cert_thumbprint', arg_group=aad_arguments) with self.argument_context('vm extension') as c: c.argument('vm_extension_name', name_arg_type, completer=get_resource_name_completion_list('Microsoft.Compute/virtualMachines/extensions'), help='Name of the extension.', id_part='child_name_1') c.argument('vm_name', arg_type=existing_vm_name, options_list=['--vm-name'], id_part='name') c.argument('expand', deprecate_info=c.deprecate(expiration='3.0.0', hide=True)) with self.argument_context('vm extension list') as c: c.argument('vm_name', arg_type=existing_vm_name, options_list=['--vm-name'], id_part=None) with self.argument_context('vm secret') as c: c.argument('secrets', multi_ids_type, options_list=['--secrets', '-s'], help='Space-separated list of key vault secret URIs. Perhaps, produced by \'az keyvault secret list-versions --vault-name vaultname -n cert1 --query "[?attributes.enabled].id" -o tsv\'') c.argument('keyvault', help='Name or ID of the key vault.', validator=validate_keyvault) c.argument('certificate', help='key vault certificate name or its full secret URL') c.argument('certificate_store', help='Windows certificate store names. 
Default: My') with self.argument_context('vm secret list') as c: c.argument('vm_name', arg_type=existing_vm_name, id_part=None) with self.argument_context('vm image') as c: c.argument('publisher_name', options_list=['--publisher', '-p'], help='image publisher') c.argument('publisher', options_list=['--publisher', '-p'], help='image publisher') c.argument('offer', options_list=['--offer', '-f'], help='image offer') c.argument('plan', help='image billing plan') c.argument('sku', options_list=['--sku', '-s'], help='image sku') c.argument('version', help="image sku's version") c.argument('urn', help="URN, in format of 'publisher:offer:sku:version' or 'publisher:offer:sku:edge_zone:version'. If specified, other argument values can be omitted") with self.argument_context('vm image list') as c: c.argument('image_location', get_location_type(self.cli_ctx)) c.argument('edge_zone', edge_zone_type) with self.argument_context('vm image list-offers') as c: c.argument('edge_zone', edge_zone_type) with self.argument_context('vm image list-skus') as c: c.argument('edge_zone', edge_zone_type) with self.argument_context('vm image list-publishers') as c: c.argument('edge_zone', edge_zone_type) with self.argument_context('vm image show') as c: c.argument('skus', options_list=['--sku', '-s']) c.argument('edge_zone', edge_zone_type) with self.argument_context('vm image terms') as c: c.argument('urn', help='URN, in the format of \'publisher:offer:sku:version\'. If specified, other argument values can be omitted') c.argument('publisher', help='Image publisher') c.argument('offer', help='Image offer') c.argument('plan', help='Image billing plan') with self.argument_context('vm nic') as c: c.argument('vm_name', existing_vm_name, options_list=['--vm-name'], id_part=None) c.argument('nics', nargs='+', help='Names or IDs of NICs.', validator=validate_vm_nics) c.argument('primary_nic', help='Name or ID of the primary NIC. If missing, the first NIC in the list will be the primary.') with self.argument_context('vm nic show') as c: c.argument('nic', help='NIC name or ID.', validator=validate_vm_nic) with self.argument_context('vm unmanaged-disk') as c: c.argument('new', action='store_true', help='Create a new disk.') c.argument('lun', type=int, help='0-based logical unit number (LUN). Max value depends on the Virtual Machine size.') c.argument('vhd_uri', help="Virtual hard disk URI. For example: https://mystorage.blob.core.windows.net/vhds/d1.vhd") with self.argument_context('vm unmanaged-disk attach') as c: c.argument('disk_name', options_list=['--name', '-n'], help='The data disk name.') c.argument('size_gb', options_list=['--size-gb', '-z'], help='size in GB. 
Max size: 4095 GB (certain preview disks can be larger).', type=int) with self.argument_context('vm unmanaged-disk detach') as c: c.argument('disk_name', options_list=['--name', '-n'], help='The data disk name.') for scope in ['vm unmanaged-disk attach', 'vm unmanaged-disk detach']: with self.argument_context(scope) as c: c.argument('vm_name', arg_type=existing_vm_name, options_list=['--vm-name'], id_part=None) with self.argument_context('vm unmanaged-disk list') as c: c.argument('vm_name', options_list=['--vm-name', '--name', '-n'], arg_type=existing_vm_name, id_part=None) with self.argument_context('vm user') as c: c.argument('username', options_list=['--username', '-u'], help='The user name') c.argument('password', options_list=['--password', '-p'], help='The user password') with self.argument_context('vm list-skus') as c: c.argument('size', options_list=['--size', '-s'], help="size name, partial name is accepted") c.argument('zone', options_list=['--zone', '-z'], arg_type=get_three_state_flag(), help="show skus supporting availability zones") c.argument('show_all', options_list=['--all'], arg_type=get_three_state_flag(), help="show all information including vm sizes not available under the current subscription") c.argument('resource_type', options_list=['--resource-type', '-r'], help='resource types e.g. "availabilitySets", "snapshots", "disks", etc') with self.argument_context('vm restart') as c: c.argument('force', action='store_true', help='Force the VM to restart by redeploying it. Use if the VM is unresponsive.') with self.argument_context('vm host') as c: c.argument('host_group_name', options_list=['--host-group'], id_part='name', help="Name of the Dedicated Host Group") c.argument('host_name', name_arg_type, id_part='child_name_1', help="Name of the Dedicated Host") c.ignore('expand') with self.argument_context('vm host create') as c: c.argument('platform_fault_domain', options_list=['--platform-fault-domain', '-d'], type=int, help="Fault domain of the host within a group. Allowed values: 0, 1, 2") c.argument('auto_replace_on_failure', options_list=['--auto-replace'], arg_type=get_three_state_flag(), help="Replace the host automatically if a failure occurs") c.argument('license_type', arg_type=get_enum_type(DedicatedHostLicenseTypes), help="The software license type that will be applied to the VMs deployed on the dedicated host.") c.argument('sku', help="SKU of the dedicated host. Available SKUs: https://azure.microsoft.com/pricing/details/virtual-machines/dedicated-host/") with self.argument_context('vm host list') as c: c.argument('host_group_name', id_part=None) with self.argument_context('vm host group') as c: c.argument('host_group_name', name_arg_type, id_part='name', help="Name of the Dedicated Host Group") c.argument('automatic_placement', arg_type=get_three_state_flag(), min_api='2020-06-01', help='Specify whether virtual machines or virtual machine scale sets can be placed automatically ' 'on the dedicated host group. Automatic placement means resources are allocated on dedicated ' 'hosts, that are chosen by Azure, under the dedicated host group. 
The value is defaulted to ' 'false when not provided.') with self.argument_context('vm host group create') as c: c.argument('platform_fault_domain_count', options_list=["--platform-fault-domain-count", "-c"], type=int, help="Number of fault domains that the host group can span.") c.argument('zones', zone_type) for scope in ["vm host", "vm host group"]: with self.argument_context("{} create".format(scope)) as c: location_type = get_location_type(self.cli_ctx) custom_location_msg = " Otherwise, location will default to the resource group's location" custom_location_type = CLIArgumentType(overrides=location_type, help=location_type.settings["help"] + custom_location_msg) c.argument('location', arg_type=custom_location_type) # endregion # region VMSS scaleset_name_aliases = ['vm_scale_set_name', 'virtual_machine_scale_set_name', 'name'] with self.argument_context('vmss') as c: c.argument('zones', zones_type, min_api='2017-03-30') c.argument('instance_id', id_part='child_name_1') c.argument('instance_ids', multi_ids_type, help='Space-separated list of IDs (ex: 1 2 3 ...) or * for all instances. If not provided, the action will be applied on the scaleset itself') c.argument('tags', tags_type) c.argument('caching', help='Disk caching policy', arg_type=get_enum_type(CachingTypes)) for dest in scaleset_name_aliases: c.argument(dest, vmss_name_type) c.argument('host_group', min_api='2020-06-01', help='Name or ID of dedicated host group that the virtual machine scale set resides in') for scope in ['vmss deallocate', 'vmss delete-instances', 'vmss restart', 'vmss start', 'vmss stop', 'vmss show', 'vmss update-instances', 'vmss simulate-eviction']: with self.argument_context(scope) as c: for dest in scaleset_name_aliases: c.argument(dest, vmss_name_type, id_part=None) # due to instance-ids parameter with self.argument_context('vmss create', operation_group='virtual_machine_scale_sets') as c: VirtualMachineEvictionPolicyTypes = self.get_models('VirtualMachineEvictionPolicyTypes', resource_type=ResourceType.MGMT_COMPUTE) c.argument('name', name_arg_type) c.argument('nat_backend_port', default=None, help='Backend port to open with NAT rules. Defaults to 22 on Linux and 3389 on Windows.') c.argument('single_placement_group', arg_type=get_three_state_flag(), help="Limit the scale set to a single placement group." " See https://docs.microsoft.com/azure/virtual-machine-scale-sets/virtual-machine-scale-sets-placement-groups for details.") c.argument('platform_fault_domain_count', type=int, help='Fault Domain count for each placement group in the availability zone', min_api='2017-12-01') c.argument('vmss_name', name_arg_type, id_part=None, help='Name of the virtual machine scale set.') c.argument('instance_count', help='Number of VMs in the scale set.', type=int) c.argument('disable_overprovision', help='Overprovision option (see https://azure.microsoft.com/documentation/articles/virtual-machine-scale-sets-overview/ for details).', action='store_true') c.argument('upgrade_policy_mode', help=None, arg_type=get_enum_type(UpgradeMode)) c.argument('health_probe', help='Probe name from the existing load balancer, mainly used for rolling upgrade or automatic repairs') c.argument('vm_sku', help='Size of VMs in the scale set. Default to "Standard_DS1_v2". 
See https://azure.microsoft.com/pricing/details/virtual-machines/ for size info.') c.argument('nsg', help='Name or ID of an existing Network Security Group.', arg_group='Network') c.argument('eviction_policy', resource_type=ResourceType.MGMT_COMPUTE, min_api='2017-12-01', arg_type=get_enum_type(VirtualMachineEvictionPolicyTypes, default=None), help="The eviction policy for virtual machines in a Spot priority scale set. Default eviction policy is Deallocate for a Spot priority scale set") c.argument('application_security_groups', resource_type=ResourceType.MGMT_COMPUTE, min_api='2018-06-01', nargs='+', options_list=['--asgs'], help='Space-separated list of existing application security groups to associate with the VM.', arg_group='Network', validator=validate_asg_names_or_ids) c.argument('computer_name_prefix', help='Computer name prefix for all of the virtual machines in the scale set. Computer name prefixes must be 1 to 15 characters long') c.argument('orchestration_mode', help='Choose how virtual machines are managed by the scale set. In Uniform mode, you define a virtual machine model and Azure will generate identical instances based on that model. In Flexible mode, you manually create and add a virtual machine of any configuration to the scale set or generate identical instances based on virtual machine model defined for the scale set.', arg_type=get_enum_type(['Uniform', 'Flexible'])) c.argument('scale_in_policy', scale_in_policy_type) c.argument('automatic_repairs_grace_period', min_api='2018-10-01', help='The amount of time (in minutes, between 30 and 90) for which automatic repairs are suspended due to a state change on VM.') c.argument('user_data', help='UserData for the virtual machines in the scale set. It can be passed in as file or string.', completer=FilesCompleter(), type=file_type, min_api='2021-03-01') c.argument('network_api_version', min_api='2021-03-01', help="Specify the Microsoft.Network API version used when creating networking resources in the Network " "Interface Configurations for Virtual Machine Scale Set with orchestration mode 'Flexible'. Default " "value is 2020-11-01.") c.argument('enable_spot_restore', arg_type=get_three_state_flag(), min_api='2021-04-01', help='Enable the Spot-Try-Restore feature where evicted VMSS SPOT instances will be tried to be restored opportunistically based on capacity availability and pricing constraints') c.argument('spot_restore_timeout', min_api='2021-04-01', help='Timeout value expressed as an ISO 8601 time duration after which the platform will not try to restore the VMSS SPOT instances') c.argument('enable_agent', arg_type=get_three_state_flag(), min_api='2018-06-01', help='Indicate whether virtual machine agent should be provisioned on the virtual machine. When this property is not specified, default behavior is to set it to true. This will ensure that VM Agent is installed on the VM so that extensions can be added to the VM later') c.argument('enable_auto_update', arg_type=get_three_state_flag(), min_api='2020-06-01', help='Indicate whether Automatic Updates is enabled for the Windows virtual machine') c.argument('patch_mode', arg_type=get_enum_type(['AutomaticByOS', 'AutomaticByPlatform', 'Manual', 'ImageDefault']), min_api='2020-12-01', help='Mode of in-guest patching to IaaS virtual machine. Allowed values for Windows VM: AutomaticByOS, AutomaticByPlatform, Manual. Allowed values for Linux VM: AutomaticByPlatform, ImageDefault. Manual - You control the application of patches to a virtual machine. 
You do this by applying patches manually inside the VM. In this mode, automatic updates are disabled; the parameter --enable-auto-update must be false. AutomaticByOS - The virtual machine will automatically be updated by the OS. The parameter --enable-auto-update must be true. AutomaticByPlatform - The virtual machine will automatically be updated by the platform. ImageDefault - The virtual machine\'s default patching configuration is used. The parameters --enable-agent and --enable-auto-update must be true') with self.argument_context('vmss create', arg_group='Network Balancer') as c: LoadBalancerSkuName = self.get_models('LoadBalancerSkuName', resource_type=ResourceType.MGMT_NETWORK) c.argument('application_gateway', help='Name to use when creating a new application gateway (default) or referencing an existing one. Can also reference an existing application gateway by ID or specify "" for none.', options_list=['--app-gateway']) c.argument('app_gateway_capacity', help='The number of instances to use when creating a new application gateway.') c.argument('app_gateway_sku', help='SKU when creating a new application gateway.') c.argument('app_gateway_subnet_address_prefix', help='The subnet IP address prefix to use when creating a new application gateway in CIDR format.') c.argument('backend_pool_name', help='Name to use for the backend pool when creating a new load balancer or application gateway.') c.argument('backend_port', help='When creating a new load balancer, backend port to open with NAT rules (Defaults to 22 on Linux and 3389 on Windows). When creating an application gateway, the backend port to use for the backend HTTP settings.', type=int) c.argument('load_balancer', help='Name to use when creating a new load balancer (default) or referencing an existing one. Can also reference an existing load balancer by ID or specify "" for none.', options_list=['--load-balancer', '--lb']) c.argument('load_balancer_sku', resource_type=ResourceType.MGMT_NETWORK, min_api='2017-08-01', options_list=['--lb-sku'], arg_type=get_enum_type(LoadBalancerSkuName), help="Sku of the Load Balancer to create. Default to 'Standard' when single placement group is turned off; otherwise, default to 'Basic'. The public IP is supported to be created on edge zone only when it is 'Standard'") c.argument('nat_pool_name', help='Name to use for the NAT pool when creating a new load balancer.', options_list=['--lb-nat-pool-name', '--nat-pool-name']) with self.argument_context('vmss create', min_api='2017-03-30', arg_group='Network') as c: c.argument('public_ip_per_vm', action='store_true', help="Each VM instance will have a public ip. For security, you can use '--nsg' to apply appropriate rules") c.argument('vm_domain_name', help="domain name of VM instances, once configured, the FQDN is `vm<vm-index>.<vm-domain-name>.<..rest..>`") c.argument('dns_servers', nargs='+', help="space-separated IP addresses of DNS servers, e.g. 10.0.0.5 10.0.0.6") c.argument('accelerated_networking', arg_type=get_three_state_flag(), help="enable accelerated networking. 
Unless specified, CLI will enable it based on machine image and size") with self.argument_context('vmss update') as c: protection_policy_type = CLIArgumentType(overrides=get_three_state_flag(), arg_group="Protection Policy", min_api='2019-03-01') c.argument('protect_from_scale_in', arg_type=protection_policy_type, help="Protect the VM instance from scale-in operations.") c.argument('protect_from_scale_set_actions', arg_type=protection_policy_type, help="Protect the VM instance from scale set actions (including scale-in).") c.argument('enable_terminate_notification', min_api='2019-03-01', arg_type=get_three_state_flag(), help='Enable terminate notification') c.argument('ultra_ssd_enabled', ultra_ssd_enabled_type) c.argument('scale_in_policy', scale_in_policy_type) c.argument('user_data', help='UserData for the virtual machines in the scale set. It can be passed in as file or string. If empty string is passed in, the existing value will be deleted.', completer=FilesCompleter(), type=file_type, min_api='2021-03-01') c.argument('enable_spot_restore', arg_type=get_three_state_flag(), min_api='2021-04-01', help='Enable the Spot-Try-Restore feature where evicted VMSS SPOT instances will be tried to be restored opportunistically based on capacity availability and pricing constraints') c.argument('spot_restore_timeout', min_api='2021-04-01', help='Timeout value expressed as an ISO 8601 time duration after which the platform will not try to restore the VMSS SPOT instances') with self.argument_context('vmss update', min_api='2018-10-01', arg_group='Automatic Repairs') as c: c.argument('enable_automatic_repairs', arg_type=get_three_state_flag(), help='Enable automatic repairs') c.argument( 'automatic_repairs_grace_period', help='The amount of time (in minutes, between 30 and 90) for which automatic repairs are suspended due to a state change on VM.' ) for scope in ['vmss create', 'vmss update']: with self.argument_context(scope) as c: c.argument('terminate_notification_time', min_api='2019-03-01', help='Length of time (in minutes, between 5 and 15) a notification to be sent to the VM on the instance metadata server till the VM gets deleted') c.argument('max_batch_instance_percent', type=int, min_api='2020-12-01', help='The maximum percent of total virtual machine instances that will be upgraded simultaneously by the rolling upgrade in one batch. Default: 20%') c.argument('max_unhealthy_instance_percent', type=int, min_api='2020-12-01', help='The maximum percentage of the total virtual machine instances in the scale set that can be simultaneously unhealthy. Default: 20%') c.argument('max_unhealthy_upgraded_instance_percent', type=int, min_api='2020-12-01', help='The maximum percentage of upgraded virtual machine instances that can be found to be in an unhealthy state. Default: 20%') c.argument('pause_time_between_batches', min_api='2020-12-01', help='The wait time between completing the update for all virtual machines in one batch and starting the next batch. 
Default: 0 seconds') c.argument('enable_cross_zone_upgrade', arg_type=get_three_state_flag(), min_api='2020-12-01', help='Setting this Boolean property will allow VMSS to ignore AZ boundaries when constructing upgrade batches, and only consider Update Domain and maxBatchInstancePercent to determine the batch size') c.argument('prioritize_unhealthy_instances', arg_type=get_three_state_flag(), min_api='2020-12-01', help='Setting this Boolean property will lead to all unhealthy instances in a scale set getting upgraded before any healthy instances') for scope, help_prefix in [('vmss update', 'Update the'), ('vmss wait', 'Wait on the')]: with self.argument_context(scope) as c: c.argument('instance_id', id_part='child_name_1', help="{0} VM instance with this ID. If missing, {0} VMSS.".format(help_prefix)) for scope in ['vmss update-instances', 'vmss delete-instances']: with self.argument_context(scope) as c: c.argument('instance_ids', multi_ids_type, help='Space-separated list of IDs (ex: 1 2 3 ...) or * for all instances.') with self.argument_context('vmss diagnostics') as c: c.argument('vmss_name', id_part=None, help='Scale set name') with self.argument_context('vmss disk') as c: options_list = ['--vmss-name'] + [c.deprecate(target=opt, redirect='--vmss-name', hide=True) for opt in name_arg_type.settings['options_list']] new_vmss_name_type = CLIArgumentType(overrides=vmss_name_type, options_list=options_list) c.argument('lun', type=int, help='0-based logical unit number (LUN). Max value depends on the Virtual Machine instance size.') c.argument('size_gb', options_list=['--size-gb', '-z'], help='size in GB. Max size: 4095 GB (certain preview disks can be larger).', type=int) c.argument('vmss_name', new_vmss_name_type, completer=get_resource_name_completion_list('Microsoft.Compute/virtualMachineScaleSets')) c.argument('disk', validator=validate_vmss_disk, help='existing disk name or ID to attach or detach from VM instances', min_api='2017-12-01', completer=get_resource_name_completion_list('Microsoft.Compute/disks')) c.argument('instance_id', help='Scale set VM instance id', min_api='2017-12-01') c.argument('sku', arg_type=disk_sku, help='Underlying storage SKU') with self.argument_context('vmss encryption') as c: c.argument('vmss_name', vmss_name_type, completer=get_resource_name_completion_list('Microsoft.Compute/virtualMachineScaleSets')) with self.argument_context('vmss extension') as c: c.argument('extension_name', name_arg_type, help='Name of the extension.') c.argument('vmss_name', vmss_name_type, options_list=['--vmss-name'], id_part=None) with self.argument_context('vmss nic') as c: c.argument('virtual_machine_scale_set_name', options_list=['--vmss-name'], help='Scale set name.', completer=get_resource_name_completion_list('Microsoft.Compute/virtualMachineScaleSets'), id_part='name') c.argument('virtualmachine_index', options_list=['--instance-id'], id_part='child_name_1') c.argument('network_interface_name', options_list=['--name', '-n'], metavar='NIC_NAME', help='The network interface (NIC).', completer=get_resource_name_completion_list('Microsoft.Network/networkInterfaces'), id_part='child_name_2') with self.argument_context('vmss nic list') as c: c.argument('virtual_machine_scale_set_name', arg_type=vmss_name_type, options_list=['--vmss-name'], id_part=None) with self.argument_context('vmss set-orchestration-service-state') as c: c.argument('service_name', arg_type=get_enum_type(OrchestrationServiceNames), help='The name of the orchestration service.') c.argument('action', 
arg_type=get_enum_type(OrchestrationServiceStateAction), help='The action to be performed.') # endregion # region VM & VMSS Shared for scope in ['vm', 'vmss']: with self.argument_context(scope) as c: c.argument('no_auto_upgrade', options_list=['--no-auto-upgrade-minor-version', c.deprecate(target='--no-auto-upgrade', redirect='--no-auto-upgrade-minor-version')], arg_type=get_three_state_flag(), help='If set, the extension service will not automatically pick or upgrade to the latest minor version, even if the extension is redeployed.') with self.argument_context('{} run-command'.format(scope)) as c: c.argument('command_id', completer=get_vm_run_command_completion_list, help="The command id. Use 'az {} run-command list' to get the list".format(scope)) if scope == 'vmss': c.argument('vmss_name', vmss_name_type) with self.argument_context('{} run-command invoke'.format(scope)) as c: c.argument('parameters', nargs='+', help="space-separated parameters in the format of '[name=]value'") c.argument('scripts', nargs='+', help="Space-separated script lines. Use @{file} to load script from a file") with self.argument_context('{} stop'.format(scope)) as c: c.argument('skip_shutdown', action='store_true', help='Skip shutdown and power-off immediately.', min_api='2019-03-01') for scope in ['vm identity assign', 'vmss identity assign']: with self.argument_context(scope) as c: c.argument('assign_identity', options_list=['--identities'], nargs='*', help="Space-separated identities to assign. Use '{0}' to refer to the system assigned identity. Default: '{0}'".format(MSI_LOCAL_ID)) c.argument('vm_name', existing_vm_name) c.argument('vmss_name', vmss_name_type) for scope in ['vm identity remove', 'vmss identity remove']: with self.argument_context(scope) as c: c.argument('identities', nargs='+', help="Space-separated identities to remove. Use '{0}' to refer to the system assigned identity. Default: '{0}'".format(MSI_LOCAL_ID)) c.argument('vm_name', existing_vm_name) c.argument('vmss_name', vmss_name_type) for scope in ['vm identity show', 'vmss identity show']: with self.argument_context(scope) as c: c.argument('vm_name', existing_vm_name) c.argument('vmss_name', vmss_name_type) for scope in ['vm create', 'vmss create']: with self.argument_context(scope) as c: c.argument('location', get_location_type(self.cli_ctx), help='Location in which to create VM and related resources. If default location is not configured, will default to the resource group\'s location') c.argument('tags', tags_type) c.argument('no_wait', help='Do not wait for the long-running operation to finish.') c.argument('validate', options_list=['--validate'], help='Generate and validate the ARM template without creating any resources.', action='store_true') c.argument('size', help='The VM size to be created. 
See https://azure.microsoft.com/pricing/details/virtual-machines/ for size info.') c.argument('image', completer=get_urn_aliases_completion_list) c.argument('custom_data', help='Custom init script file or text (cloud-init, cloud-config, etc..)', completer=FilesCompleter(), type=file_type) c.argument('secrets', multi_ids_type, help='One or many Key Vault secrets as JSON strings or files via `@{path}` containing `[{ "sourceVault": { "id": "value" }, "vaultCertificates": [{ "certificateUrl": "value", "certificateStore": "cert store name (only on windows)"}] }]`', type=file_type, completer=FilesCompleter()) c.argument('assign_identity', nargs='*', arg_group='Managed Service Identity', help="accept system or user assigned identities separated by spaces. Use '[system]' to refer system assigned identity, or a resource id to refer user assigned identity. Check out help for more examples") c.ignore('aux_subscriptions') c.argument('edge_zone', edge_zone_type) with self.argument_context(scope, arg_group='Authentication') as c: c.argument('generate_ssh_keys', action='store_true', help='Generate SSH public and private key files if missing. The keys will be stored in the ~/.ssh directory') c.argument('admin_username', help='Username for the VM. Default value is current username of OS. If the default value is system reserved, then default value will be set to azureuser. Please refer to https://docs.microsoft.com/rest/api/compute/virtualmachines/createorupdate#osprofile to get a full list of reserved values.') c.argument('admin_password', help="Password for the VM if authentication type is 'Password'.") c.argument('ssh_key_value', options_list=['--ssh-key-values'], completer=FilesCompleter(), type=file_type, nargs='+') c.argument('ssh_dest_key_path', help='Destination file path on the VM for the SSH key. If the file already exists, the specified key(s) are appended to the file. Destination path for SSH public keys is currently limited to its default value "/home/username/.ssh/authorized_keys" due to a known issue in Linux provisioning agent.') c.argument('authentication_type', help='Type of authentication to use with the VM. Defaults to password for Windows and SSH public key for Linux. "all" enables both ssh and password authentication. ', arg_type=get_enum_type(['ssh', 'password', 'all'])) with self.argument_context(scope, arg_group='Storage') as c: if DiskStorageAccountTypes: allowed_values = ", ".join([sku.value for sku in DiskStorageAccountTypes]) else: allowed_values = ", ".join(['Premium_LRS', 'Standard_LRS']) usage = 'Usage: [--storage-sku SKU | --storage-sku ID=SKU ID=SKU ID=SKU...], where each ID is "os" or a 0-indexed lun.' allowed_values = 'Allowed values: {}.'.format(allowed_values) storage_sku_help = 'The SKU of the storage account with which to persist VM. Use a singular sku that would be applied across all disks, ' \ 'or specify individual disks. {} {}'.format(usage, allowed_values) c.argument('os_disk_name', help='The name of the new VM OS disk.') c.argument('os_type', help='Type of OS installed on a custom VHD. Do not use when specifying an URN or URN alias.', arg_type=get_enum_type(['windows', 'linux'])) c.argument('storage_account', help="Only applicable when used with `--use-unmanaged-disk`. The name to use when creating a new storage account or referencing an existing one. 
If omitted, an appropriate storage account in the same resource group and location will be used, or a new one will be created.") c.argument('storage_sku', nargs='+', help=storage_sku_help) c.argument('storage_container_name', help="Only applicable when used with `--use-unmanaged-disk`. Name of the storage container for the VM OS disk. Default: vhds") c.ignore('os_publisher', 'os_offer', 'os_sku', 'os_version', 'storage_profile') c.argument('use_unmanaged_disk', action='store_true', help='Do not use managed disk to persist VM') c.argument('os_disk_size_gb', type=int, help='OS disk size in GB to create.') c.argument('data_disk_sizes_gb', nargs='+', type=int, help='space-separated empty managed data disk sizes in GB to create') c.ignore('disk_info', 'storage_account_type', 'public_ip_address_type', 'nsg_type', 'nic_type', 'vnet_type', 'load_balancer_type', 'app_gateway_type') c.argument('os_caching', options_list=[self.deprecate(target='--storage-caching', redirect='--os-disk-caching', hide=True), '--os-disk-caching'], help='Storage caching type for the VM OS disk. Default: ReadWrite', arg_type=get_enum_type(CachingTypes)) c.argument('data_caching', options_list=['--data-disk-caching'], nargs='+', help="storage caching type for data disk(s), including 'None', 'ReadOnly', 'ReadWrite', etc. Use a singular value to apply on all disks, or use `<lun>=<value1> <lun>=<value2>` to configure individual disks") c.argument('ultra_ssd_enabled', ultra_ssd_enabled_type) c.argument('ephemeral_os_disk', arg_type=get_three_state_flag(), min_api='2018-06-01', help='Allows you to create an OS disk directly on the host node, providing local disk performance and faster VM/VMSS reimage time.', is_preview=True) c.argument('os_disk_encryption_set', min_api='2019-07-01', help='Name or ID of disk encryption set for OS disk.') c.argument('data_disk_encryption_sets', nargs='+', min_api='2019-07-01', help='Names or IDs (space delimited) of disk encryption sets for data disks.') c.argument('data_disk_iops', min_api='2019-07-01', nargs='+', type=int, help='Specify the Read-Write IOPS (space delimited) for the managed disk. Should be used only when StorageAccountType is UltraSSD_LRS. If not specified, a default value would be assigned based on diskSizeGB.') c.argument('data_disk_mbps', min_api='2019-07-01', nargs='+', type=int, help='Specify the bandwidth in MB per second (space delimited) for the managed disk. Should be used only when StorageAccountType is UltraSSD_LRS. If not specified, a default value would be assigned based on diskSizeGB.') c.argument('specialized', arg_type=get_three_state_flag(), help='Indicate whether the source image is specialized.') c.argument('encryption_at_host', arg_type=get_three_state_flag(), help='Enable Host Encryption for the VM or VMSS. 
This will enable the encryption for all the disks including Resource/Temp disk at host itself.') c.argument('os_disk_delete_option', arg_type=get_enum_type(self.get_models('DiskDeleteOptionTypes')), min_api='2021-03-01', help='Specify the behavior of the managed disk when the VM gets deleted i.e whether the managed disk is deleted or detached.') c.argument('data_disk_delete_option', options_list=['--data-disk-delete-option', self.deprecate(target='--data-delete-option', redirect='--data-disk-delete-option', hide=True)], nargs='+', min_api='2021-03-01', help='Specify whether data disk should be deleted or detached upon VM deletion.') with self.argument_context(scope, arg_group='Network') as c: c.argument('vnet_name', help='Name of the virtual network when creating a new one or referencing an existing one.') c.argument('vnet_address_prefix', help='The IP address prefix to use when creating a new VNet in CIDR format.') c.argument('subnet', help='The name of the subnet when creating a new VNet or referencing an existing one. Can also reference an existing subnet by ID. If both vnet-name and subnet are omitted, an appropriate VNet and subnet will be selected automatically, or a new one will be created.') c.argument('subnet_address_prefix', help='The subnet IP address prefix to use when creating a new VNet in CIDR format.') c.argument('nics', nargs='+', help='Names or IDs of existing NICs to attach to the VM. The first NIC will be designated as primary. If omitted, a new NIC will be created. If an existing NIC is specified, do not specify subnet, VNet, public IP or NSG.') c.argument('private_ip_address', help='Static private IP address (e.g. 10.0.0.5).') c.argument('public_ip_address', help='Name of the public IP address when creating one (default) or referencing an existing one. Can also reference an existing public IP by ID or specify "" for None (\'""\' in Azure CLI using PowerShell or --% operator).') c.argument('public_ip_address_allocation', help=None, default=None, arg_type=get_enum_type(['dynamic', 'static'])) c.argument('public_ip_address_dns_name', help='Globally unique DNS name for a newly created public IP.') if self.supported_api_version(min_api='2017-08-01', resource_type=ResourceType.MGMT_NETWORK): PublicIPAddressSkuName = self.get_models('PublicIPAddressSkuName', resource_type=ResourceType.MGMT_NETWORK) c.argument('public_ip_sku', help='Public IP SKU. It is set to Basic by default. The public IP is supported to be created on edge zone only when it is \'Standard\'', default=None, arg_type=get_enum_type(PublicIPAddressSkuName)) c.argument('nic_delete_option', nargs='+', min_api='2021-03-01', help='Specify what happens to the network interface when the VM is deleted. Use a singular ' 'value to apply on all resources, or use <Name>=<Value> to configure ' 'the delete behavior for individual resources. 
Possible options are Delete and Detach.') with self.argument_context(scope, arg_group='Marketplace Image Plan') as c: c.argument('plan_name', help='plan name') c.argument('plan_product', help='plan product') c.argument('plan_publisher', help='plan publisher') c.argument('plan_promotion_code', help='plan promotion code') for scope in ['vm create', 'vmss create', 'vm identity assign', 'vmss identity assign']: with self.argument_context(scope) as c: arg_group = 'Managed Service Identity' if scope.split()[-1] == 'create' else None c.argument('identity_scope', options_list=['--scope'], arg_group=arg_group, help="Scope that the system assigned identity can access") c.argument('identity_role', options_list=['--role'], arg_group=arg_group, help="Role name or id the system assigned identity will have") c.ignore('identity_role_id') with self.argument_context('vm auto-shutdown') as c: c.argument('off', action='store_true', help='Turn off auto-shutdown for VM. Configuration will be cleared.') c.argument('email', help='The email recipient to send notifications to (can be a list of semi-colon separated email addresses)') c.argument('time', help='The UTC time of day the schedule will occur every day. Format: hhmm. Example: 1730') c.argument('webhook', help='The webhook URL to which the notification will be sent') c.argument('location', validator=get_default_location_from_resource_group) for scope in ['vm diagnostics', 'vmss diagnostics']: with self.argument_context(scope) as c: c.argument('version', help='version of the diagnostics extension. Will use the latest if not specified') c.argument('settings', help='json string or a file path, which defines data to be collected.', type=validate_file_or_dict, completer=FilesCompleter()) c.argument('protected_settings', help='json string or a file path containing private configurations such as storage account keys, etc.', type=validate_file_or_dict, completer=FilesCompleter()) c.argument('is_windows_os', action='store_true', help='for Windows VMs') for scope in ['vm encryption', 'vmss encryption']: with self.argument_context(scope) as c: c.argument('volume_type', help='Type of volume that the encryption operation is performed on', arg_type=get_enum_type(['DATA', 'OS', 'ALL'])) c.argument('force', action='store_true', help='continue by ignoring client side validation errors') c.argument('disk_encryption_keyvault', help='Name or ID of the key vault where the generated encryption key will be placed.') c.argument('key_encryption_key', help='Key vault key name or URL used to encrypt the disk encryption key.') c.argument('key_encryption_keyvault', help='Name or ID of the key vault containing the key encryption key used to encrypt the disk encryption key. If missing, CLI will use `--disk-encryption-keyvault`.') for scope in ['vm extension', 'vmss extension']: with self.argument_context(scope) as c: c.argument('publisher', help='The name of the extension publisher.') c.argument('settings', type=validate_file_or_dict, help='Extension settings in JSON format. A JSON file path is also accepted.') c.argument('protected_settings', type=validate_file_or_dict, help='Protected settings in JSON format for sensitive information like credentials. A JSON file path is also accepted.') c.argument('version', help='The version of the extension. 
To pin extension version to this value, please specify --no-auto-upgrade-minor-version.') c.argument('enable_auto_upgrade', arg_type=get_three_state_flag(), help='Indicate the extension should be automatically upgraded by the platform if there is a newer version of the extension available.') with self.argument_context('vm extension set') as c: c.argument('vm_extension_name', name_arg_type, completer=get_resource_name_completion_list('Microsoft.Compute/virtualMachines/extensions'), help='Name of the extension.', id_part=None) c.argument('force_update', action='store_true', help='force to update even if the extension configuration has not changed.') c.argument('extension_instance_name', extension_instance_name_type) with self.argument_context('vmss extension set', min_api='2017-12-01') as c: c.argument('force_update', action='store_true', help='force to update even if the extension configuration has not changed.') c.argument('extension_instance_name', extension_instance_name_type) c.argument('provision_after_extensions', nargs='+', help='Space-separated list of extension names after which this extension should be provisioned. These extensions must already be set on the vm.') for scope in ['vm extension image', 'vmss extension image']: with self.argument_context(scope) as c: c.argument('image_location', options_list=['--location', '-l'], help='Image location.') c.argument('name', help='Image name', id_part=None) c.argument('publisher_name', options_list=['--publisher', '-p'], help='Image publisher name') c.argument('type', options_list=['--name', '-n'], help='Name of the extension') c.argument('latest', action='store_true', help='Show the latest version only.') c.argument('version', help='Extension version') c.argument('orderby', help="the $orderby odata query option") c.argument('top', help='the $top odata query option') for scope in ['vm create', 'vm update', 'vmss create', 'vmss update']: with self.argument_context(scope) as c: license_msg = "Specifies that the Windows image or disk was licensed on-premises. " \ "To enable Azure Hybrid Benefit for Windows Server, use 'Windows_Server'. " \ "To enable Multitenant Hosting Rights for Windows 10, use 'Windows_Client'. " \ "For more information see the Azure Windows VM online docs." c.argument('license_type', help=license_msg, arg_type=get_enum_type(['Windows_Server', 'Windows_Client', 'RHEL_BYOS', 'SLES_BYOS', 'None'])) c.argument('priority', resource_type=ResourceType.MGMT_COMPUTE, min_api='2019-03-01', arg_type=get_enum_type(self.get_models('VirtualMachinePriorityTypes'), default=None), help="Priority. Use 'Spot' to run short-lived workloads in a cost-effective way. 'Low' enum will be deprecated in the future. Please use 'Spot' to deploy Azure spot VM and/or VMSS. Default to Regular.") c.argument('max_price', min_api='2019-03-01', type=float, is_preview=True, help='The maximum price (in US Dollars) you are willing to pay for a Spot VM/VMSS. -1 indicates that the Spot VM/VMSS should not be evicted for price reasons') c.argument('capacity_reservation_group', options_list=['--capacity-reservation-group', '--crg'], help='The ID or name of the capacity reservation group that is used to allocate. Pass in "None" to disassociate the capacity reservation group. 
Please note that if you want to delete a VM/VMSS that has been associated with capacity reservation group, you need to disassociate the capacity reservation group first.', min_api='2021-04-01', is_preview=True) with self.argument_context('vm update') as c: c.argument('license_type', help=license_msg, arg_type=get_enum_type( ['Windows_Server', 'Windows_Client', 'RHEL_BYOS', 'SLES_BYOS', 'RHEL_ELS_6', 'None'])) c.argument('user_data', help='UserData for the VM. It can be passed in as file or string. If empty string is passed in, the existing value will be deleted.', completer=FilesCompleter(), type=file_type, min_api='2021-03-01') with self.argument_context('vmss create') as c: c.argument('priority', resource_type=ResourceType.MGMT_COMPUTE, min_api='2017-12-01', arg_type=get_enum_type(self.get_models('VirtualMachinePriorityTypes'), default=None), help="Priority. Use 'Spot' to run short-lived workloads in a cost-effective way. 'Low' enum will be deprecated in the future. Please use 'Spot' to deploy Azure spot VM and/or VMSS. Default to Regular.") with self.argument_context('sig') as c: c.argument('gallery_name', options_list=['--gallery-name', '-r'], help='gallery name') c.argument('gallery_image_name', options_list=['--gallery-image-definition', '-i'], help='gallery image definition') c.argument('gallery_image_version', options_list=['--gallery-image-version', '-e'], help='gallery image version') for scope in ['sig show', 'sig image-definition show', 'sig image-definition delete']: with self.argument_context(scope) as c: c.argument('gallery_name', options_list=['--gallery-name', '-r'], id_part='name', help='gallery name') c.argument('gallery_image_name', options_list=['--gallery-image-definition', '-i'], id_part='child_name_1', help='gallery image definition') with self.argument_context('sig list-shared') as c: c.argument('location', arg_type=get_location_type(self.cli_ctx)) c.argument('shared_to', shared_to_type) with self.argument_context('sig show-shared') as c: c.argument('location', arg_type=get_location_type(self.cli_ctx), id_part='name') c.argument('gallery_unique_name', type=str, help='The unique name of the Shared Gallery.', id_part='child_name_1') for scope in ['sig share add', 'sig share remove']: with self.argument_context(scope) as c: c.argument('gallery_name', type=str, help='The name of the Shared Image Gallery.', id_part='name') c.argument('subscription_ids', nargs='+', help='A list of subscription ids to share the gallery.') c.argument('tenant_ids', nargs='+', help='A list of tenant ids to share the gallery.') with self.argument_context('sig share add') as c: c.argument('op_type', default='Add', deprecate_info=c.deprecate(hide=True), help='distinguish add operation and remove operation') with self.argument_context('sig share remove') as c: c.argument('op_type', default='Remove', deprecate_info=c.deprecate(hide=True), help='distinguish add operation and remove operation') with self.argument_context('sig share reset') as c: c.argument('gallery_name', type=str, help='The name of the Shared Image Gallery.', id_part='name') with self.argument_context('sig image-definition create') as c: c.argument('offer', options_list=['--offer', '-f'], help='image offer') c.argument('sku', options_list=['--sku', '-s'], help='image sku') c.argument('publisher', options_list=['--publisher', '-p'], help='image publisher') c.argument('os_type', arg_type=get_enum_type(['Windows', 'Linux']), help='the type of the OS that is included in the disk if creating a VM from user-image or a specialized VHD') 
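# A minimal, illustrative sketch of how the arguments registered in this
# 'sig image-definition create' block surface on the command line. The resource
# names below are placeholders (not values taken from this file), and only a
# subset of the available options is shown:
#   az sig image-definition create -g MyResourceGroup -r MyGallery \
#       -i MyImageDefinition -p MyPublisher -f MyOffer -s MySku \
#       --os-type Linux --os-state Generalized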
c.argument('os_state', arg_type=get_enum_type(self.get_models('OperatingSystemStateTypes')), help="This property allows the user to specify whether the virtual machines created under this image are 'Generalized' or 'Specialized'.") c.argument('hyper_v_generation', arg_type=get_enum_type(self.get_models('HyperVGenerationTypes')), help='The hypervisor generation of the Virtual Machine. Applicable to OS disks only.') c.argument('minimum_cpu_core', type=int, arg_group='Recommendation', help='minimum cpu cores') c.argument('maximum_cpu_core', type=int, arg_group='Recommendation', help='maximum cpu cores') c.argument('minimum_memory', type=int, arg_group='Recommendation', help='minimum memory in MB') c.argument('maximum_memory', type=int, arg_group='Recommendation', help='maximum memory in MB') c.argument('plan_publisher', help='plan publisher', arg_group='Purchase plan') c.argument('plan_name', help='plan name', arg_group='Purchase plan') c.argument('plan_product', help='plan product', arg_group='Purchase plan') c.argument('eula', help='The Eula agreement for the gallery image') c.argument('privacy_statement_uri', help='The privacy statement uri') c.argument('release_note_uri', help='The release note uri') c.argument('end_of_life_date', help="the end of life date, e.g. '2020-12-31'") c.argument('disallowed_disk_types', nargs='*', help='disk types which would not work with the image, e.g., Standard_LRS') c.argument('features', help='A list of gallery image features. E.g. "IsSecureBootSupported=true IsMeasuredBootSupported=false"') with self.argument_context('sig image-definition list-shared') as c: c.argument('location', arg_type=get_location_type(self.cli_ctx), id_part='name') c.argument('gallery_unique_name', type=str, help='The unique name of the Shared Gallery.', id_part='child_name_1') c.argument('shared_to', shared_to_type) with self.argument_context('sig image-definition show-shared') as c: c.argument('location', arg_type=get_location_type(self.cli_ctx), id_part='name') c.argument('gallery_unique_name', type=str, help='The unique name of the Shared Gallery.', id_part='child_name_1') c.argument('gallery_image_name', options_list=['--gallery-image-definition', '-i'], type=str, help='The name ' 'of the Shared Gallery Image Definition from which the Image Versions are to be listed.', id_part='child_name_2') with self.argument_context('sig create') as c: c.argument('description', help='the description of the gallery') c.argument('permissions', arg_type=get_enum_type(GallerySharingPermissionTypes), arg_group='Sharing Profile', min_api='2020-09-30', is_experimental=True, help='This property allows you to specify the permission of sharing gallery.') c.argument('soft_delete', arg_type=get_three_state_flag(), min_api='2021-03-01', is_preview=True, help='Enable soft-deletion for resources in this gallery, ' 'allowing them to be recovered within retention time.') with self.argument_context('sig update') as c: c.ignore('gallery') c.argument('permissions', arg_type=get_enum_type(GallerySharingPermissionTypes), arg_group='Sharing Profile', min_api='2020-09-30', is_experimental=True, help='This property allows you to specify the permission of sharing gallery.') c.argument('soft_delete', arg_type=get_three_state_flag(), min_api='2021-03-01', is_preview=True, help='Enable soft-deletion for resources in this gallery, ' 'allowing them to be recovered within retention time.') with self.argument_context('sig image-definition create') as c: c.argument('description', help='the description of the gallery image 
definition') with self.argument_context('sig image-definition update') as c: c.ignore('gallery_image') with self.argument_context('sig image-version') as c: deprecated_option = c.deprecate(target='--gallery-image-version-name', redirect='--gallery-image-version', hide=True, expiration="3.0.0") c.argument('gallery_image_version_name', options_list=['--gallery-image-version', '-e', deprecated_option], help='Gallery image version in semantic version pattern. The allowed characters are digit and period. Digits must be within the range of a 32-bit integer, e.g. `<MajorVersion>.<MinorVersion>.<Patch>`') with self.argument_context('sig image-version create', resource_type=ResourceType.MGMT_COMPUTE, operation_group='gallery_image_versions') as c: c.argument('gallery_image_version', options_list=['--gallery-image-version', '-e'], help='Gallery image version in semantic version pattern. The allowed characters are digit and period. Digits must be within the range of a 32-bit integer, e.g. `<MajorVersion>.<MinorVersion>.<Patch>`') c.argument('description', help='the description of the gallery image version') c.argument('managed_image', help='image name(if in the same resource group) or resource id') c.argument('os_snapshot', help='Name or ID of OS disk snapshot') c.argument('data_snapshots', nargs='+', help='Names or IDs (space-delimited) of data disk snapshots') c.argument('data_snapshot_luns', nargs='+', help='Logical unit numbers (space-delimited) of data disk snapshots') c.argument('exclude_from_latest', arg_type=get_three_state_flag(), help='The flag means that if it is set to true, people deploying VMs with version omitted will not use this version.') c.argument('version', help='image version') c.argument('end_of_life_date', help="the end of life date, e.g. '2020-12-31'") c.argument('storage_account_type', help="The default storage account type to be used per region. To set regional storage account types, use --target-regions", arg_type=get_enum_type(["Standard_LRS", "Standard_ZRS", "Premium_LRS"]), min_api='2019-03-01') c.argument('target_region_encryption', nargs='+', help='Space-separated list of customer managed keys for encrypting the OS and data disks in the gallery artifact for each region. Format for each region: `<os_des>,<lun1>,<lun1_des>,<lun2>,<lun2_des>`. Use "null" as a placeholder.') c.argument('os_vhd_uri', help='Source VHD URI of OS disk') c.argument('os_vhd_storage_account', help='Name or ID of storage account of source VHD URI of OS disk') c.argument('data_vhds_uris', nargs='+', help='Source VHD URIs (space-delimited) of data disks') c.argument('data_vhds_luns', nargs='+', help='Logical unit numbers (space-delimited) of source VHD URIs of data disks') c.argument('data_vhds_storage_accounts', options_list=['--data-vhds-storage-accounts', '--data-vhds-sa'], nargs='+', help='Names or IDs (space-delimited) of storage accounts of source VHD URIs of data disks') c.argument('replication_mode', min_api='2021-07-01', arg_type=get_enum_type(ReplicationMode), help='Optional parameter which specifies the mode to be used for replication. 
This property is not updatable.') with self.argument_context('sig image-version list-shared') as c: c.argument('location', arg_type=get_location_type(self.cli_ctx), id_part='name') c.argument('gallery_unique_name', type=str, help='The unique name of the Shared Gallery.', id_part='child_name_1') c.argument('gallery_image_name', options_list=['--gallery-image-definition', '-i'], type=str, help='The name ' 'of the Shared Gallery Image Definition from which the Image Versions are to be listed.', id_part='child_name_2') c.argument('shared_to', shared_to_type) with self.argument_context('sig image-version show') as c: c.argument('expand', help="The expand expression to apply on the operation, e.g. 'ReplicationStatus'") with self.argument_context('sig image-version show-shared') as c: c.argument('location', arg_type=get_location_type(self.cli_ctx), id_part='name') c.argument('gallery_unique_name', type=str, help='The unique name of the Shared Gallery.', id_part='child_name_1') c.argument('gallery_image_name', options_list=['--gallery-image-definition', '-i'], type=str, help='The name ' 'of the Shared Gallery Image Definition from which the Image Versions are to be listed.', id_part='child_name_2') c.argument('gallery_image_version_name', options_list=['--gallery-image-version', '-e'], type=str, help='The ' 'name of the gallery image version to be created. Needs to follow semantic version name pattern: ' 'The allowed characters are digit and period. Digits must be within the range of a 32-bit integer. ' 'Format: <MajorVersion>.<MinorVersion>.<Patch>', id_part='child_name_3') for scope in ['sig image-version create', 'sig image-version update']: with self.argument_context(scope) as c: c.argument('target_regions', nargs='*', validator=process_gallery_image_version_namespace, help='Space-separated list of regions and their replica counts. Use `<region>[=<replica count>][=<storage account type>]` to optionally set the replica count and/or storage account type for each region. ' 'If a replica count is not specified, the default replica count will be used. If a storage account type is not specified, the default storage account type will be used') c.argument('replica_count', help='The default number of replicas to be created per region. To set regional replication counts, use --target-regions', type=int) # endregion # region Proximity Placement Group with self.argument_context('ppg', min_api='2018-04-01') as c: c.argument('proximity_placement_group_name', arg_type=name_arg_type, help="The name of the proximity placement group.") with self.argument_context('ppg create', min_api='2018-04-01') as c: c.argument('ppg_type', options_list=['--type', '-t'], help="The type of the proximity placement group. 
Allowed values: Standard.") c.argument('tags', tags_type) with self.argument_context('ppg show', min_api='2019-07-01') as c: c.argument('include_colocation_status', action='store_true', help='Enable fetching the colocation status of all the resources in the proximity placement group.') for scope, item in [('vm create', 'VM'), ('vmss create', 'VMSS'), ('vm availability-set create', 'availability set'), ('vm update', 'VM'), ('vmss update', 'VMSS'), ('vm availability-set update', 'availability set')]: with self.argument_context(scope, min_api='2018-04-01') as c: c.argument('proximity_placement_group', options_list=['--ppg'], help="The name or ID of the proximity placement group the {} should be associated with.".format(item), validator=_validate_proximity_placement_group) # only availability set does not have a command level validator, so this should be added. # endregion # region VM Monitor with self.argument_context('vm monitor log show') as c: c.argument('analytics_query', options_list=['--analytics-query', '-q'], help="Query to execute over Log Analytics data.") c.argument('timespan', help="Timespan over which to query. Defaults to querying all available data.") with self.argument_context('vm monitor metrics') as c: c.argument('metricnamespace', options_list=['--namespace'], help='Namespace to query metric definitions for.') with self.argument_context('vm monitor metrics tail') as c: from azure.mgmt.monitor.models import AggregationType c.extra('resource_group_name', required=True) c.argument('resource', arg_type=existing_vm_name, help='Name or ID of a virtual machine', validator=validate_vm_name_for_monitor_metrics, id_part=None) c.argument('metadata', action='store_true') c.argument('dimension', nargs='*', validator=validate_metric_dimension) c.argument('aggregation', arg_type=get_enum_type(t for t in AggregationType if t.name != 'none'), nargs='*') c.argument('metrics', nargs='*') c.argument('orderby', help='Aggregation to use for sorting results and the direction of the sort. Only one order can be specificed. Examples: sum asc') c.argument('top', help='Max number of records to retrieve. Valid only if --filter used.') c.argument('filters', options_list=['--filter']) c.argument('metric_namespace', options_list=['--namespace']) with self.argument_context('vm monitor metrics tail', arg_group='Time') as c: c.argument('start_time', arg_type=get_datetime_type(help='Start time of the query.')) c.argument('end_time', arg_type=get_datetime_type(help='End time of the query. Defaults to the current time.')) c.argument('offset', type=get_period_type(as_timedelta=True)) c.argument('interval', arg_group='Time', type=get_period_type()) with self.argument_context('vm monitor metrics list-definitions') as c: c.extra('resource_group_name', required=True) c.argument('resource_uri', arg_type=existing_vm_name, help='Name or ID of a virtual machine', validator=validate_vm_name_for_monitor_metrics, id_part=None) # endregion # region disk encryption set with self.argument_context('disk-encryption-set') as c: c.argument('disk_encryption_set_name', disk_encryption_set_name) c.argument('key_url', help='URL pointing to a key or secret in KeyVault.') c.argument('source_vault', help='Name or ID of the KeyVault containing the key or secret.') c.argument('encryption_type', arg_type=get_enum_type(['EncryptionAtRestWithPlatformKey', 'EncryptionAtRestWithCustomerKey', 'EncryptionAtRestWithPlatformAndCustomerKeys']), help='The type of key used to encrypt the data of the disk. 
EncryptionAtRestWithPlatformKey: Disk is encrypted at rest with Platform managed key. It is the default encryption type. EncryptionAtRestWithCustomerKey: Disk is encrypted at rest with Customer managed key that can be changed and revoked by a customer. EncryptionAtRestWithPlatformAndCustomerKeys: Disk is encrypted at rest with 2 layers of encryption. One of the keys is Customer managed and the other key is Platform managed.') c.argument('location', validator=get_default_location_from_resource_group) c.argument('tags', tags_type) c.argument('enable_auto_key_rotation', arg_type=get_three_state_flag(), min_api='2020-12-01', options_list=['--enable-auto-key-rotation', '--auto-rotation'], help='Enable automatic rotation of keys.') # endregion # region DiskAccess with self.argument_context('disk-access', resource_type=ResourceType.MGMT_COMPUTE, operation_group='disk_accesses') as c: c.argument('disk_access_name', arg_type=name_arg_type, help='Name of the disk access resource.', id_part='name') c.argument('location', validator=get_default_location_from_resource_group) c.argument('tags', tags_type) # endRegion with self.argument_context('capacity reservation group') as c: c.argument('location', arg_type=get_location_type(self.cli_ctx), validator=get_default_location_from_resource_group) c.argument('capacity_reservation_group_name', options_list=['--capacity-reservation-group', '-n'], help='The name of the capacity reservation group.') c.argument('tags', tags_type) with self.argument_context('capacity reservation group create') as c: c.argument('zones', zones_type, help='Availability Zones to use for this capacity reservation group. If not provided, the group supports only regional resources in the region. If provided, enforces each capacity reservation in the group to be in one of the zones.') with self.argument_context('capacity reservation group show') as c: c.argument('instance_view', action='store_true', options_list=['--instance-view', '-i'], help='Retrieve the list of instance views of the capacity reservations under the capacity reservation group which is a snapshot of the runtime properties of a capacity reservation that is managed by the platform and can change outside of control plane operations.') with self.argument_context('capacity reservation group list') as c: c.argument('vm_instance', action='store_true', help='Retrieve the Virtual Machine Instance which are associated to capacity reservation group in the response.') c.argument('vmss_instance', action='store_true', help='Retrieve the ScaleSet VM Instance which are associated to capacity reservation group in the response.') with self.argument_context('capacity reservation') as c: c.argument('location', arg_type=get_location_type(self.cli_ctx), validator=get_default_location_from_resource_group) c.argument('capacity_reservation_group_name', options_list=['--capacity-reservation-group', '-c'], help='The name of the capacity reservation group.') c.argument('capacity_reservation_name', options_list=['--capacity-reservation-name', '-n'], help='The name of the capacity reservation.') c.argument('capacity', type=int, help='Specify the number of virtual machines in the scale set.') c.argument('tags', tags_type) with self.argument_context('capacity reservation create') as c: c.argument('zone', zone_type, help='Availability Zone to use for this capacity reservation. The zone has to be single value and also should be part for the list of zones specified during the capacity reservation group creation. 
If not provided, the reservation supports only non-zonal deployments. If provided, enforces VM/VMSS using this capacity reservation to be in same zone.') c.argument('sku_name', options_list=['--sku', '-s'], required=True, help='The SKU of the resource for which capacity needs be reserved. Currently VM Skus with the capability called "CapacityReservationSupported" set to true are supported. Refer to List Microsoft.Compute SKUs in a region (https://docs.microsoft.com/rest/api/compute/resourceskus/list) for supported values.') with self.argument_context('capacity reservation show') as c: c.argument('instance_view', action='store_true', options_list=['--instance-view', '-i'], help='Retrieve a snapshot of the runtime properties of the capacity reservation that is managed by the platform and can change outside of control plane operations.')
35,036
def test_squeezenet():
    Device.load("test_config.json")

    if skip_runtime_test():
        return

    try:
        import tvm.relay.testing.tf as tf_testing
    except ImportError:
        pytest.skip("Missing TF Support")

    device = Device()

    def get_model():
        model_path = tf_testing.get_workload_official(
            "https://storage.googleapis.com/download.tensorflow.org/models/tflite/model_zoo/upload_20180427/squeezenet_2018_04_27.tgz",
            "squeezenet.tflite",
        )
        inputs = {"Placeholder": ((1, 224, 224, 3), "float32")}
        mod, params = _get_tflite_model(model_path, inputs_dict=inputs)
        return mod, params, inputs

    _build_and_run_network(
        *get_model(), device=device, tvm_ops=9, acl_partitions=31, atol=8, rtol=0
    )
def test_squeezenet():
    Device.load("test_config.json")

    if skip_runtime_test():
        return

    try:
        import tvm.relay.testing.tf as tf_testing
    except ImportError:
        pytest.skip("Missing Tflite Support")

    device = Device()

    def get_model():
        model_path = tf_testing.get_workload_official(
            "https://storage.googleapis.com/download.tensorflow.org/models/tflite/model_zoo/upload_20180427/squeezenet_2018_04_27.tgz",
            "squeezenet.tflite",
        )
        inputs = {"Placeholder": ((1, 224, 224, 3), "float32")}
        mod, params = _get_tflite_model(model_path, inputs_dict=inputs)
        return mod, params, inputs

    _build_and_run_network(
        *get_model(), device=device, tvm_ops=9, acl_partitions=31, atol=8, rtol=0
    )
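The `_get_tflite_model` helper referenced in both versions of this test is not shown in this entry. The following is a minimal, hypothetical sketch of what such a helper might do with the downloaded squeezenet.tflite file, assuming the `tflite` flatbuffer bindings and `tvm.relay` are installed; the name `_get_tflite_model_sketch` and its body are an assumption, not the test suite's actual implementation.

from tvm import relay


def _get_tflite_model_sketch(model_path, inputs_dict):
    # hypothetical stand-in for the _get_tflite_model helper used above
    import tflite  # flatbuffer bindings for the TFLite schema

    with open(model_path, "rb") as f:
        tflite_model = tflite.Model.GetRootAsModel(f.read(), 0)

    # split {"Placeholder": ((1, 224, 224, 3), "float32")} into shape/dtype maps
    shape_dict = {name: shape for name, (shape, _) in inputs_dict.items()}
    dtype_dict = {name: dtype for name, (_, dtype) in inputs_dict.items()}

    # convert the TFLite graph into a Relay module plus its parameters
    return relay.frontend.from_tflite(
        tflite_model, shape_dict=shape_dict, dtype_dict=dtype_dict
    )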
5,805
def quad(func, a, b, args=(), full_output=0, epsabs=1.49e-8, epsrel=1.49e-8, limit=50, points=None, weight=None, wvar=None, wopts=None, maxp1=50, limlst=50): """ Compute a definite integral. Integrate func from `a` to `b` (possibly infinite interval) using a technique from the Fortran library QUADPACK. Parameters ---------- func : {function, scipy.LowLevelCallable} A Python function or method to integrate. If `func` takes many arguments, it is integrated along the axis corresponding to the first argument. If the user desires improved integration performance, then `f` may be a `scipy.LowLevelCallable` with one of the signatures:: double func(double x) double func(double x, void *user_data) double func(int n, double *xx) double func(int n, double *xx, void *user_data) The ``user_data`` is the data contained in the `scipy.LowLevelCallable`. In the call forms with ``xx``, ``n`` is the length of the ``xx`` array which contains ``xx[0] == x`` and the rest of the items are numbers contained in the ``args`` argument of quad. In addition, certain ctypes call signatures are supported for backward compatibility, but those should not be used in new code. a : float Lower limit of integration (use -numpy.inf for -infinity). b : float Upper limit of integration (use numpy.inf for +infinity). args : tuple, optional Extra arguments to pass to `func`. full_output : int, optional Non-zero to return a dictionary of integration information. If non-zero, warning messages are also suppressed and the message is appended to the output tuple. Returns ------- y : float The integral of func from `a` to `b`. abserr : float An estimate of the absolute error in the result. infodict : dict A dictionary containing additional information. Run scipy.integrate.quad_explain() for more information. message A convergence message. explain Appended only with 'cos' or 'sin' weighting and infinite integration limits, it contains an explanation of the codes in infodict['ierlst'] Other Parameters ---------------- epsabs : float or int, optional Absolute error tolerance. Default is 1.49e-8. `quad` tries to obtain an accuracy of ``abs(i-result) <= max(epsabs, epsrel*abs(i))`` where ``i`` = integral of `func` from `a` to `b`, and ``result`` is the numerical approximation. See `epsrel` below. epsrel : float or int, optional Relative error tolerance. Default is 1.49e-8. If ``epsabs <= 0``, `epsrel` must be greater than both 5e-29 and ``50 * (machine epsilon)``. See `epsabs` above. limit : float or int, optional An upper bound on the number of subintervals used in the adaptive algorithm. points : (sequence of floats,ints), optional A sequence of break points in the bounded integration interval where local difficulties of the integrand may occur (e.g., singularities, discontinuities). The sequence does not have to be sorted. Note that this option cannot be used in conjunction with ``weight``. weight : float or int, optional String indicating weighting function. Full explanation for this and the remaining arguments can be found below. wvar : optional Variables for use with weighting functions. wopts : optional Optional input for reusing Chebyshev moments. maxp1 : float or int, optional An upper bound on the number of Chebyshev moments. limlst : int, optional Upper bound on the number of cycles (>=3) for use with a sinusoidal weighting and an infinite end-point. 
See Also -------- dblquad : double integral tplquad : triple integral nquad : n-dimensional integrals (uses `quad` recursively) fixed_quad : fixed-order Gaussian quadrature quadrature : adaptive Gaussian quadrature odeint : ODE integrator ode : ODE integrator simpson : integrator for sampled data romb : integrator for sampled data scipy.special : for coefficients and roots of orthogonal polynomials Notes ----- **Extra information for quad() inputs and outputs** If full_output is non-zero, then the third output argument (infodict) is a dictionary with entries as tabulated below. For infinite limits, the range is transformed to (0,1) and the optional outputs are given with respect to this transformed range. Let M be the input argument limit and let K be infodict['last']. The entries are: 'neval' The number of function evaluations. 'last' The number, K, of subintervals produced in the subdivision process. 'alist' A rank-1 array of length M, the first K elements of which are the left end points of the subintervals in the partition of the integration range. 'blist' A rank-1 array of length M, the first K elements of which are the right end points of the subintervals. 'rlist' A rank-1 array of length M, the first K elements of which are the integral approximations on the subintervals. 'elist' A rank-1 array of length M, the first K elements of which are the moduli of the absolute error estimates on the subintervals. 'iord' A rank-1 integer array of length M, the first L elements of which are pointers to the error estimates over the subintervals with ``L=K`` if ``K<=M/2+2`` or ``L=M+1-K`` otherwise. Let I be the sequence ``infodict['iord']`` and let E be the sequence ``infodict['elist']``. Then ``E[I[1]], ..., E[I[L]]`` forms a decreasing sequence. If the input argument points is provided (i.e., it is not None), the following additional outputs are placed in the output dictionary. Assume the points sequence is of length P. 'pts' A rank-1 array of length P+2 containing the integration limits and the break points of the intervals in ascending order. This is an array giving the subintervals over which integration will occur. 'level' A rank-1 integer array of length M (=limit), containing the subdivision levels of the subintervals, i.e., if (aa,bb) is a subinterval of ``(pts[1], pts[2])`` where ``pts[0]`` and ``pts[2]`` are adjacent elements of ``infodict['pts']``, then (aa,bb) has level l if ``|bb-aa| = |pts[2]-pts[1]| * 2**(-l)``. 'ndin' A rank-1 integer array of length P+2. After the first integration over the intervals (pts[1], pts[2]), the error estimates over some of the intervals may have been increased artificially in order to put their subdivision forward. This array has ones in slots corresponding to the subintervals for which this happens. **Weighting the integrand** The input variables, *weight* and *wvar*, are used to weight the integrand by a select list of functions. Different integration methods are used to compute the integral with these weighting functions, and these do not support specifying break points. The possible values of weight and the corresponding weighting functions are. 
========== =================================== ===================== ``weight`` Weight function used ``wvar`` ========== =================================== ===================== 'cos' cos(w*x) wvar = w 'sin' sin(w*x) wvar = w 'alg' g(x) = ((x-a)**alpha)*((b-x)**beta) wvar = (alpha, beta) 'alg-loga' g(x)*log(x-a) wvar = (alpha, beta) 'alg-logb' g(x)*log(b-x) wvar = (alpha, beta) 'alg-log' g(x)*log(x-a)*log(b-x) wvar = (alpha, beta) 'cauchy' 1/(x-c) wvar = c ========== =================================== ===================== wvar holds the parameter w, (alpha, beta), or c depending on the weight selected. In these expressions, a and b are the integration limits. For the 'cos' and 'sin' weighting, additional inputs and outputs are available. For finite integration limits, the integration is performed using a Clenshaw-Curtis method which uses Chebyshev moments. For repeated calculations, these moments are saved in the output dictionary: 'momcom' The maximum level of Chebyshev moments that have been computed, i.e., if ``M_c`` is ``infodict['momcom']`` then the moments have been computed for intervals of length ``|b-a| * 2**(-l)``, ``l=0,1,...,M_c``. 'nnlog' A rank-1 integer array of length M(=limit), containing the subdivision levels of the subintervals, i.e., an element of this array is equal to l if the corresponding subinterval is ``|b-a|* 2**(-l)``. 'chebmo' A rank-2 array of shape (25, maxp1) containing the computed Chebyshev moments. These can be passed on to an integration over the same interval by passing this array as the second element of the sequence wopts and passing infodict['momcom'] as the first element. If one of the integration limits is infinite, then a Fourier integral is computed (assuming w neq 0). If full_output is 1 and a numerical error is encountered, besides the error message attached to the output tuple, a dictionary is also appended to the output tuple which translates the error codes in the array ``info['ierlst']`` to English messages. The output information dictionary contains the following entries instead of 'last', 'alist', 'blist', 'rlist', and 'elist': 'lst' The number of subintervals needed for the integration (call it ``K_f``). 'rslst' A rank-1 array of length M_f=limlst, whose first ``K_f`` elements contain the integral contribution over the interval ``(a+(k-1)c, a+kc)`` where ``c = (2*floor(|w|) + 1) * pi / |w|`` and ``k=1,2,...,K_f``. 'erlst' A rank-1 array of length ``M_f`` containing the error estimate corresponding to the interval in the same position in ``infodict['rslist']``. 'ierlst' A rank-1 integer array of length ``M_f`` containing an error flag corresponding to the interval in the same position in ``infodict['rslist']``. See the explanation dictionary (last entry in the output tuple) for the meaning of the codes. Examples -------- Calculate :math:`\\int^4_0 x^2 dx` and compare with an analytic result >>> from scipy import integrate >>> x2 = lambda x: x**2 >>> integrate.quad(x2, 0, 4) (21.333333333333332, 2.3684757858670003e-13) >>> print(4**3 / 3.) 
# analytical result 21.3333333333 Calculate :math:`\\int^\\infty_0 e^{-x} dx` >>> invexp = lambda x: np.exp(-x) >>> integrate.quad(invexp, 0, np.inf) (1.0, 5.842605999138044e-11) Calculate :math:`\\int^1_0 a x dx` for :math:`a = 1, 3` >>> f = lambda x, a : a*x >>> y, err = integrate.quad(f, 0, 1, args=(1,)) >>> y 0.5 >>> y, err = integrate.quad(f, 0, 1, args=(3,)) >>> y 1.5 Calculate :math:`\\int^1_0 x^2 + y^2 dx` with ctypes, holding y parameter as 1:: testlib.c => double func(int n, double args[n]){ return args[0]*args[0] + args[1]*args[1];} compile to library testlib.* :: from scipy import integrate import ctypes lib = ctypes.CDLL('/home/.../testlib.*') #use absolute path lib.func.restype = ctypes.c_double lib.func.argtypes = (ctypes.c_int,ctypes.c_double) integrate.quad(lib.func,0,1,(1)) #(1.3333333333333333, 1.4802973661668752e-14) print((1.0**3/3.0 + 1.0) - (0.0**3/3.0 + 0.0)) #Analytic result # 1.3333333333333333 Be aware that pulse shapes and other sharp features as compared to the size of the integration interval may not be integrated correctly using this method. A simplified example of this limitation is integrating a y-axis reflected step function with many zero values within the integrals bounds. >>> y = lambda x: 1 if x<=0 else 0 >>> integrate.quad(y, -1, 1) (1.0, 1.1102230246251565e-14) >>> integrate.quad(y, -1, 100) (1.0000000002199108, 1.0189464580163188e-08) >>> integrate.quad(y, -1, 10000) (0.0, 0.0) """ if not isinstance(args, tuple): args = (args,) # check the limits of integration: \int_a^b, expect a < b flip, a, b = b < a, min(a, b), max(a, b) if weight is None: retval = _quad(func, a, b, args, full_output, epsabs, epsrel, limit, points) else: if points is not None: msg = ("Break points cannot be specified when using weighted integrand.\n" "Continuing, ignoring specified points.") warnings.warn(msg, IntegrationWarning, stacklevel=2) retval = _quad_weight(func, a, b, args, full_output, epsabs, epsrel, limlst, limit, maxp1, weight, wvar, wopts) if flip: retval = (-retval[0],) + retval[1:] ier = retval[-1] if ier == 0: return retval[:-1] msgs = {80: "A Python error occurred possibly while calling the function.", 1: "The maximum number of subdivisions (%d) has been achieved.\n If increasing the limit yields no improvement it is advised to analyze \n the integrand in order to determine the difficulties. If the position of a \n local difficulty can be determined (singularity, discontinuity) one will \n probably gain from splitting up the interval and calling the integrator \n on the subranges. Perhaps a special-purpose integrator should be used." % limit, 2: "The occurrence of roundoff error is detected, which prevents \n the requested tolerance from being achieved. The error may be \n underestimated.", 3: "Extremely bad integrand behavior occurs at some points of the\n integration interval.", 4: "The algorithm does not converge. Roundoff error is detected\n in the extrapolation table. It is assumed that the requested tolerance\n cannot be achieved, and that the returned result (if full_output = 1) is \n the best which can be obtained.", 5: "The integral is probably divergent, or slowly convergent.", 6: "The input is invalid.", 7: "Abnormal termination of the routine. The estimates for result\n and error are less reliable. 
It is assumed that the requested accuracy\n has not been achieved.", 'unknown': "Unknown error."} if weight in ['cos','sin'] and (b == Inf or a == -Inf): msgs[1] = "The maximum number of cycles allowed has been achieved., e.e.\n of subintervals (a+(k-1)c, a+kc) where c = (2*int(abs(omega)+1))\n *pi/abs(omega), for k = 1, 2, ..., lst. One can allow more cycles by increasing the value of limlst. Look at info['ierlst'] with full_output=1." msgs[4] = "The extrapolation table constructed for convergence acceleration\n of the series formed by the integral contributions over the cycles, \n does not converge to within the requested accuracy. Look at \n info['ierlst'] with full_output=1." msgs[7] = "Bad integrand behavior occurs within one or more of the cycles.\n Location and type of the difficulty involved can be determined from \n the vector info['ierlist'] obtained with full_output=1." explain = {1: "The maximum number of subdivisions (= limit) has been \n achieved on this cycle.", 2: "The occurrence of roundoff error is detected and prevents\n the tolerance imposed on this cycle from being achieved.", 3: "Extremely bad integrand behavior occurs at some points of\n this cycle.", 4: "The integral over this cycle does not converge (to within the required accuracy) due to roundoff in the extrapolation procedure invoked on this cycle. It is assumed that the result on this interval is the best which can be obtained.", 5: "The integral over this cycle is probably divergent or slowly convergent."} try: msg = msgs[ier] except KeyError: msg = msgs['unknown'] if ier in [1,2,3,4,5,7]: if full_output: if weight in ['cos', 'sin'] and (b == Inf or a == -Inf): return retval[:-1] + (msg, explain) else: return retval[:-1] + (msg,) else: warnings.warn(msg, IntegrationWarning, stacklevel=2) return retval[:-1] elif ier == 6: # Forensic decision tree when QUADPACK throws ier=6 if epsabs <= 0: # Small error tolerance - applies to all methods if epsrel < max(50 * sys.float_info.epsilon, 5e-29): msg = ("If 'epsabs'<=0, 'epsrel' must be greater than both" " 5e-29 and 50*(machine epsilon).") elif weight in ['sin', 'cos'] and (abs(a) + abs(b) == Inf): msg = ("Sine or cosine weighted intergals with infinite domain" " must have 'epsabs'>0.") elif weight is None: if points is None: # QAGSE/QAGIE msg = ("Invalid 'limit' argument. There must be" " at least one subinterval") else: # QAGPE if not (min(a, b) <= min(points) <= max(points) <= max(a, b)): msg = ("All break points in 'points' must lie within the" " integration limits.") elif len(points) >= limit: msg = ("Number of break points ({:d})" " must be less than subinterval" " limit ({:d})").format(len(points), limit) else: if maxp1 < 1: msg = "Chebyshev moment limit maxp1 must be >=1." elif weight in ('cos', 'sin') and abs(a+b) == Inf: # QAWFE msg = "Cycle limit limlst must be >=3." elif weight.startswith('alg'): # QAWSE if min(wvar) < -1: msg = "wvar parameters (alpha, beta) must both be >= -1." if b < a: msg = "Integration limits a, b must satistfy a<b." elif weight == 'cauchy' and wvar in (a, b): msg = ("Parameter 'wvar' must not equal" " integration limits 'a' or 'b'.") raise ValueError(msg)
def quad(func, a, b, args=(), full_output=0, epsabs=1.49e-8, epsrel=1.49e-8, limit=50, points=None, weight=None, wvar=None, wopts=None, maxp1=50, limlst=50): """ Compute a definite integral. Integrate func from `a` to `b` (possibly infinite interval) using a technique from the Fortran library QUADPACK. Parameters ---------- func : {function, scipy.LowLevelCallable} A Python function or method to integrate. If `func` takes many arguments, it is integrated along the axis corresponding to the first argument. If the user desires improved integration performance, then `f` may be a `scipy.LowLevelCallable` with one of the signatures:: double func(double x) double func(double x, void *user_data) double func(int n, double *xx) double func(int n, double *xx, void *user_data) The ``user_data`` is the data contained in the `scipy.LowLevelCallable`. In the call forms with ``xx``, ``n`` is the length of the ``xx`` array which contains ``xx[0] == x`` and the rest of the items are numbers contained in the ``args`` argument of quad. In addition, certain ctypes call signatures are supported for backward compatibility, but those should not be used in new code. a : float Lower limit of integration (use -numpy.inf for -infinity). b : float Upper limit of integration (use numpy.inf for +infinity). args : tuple, optional Extra arguments to pass to `func`. full_output : int, optional Non-zero to return a dictionary of integration information. If non-zero, warning messages are also suppressed and the message is appended to the output tuple. Returns ------- y : float The integral of func from `a` to `b`. abserr : float An estimate of the absolute error in the result. infodict : dict A dictionary containing additional information. Run scipy.integrate.quad_explain() for more information. message A convergence message. explain Appended only with 'cos' or 'sin' weighting and infinite integration limits, it contains an explanation of the codes in infodict['ierlst'] Other Parameters ---------------- epsabs : float or int, optional Absolute error tolerance. Default is 1.49e-8. `quad` tries to obtain an accuracy of ``abs(i-result) <= max(epsabs, epsrel*abs(i))`` where ``i`` = integral of `func` from `a` to `b`, and ``result`` is the numerical approximation. See `epsrel` below. epsrel : float or int, optional Relative error tolerance. Default is 1.49e-8. If ``epsabs <= 0``, `epsrel` must be greater than both 5e-29 and ``50 * (machine epsilon)``. See `epsabs` above. limit : float or int, optional An upper bound on the number of subintervals used in the adaptive algorithm. points : (sequence of floats,ints), optional A sequence of break points in the bounded integration interval where local difficulties of the integrand may occur (e.g., singularities, discontinuities). The sequence does not have to be sorted. Note that this option cannot be used in conjunction with ``weight``. weight : float or int, optional String indicating weighting function. Full explanation for this and the remaining arguments can be found below. wvar : optional Variables for use with weighting functions. wopts : optional Optional input for reusing Chebyshev moments. maxp1 : float or int, optional An upper bound on the number of Chebyshev moments. limlst : int, optional Upper bound on the number of cycles (>=3) for use with a sinusoidal weighting and an infinite end-point. 
See Also -------- dblquad : double integral tplquad : triple integral nquad : n-dimensional integrals (uses `quad` recursively) fixed_quad : fixed-order Gaussian quadrature quadrature : adaptive Gaussian quadrature odeint : ODE integrator ode : ODE integrator simpson : integrator for sampled data romb : integrator for sampled data scipy.special : for coefficients and roots of orthogonal polynomials Notes ----- **Extra information for quad() inputs and outputs** If full_output is non-zero, then the third output argument (infodict) is a dictionary with entries as tabulated below. For infinite limits, the range is transformed to (0,1) and the optional outputs are given with respect to this transformed range. Let M be the input argument limit and let K be infodict['last']. The entries are: 'neval' The number of function evaluations. 'last' The number, K, of subintervals produced in the subdivision process. 'alist' A rank-1 array of length M, the first K elements of which are the left end points of the subintervals in the partition of the integration range. 'blist' A rank-1 array of length M, the first K elements of which are the right end points of the subintervals. 'rlist' A rank-1 array of length M, the first K elements of which are the integral approximations on the subintervals. 'elist' A rank-1 array of length M, the first K elements of which are the moduli of the absolute error estimates on the subintervals. 'iord' A rank-1 integer array of length M, the first L elements of which are pointers to the error estimates over the subintervals with ``L=K`` if ``K<=M/2+2`` or ``L=M+1-K`` otherwise. Let I be the sequence ``infodict['iord']`` and let E be the sequence ``infodict['elist']``. Then ``E[I[1]], ..., E[I[L]]`` forms a decreasing sequence. If the input argument points is provided (i.e., it is not None), the following additional outputs are placed in the output dictionary. Assume the points sequence is of length P. 'pts' A rank-1 array of length P+2 containing the integration limits and the break points of the intervals in ascending order. This is an array giving the subintervals over which integration will occur. 'level' A rank-1 integer array of length M (=limit), containing the subdivision levels of the subintervals, i.e., if (aa,bb) is a subinterval of ``(pts[1], pts[2])`` where ``pts[0]`` and ``pts[2]`` are adjacent elements of ``infodict['pts']``, then (aa,bb) has level l if ``|bb-aa| = |pts[2]-pts[1]| * 2**(-l)``. 'ndin' A rank-1 integer array of length P+2. After the first integration over the intervals (pts[1], pts[2]), the error estimates over some of the intervals may have been increased artificially in order to put their subdivision forward. This array has ones in slots corresponding to the subintervals for which this happens. **Weighting the integrand** The input variables, *weight* and *wvar*, are used to weight the integrand by a select list of functions. Different integration methods are used to compute the integral with these weighting functions, and these do not support specifying break points. The possible values of weight and the corresponding weighting functions are. 
========== =================================== ===================== ``weight`` Weight function used ``wvar`` ========== =================================== ===================== 'cos' cos(w*x) wvar = w 'sin' sin(w*x) wvar = w 'alg' g(x) = ((x-a)**alpha)*((b-x)**beta) wvar = (alpha, beta) 'alg-loga' g(x)*log(x-a) wvar = (alpha, beta) 'alg-logb' g(x)*log(b-x) wvar = (alpha, beta) 'alg-log' g(x)*log(x-a)*log(b-x) wvar = (alpha, beta) 'cauchy' 1/(x-c) wvar = c ========== =================================== ===================== wvar holds the parameter w, (alpha, beta), or c depending on the weight selected. In these expressions, a and b are the integration limits. For the 'cos' and 'sin' weighting, additional inputs and outputs are available. For finite integration limits, the integration is performed using a Clenshaw-Curtis method which uses Chebyshev moments. For repeated calculations, these moments are saved in the output dictionary: 'momcom' The maximum level of Chebyshev moments that have been computed, i.e., if ``M_c`` is ``infodict['momcom']`` then the moments have been computed for intervals of length ``|b-a| * 2**(-l)``, ``l=0,1,...,M_c``. 'nnlog' A rank-1 integer array of length M(=limit), containing the subdivision levels of the subintervals, i.e., an element of this array is equal to l if the corresponding subinterval is ``|b-a|* 2**(-l)``. 'chebmo' A rank-2 array of shape (25, maxp1) containing the computed Chebyshev moments. These can be passed on to an integration over the same interval by passing this array as the second element of the sequence wopts and passing infodict['momcom'] as the first element. If one of the integration limits is infinite, then a Fourier integral is computed (assuming w neq 0). If full_output is 1 and a numerical error is encountered, besides the error message attached to the output tuple, a dictionary is also appended to the output tuple which translates the error codes in the array ``info['ierlst']`` to English messages. The output information dictionary contains the following entries instead of 'last', 'alist', 'blist', 'rlist', and 'elist': 'lst' The number of subintervals needed for the integration (call it ``K_f``). 'rslst' A rank-1 array of length M_f=limlst, whose first ``K_f`` elements contain the integral contribution over the interval ``(a+(k-1)c, a+kc)`` where ``c = (2*floor(|w|) + 1) * pi / |w|`` and ``k=1,2,...,K_f``. 'erlst' A rank-1 array of length ``M_f`` containing the error estimate corresponding to the interval in the same position in ``infodict['rslist']``. 'ierlst' A rank-1 integer array of length ``M_f`` containing an error flag corresponding to the interval in the same position in ``infodict['rslist']``. See the explanation dictionary (last entry in the output tuple) for the meaning of the codes. Examples -------- Calculate :math:`\\int^4_0 x^2 dx` and compare with an analytic result >>> from scipy import integrate >>> x2 = lambda x: x**2 >>> integrate.quad(x2, 0, 4) (21.333333333333332, 2.3684757858670003e-13) >>> print(4**3 / 3.) 
# analytical result 21.3333333333 Calculate :math:`\\int^\\infty_0 e^{-x} dx` >>> invexp = lambda x: np.exp(-x) >>> integrate.quad(invexp, 0, np.inf) (1.0, 5.842605999138044e-11) Calculate :math:`\\int^1_0 a x \, dx` for :math:`a = 1, 3` >>> f = lambda x, a : a*x >>> y, err = integrate.quad(f, 0, 1, args=(1,)) >>> y 0.5 >>> y, err = integrate.quad(f, 0, 1, args=(3,)) >>> y 1.5 Calculate :math:`\\int^1_0 x^2 + y^2 dx` with ctypes, holding y parameter as 1:: testlib.c => double func(int n, double args[n]){ return args[0]*args[0] + args[1]*args[1];} compile to library testlib.* :: from scipy import integrate import ctypes lib = ctypes.CDLL('/home/.../testlib.*') #use absolute path lib.func.restype = ctypes.c_double lib.func.argtypes = (ctypes.c_int,ctypes.c_double) integrate.quad(lib.func,0,1,(1)) #(1.3333333333333333, 1.4802973661668752e-14) print((1.0**3/3.0 + 1.0) - (0.0**3/3.0 + 0.0)) #Analytic result # 1.3333333333333333 Be aware that pulse shapes and other sharp features as compared to the size of the integration interval may not be integrated correctly using this method. A simplified example of this limitation is integrating a y-axis reflected step function with many zero values within the integrals bounds. >>> y = lambda x: 1 if x<=0 else 0 >>> integrate.quad(y, -1, 1) (1.0, 1.1102230246251565e-14) >>> integrate.quad(y, -1, 100) (1.0000000002199108, 1.0189464580163188e-08) >>> integrate.quad(y, -1, 10000) (0.0, 0.0) """ if not isinstance(args, tuple): args = (args,) # check the limits of integration: \int_a^b, expect a < b flip, a, b = b < a, min(a, b), max(a, b) if weight is None: retval = _quad(func, a, b, args, full_output, epsabs, epsrel, limit, points) else: if points is not None: msg = ("Break points cannot be specified when using weighted integrand.\n" "Continuing, ignoring specified points.") warnings.warn(msg, IntegrationWarning, stacklevel=2) retval = _quad_weight(func, a, b, args, full_output, epsabs, epsrel, limlst, limit, maxp1, weight, wvar, wopts) if flip: retval = (-retval[0],) + retval[1:] ier = retval[-1] if ier == 0: return retval[:-1] msgs = {80: "A Python error occurred possibly while calling the function.", 1: "The maximum number of subdivisions (%d) has been achieved.\n If increasing the limit yields no improvement it is advised to analyze \n the integrand in order to determine the difficulties. If the position of a \n local difficulty can be determined (singularity, discontinuity) one will \n probably gain from splitting up the interval and calling the integrator \n on the subranges. Perhaps a special-purpose integrator should be used." % limit, 2: "The occurrence of roundoff error is detected, which prevents \n the requested tolerance from being achieved. The error may be \n underestimated.", 3: "Extremely bad integrand behavior occurs at some points of the\n integration interval.", 4: "The algorithm does not converge. Roundoff error is detected\n in the extrapolation table. It is assumed that the requested tolerance\n cannot be achieved, and that the returned result (if full_output = 1) is \n the best which can be obtained.", 5: "The integral is probably divergent, or slowly convergent.", 6: "The input is invalid.", 7: "Abnormal termination of the routine. The estimates for result\n and error are less reliable. 
It is assumed that the requested accuracy\n has not been achieved.", 'unknown': "Unknown error."} if weight in ['cos','sin'] and (b == Inf or a == -Inf): msgs[1] = "The maximum number of cycles allowed has been achieved., e.e.\n of subintervals (a+(k-1)c, a+kc) where c = (2*int(abs(omega)+1))\n *pi/abs(omega), for k = 1, 2, ..., lst. One can allow more cycles by increasing the value of limlst. Look at info['ierlst'] with full_output=1." msgs[4] = "The extrapolation table constructed for convergence acceleration\n of the series formed by the integral contributions over the cycles, \n does not converge to within the requested accuracy. Look at \n info['ierlst'] with full_output=1." msgs[7] = "Bad integrand behavior occurs within one or more of the cycles.\n Location and type of the difficulty involved can be determined from \n the vector info['ierlist'] obtained with full_output=1." explain = {1: "The maximum number of subdivisions (= limit) has been \n achieved on this cycle.", 2: "The occurrence of roundoff error is detected and prevents\n the tolerance imposed on this cycle from being achieved.", 3: "Extremely bad integrand behavior occurs at some points of\n this cycle.", 4: "The integral over this cycle does not converge (to within the required accuracy) due to roundoff in the extrapolation procedure invoked on this cycle. It is assumed that the result on this interval is the best which can be obtained.", 5: "The integral over this cycle is probably divergent or slowly convergent."} try: msg = msgs[ier] except KeyError: msg = msgs['unknown'] if ier in [1,2,3,4,5,7]: if full_output: if weight in ['cos', 'sin'] and (b == Inf or a == -Inf): return retval[:-1] + (msg, explain) else: return retval[:-1] + (msg,) else: warnings.warn(msg, IntegrationWarning, stacklevel=2) return retval[:-1] elif ier == 6: # Forensic decision tree when QUADPACK throws ier=6 if epsabs <= 0: # Small error tolerance - applies to all methods if epsrel < max(50 * sys.float_info.epsilon, 5e-29): msg = ("If 'epsabs'<=0, 'epsrel' must be greater than both" " 5e-29 and 50*(machine epsilon).") elif weight in ['sin', 'cos'] and (abs(a) + abs(b) == Inf): msg = ("Sine or cosine weighted intergals with infinite domain" " must have 'epsabs'>0.") elif weight is None: if points is None: # QAGSE/QAGIE msg = ("Invalid 'limit' argument. There must be" " at least one subinterval") else: # QAGPE if not (min(a, b) <= min(points) <= max(points) <= max(a, b)): msg = ("All break points in 'points' must lie within the" " integration limits.") elif len(points) >= limit: msg = ("Number of break points ({:d})" " must be less than subinterval" " limit ({:d})").format(len(points), limit) else: if maxp1 < 1: msg = "Chebyshev moment limit maxp1 must be >=1." elif weight in ('cos', 'sin') and abs(a+b) == Inf: # QAWFE msg = "Cycle limit limlst must be >=3." elif weight.startswith('alg'): # QAWSE if min(wvar) < -1: msg = "wvar parameters (alpha, beta) must both be >= -1." if b < a: msg = "Integration limits a, b must satistfy a<b." elif weight == 'cauchy' and wvar in (a, b): msg = ("Parameter 'wvar' must not equal" " integration limits 'a' or 'b'.") raise ValueError(msg)
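As a quick illustration of the weighted integration described in the docstring above, here is a small, self-contained usage sketch (not part of the entry itself): it computes the integral of x*cos(3*x) over [0, pi] by supplying the oscillatory factor through weight='cos' rather than folding it into the integrand. The analytic value is -2/9.

import numpy as np
from scipy import integrate

# integrate x * cos(3*x) over [0, pi]; the cos(3*x) factor is supplied
# via the 'cos' weighting with wvar = 3, per the table in the docstring
result, abserr = integrate.quad(lambda x: x, 0, np.pi, weight='cos', wvar=3)

print(result)   # approx -0.2222..., i.e. -2/9
print(abserr)   # estimated absolute error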
52,940
def load_sample_data_path(workspace):
    """
    Load the certbot configuration example designed to make OCSP tests, and return its path

    :param str workspace: current test workspace directory path
    :return str: the path to the loaded sample data directory
    """
    certbot_root_directory = find_certbot_root_directory()
    original = os.path.join(certbot_root_directory, 'tests', 'integration', 'sample-config')
    copied = os.path.join(workspace, 'sample-config')

    shutil.copytree(original, copied, symlinks=True)

    return copied
def load_sample_data_path(workspace):
    """
    Load the certbot configuration example designed to make OCSP tests, and return its path

    :param str workspace: current test workspace directory path
    :returns: the path to the loaded sample data directory
    :rtype: str
    """
    certbot_root_directory = find_certbot_root_directory()
    original = os.path.join(certbot_root_directory, 'tests', 'integration', 'sample-config')
    copied = os.path.join(workspace, 'sample-config')

    shutil.copytree(original, copied, symlinks=True)

    return copied
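A brief usage sketch for the helper above, assuming the function (and its `os`/`shutil`/`find_certbot_root_directory` dependencies) is in scope; the temporary-directory workspace here is illustrative only.

import os
import tempfile

# copy the bundled sample configuration into a throwaway workspace
with tempfile.TemporaryDirectory() as workspace:
    sample_root = load_sample_data_path(workspace)
    assert sample_root == os.path.join(workspace, 'sample-config')
    assert os.path.isdir(sample_root)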
35,580
def draw_bounding_boxes(
    image: torch.Tensor,
    boxes: torch.Tensor,
    labels: torch.Tensor,
    label_names: List[int] = None,
    colors: Dict[int, str] = None,
    draw_labels: bool = True,
    width: int = 1
) -> torch.Tensor:
    """
    Draws bounding boxes on given image.

    Args:
        image (Tensor): Tensor of shape (C x H x W)
        bboxes (Tensor): Tensor of size (N, 4) containing bounding boxes in (xmin, ymin, xmax, ymax) format.
        labels (Tensor): Tensor of size (N) Labels for each bounding boxes.
        label_names (List): List containing labels excluding background.
        colors (dict): Dict with key as label id and value as color name.
        draw_labels (bool): If True draws label names on bounding boxes.
        width (int): Width of bounding box.
    """
    # Code co-contributed by sumanthratna
    # Currently works for (C x H x W) images, but I think we should extend.
    # Add 0.5 after unnormalizing to [0, 255] to round to nearest integer
    if not (torch.is_tensor(image)):
        raise TypeError('tensor expected, got {}'.format(type(image)))

    if label_names is not None:
        # Since for our detection models class 0 is background
        label_names.insert(0, "__background__")

    ndarr = image.mul(255).add_(0.5).clamp_(0, 255).permute(1, 2, 0).to('cpu', torch.uint8).numpy()

    # Neceassary check since FRCNN returns boxes which have grad enabled.
    if(boxes.requires_grad):
        boxes = boxes.detach()

    boxes = boxes.to('cpu').numpy().astype('int').tolist()
    labels = labels.to('cpu').numpy().astype('int').tolist()

    img_to_draw = Image.fromarray(ndarr)
    draw = ImageDraw.Draw(img_to_draw)

    for bbox, label in zip(boxes, labels):
        if colors is None:
            draw.rectangle(bbox, width=width)
        else:
            draw.rectangle(bbox, width=width, outline=colors[label])

        if label_names is None:
            draw.text((bbox[0], bbox[1]), str(label))
        else:
            if draw_labels is True:
                draw.text((bbox[0], bbox[1]), label_names[int(label)])

    return torch.from_numpy(np.array(img_to_draw))
def draw_bounding_boxes(
    image: torch.Tensor,
    boxes: torch.Tensor,
    labels: torch.Tensor,
    label_names: List[int] = None,
    colors: Dict[int, str] = None,
    draw_labels: bool = True,
    width: int = 1
) -> torch.Tensor:
    """
    Draws bounding boxes on given image.

    Args:
        image (Tensor): Tensor of shape (C x H x W)
        bboxes (Tensor): Tensor of size (N, 4) containing bounding boxes in (xmin, ymin, xmax, ymax) format.
        labels (Tensor): Tensor of size (N) Labels for each bounding boxes.
        label_names (List): List containing labels excluding background.
        colors (dict): Dict with key as label id and value as color name.
        draw_labels (bool): If True draws label names on bounding boxes.
        width (int): Width of bounding box.
    """
    # Code co-contributed by sumanthratna
    # Currently works for (C x H x W) images, but I think we should extend.
    # Add 0.5 after unnormalizing to [0, 255] to round to nearest integer
    if not (torch.is_tensor(image)):
        raise TypeError(f'tensor expected, got {type(image)}')

    if label_names is not None:
        # Since for our detection models class 0 is background
        label_names.insert(0, "__background__")

    ndarr = image.mul(255).add_(0.5).clamp_(0, 255).permute(1, 2, 0).to('cpu', torch.uint8).numpy()

    # Neceassary check since FRCNN returns boxes which have grad enabled.
    if(boxes.requires_grad):
        boxes = boxes.detach()

    boxes = boxes.to('cpu').numpy().astype('int').tolist()
    labels = labels.to('cpu').numpy().astype('int').tolist()

    img_to_draw = Image.fromarray(ndarr)
    draw = ImageDraw.Draw(img_to_draw)

    for bbox, label in zip(boxes, labels):
        if colors is None:
            draw.rectangle(bbox, width=width)
        else:
            draw.rectangle(bbox, width=width, outline=colors[label])

        if label_names is None:
            draw.text((bbox[0], bbox[1]), str(label))
        else:
            if draw_labels is True:
                draw.text((bbox[0], bbox[1]), label_names[int(label)])

    return torch.from_numpy(np.array(img_to_draw))
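A short usage sketch for the routine above, assuming the function and its dependencies (torch, PIL, numpy) are in scope; it draws two colored boxes on a random image and returns an H x W x C uint8 tensor. The image, box coordinates, and color mapping are illustrative values, not taken from the source.

import torch

# dummy 3-channel 100x100 image with values in [0, 1], plus two boxes
image = torch.rand(3, 100, 100)
boxes = torch.tensor([[10.0, 10.0, 50.0, 50.0], [40.0, 60.0, 90.0, 95.0]])
labels = torch.tensor([1, 2])

# colors maps each label id to a PIL color name; the numeric labels are
# drawn as text because no label_names list is supplied
result = draw_bounding_boxes(image, boxes, labels, colors={1: "red", 2: "blue"})
print(result.shape, result.dtype)  # torch.Size([100, 100, 3]) torch.uint8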
30,277
def auth_check(spf_data, dkim_data, dmarc_data, override_dict):
    spf = spf_data.get('Validation-Result')
    dmarc = dmarc_data.get('Validation-Result')
    dkim = dkim_data.get('Validation-Result')

    if "spf-{}".format(spf) in override_dict:
        return override_dict.get("spf-{}".format(spf))
    if "dkim-{}".format(dkim) in override_dict:
        return override_dict.get("dkim-{}".format(dkim))
    if "dmarc-{}".format(dmarc) in override_dict:
        return override_dict.get("dmarc-{}".format(dmarc))

    if spf == 'fail' or dkim == 'fail' or dmarc == 'fail':
        return "Fail"
    if spf == 'softfail' or dkim == 'policy':
        return "Suspicious"

    undetermined = [None, "none", "temperror", "permerror"]
    if dmarc in undetermined or spf in undetermined or dkim in undetermined \
            or dkim == "neutral":
        return "Undetermined"

    return "Pass"
def auth_check(spf_data, dkim_data, dmarc_data, override_dict):
    spf = spf_data.get('Validation-Result')
    dmarc = dmarc_data.get('Validation-Result')
    dkim = dkim_data.get('Validation-Result')

    if "spf-{}".format(spf) in override_dict:
        return override_dict.get("spf-{}".format(spf))
    if "dkim-{}".format(dkim) in override_dict:
        return override_dict.get("dkim-{}".format(dkim))
    if "dmarc-{}".format(dmarc) in override_dict:
        return override_dict.get("dmarc-{}".format(dmarc))

    if 'fail' in [spf, dkim, dmarc]:
        return "Fail"
    if spf == 'softfail' or dkim == 'policy':
        return "Suspicious"

    undetermined = [None, "none", "temperror", "permerror"]
    if dmarc in undetermined or spf in undetermined or dkim in undetermined \
            or dkim == "neutral":
        return "Undetermined"

    return "Pass"
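A small usage sketch of the checker above; the sample dicts are illustrative, not taken from the source. Overrides keyed on a specific validation result win over the built-in rules; otherwise any hard failure yields "Fail". Both the original and the modified versions return the same results for these inputs.

spf_data = {"Validation-Result": "pass"}
dkim_data = {"Validation-Result": "pass"}
dmarc_data = {"Validation-Result": "fail"}

# no overrides: the DMARC failure drives the verdict
print(auth_check(spf_data, dkim_data, dmarc_data, override_dict={}))
# -> "Fail"

# an override keyed on the DMARC result takes precedence over the default rules
print(auth_check(spf_data, dkim_data, dmarc_data,
                 override_dict={"dmarc-fail": "Suspicious"}))
# -> "Suspicious"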
33,513
def test_render_template_values():
    util = VtlTemplate().VelocityUtil()

    encoded = util.urlEncode("x=a+b")
    assert encoded == "x%3Da%2Bb"

    decoded = util.urlDecode("x=a+b")
    assert decoded == "x=a b"

    escape_tests = (
        ("it's", '"it\'s"'),
        ("0010", "10"),
        ("true", "true"),
        ("True", '"True"'),
        ("1.021", "1.021"),
        ("'''", "\"'''\""),
        ('""', '""'),
        ('"""', '"\\"\\"\\""'),
        ('{"foo": 123}', '{"foo": 123}'),
        ('{"foo"": 123}', '"{\\"foo\\"\\": 123}"'),
        (1, "1"),
        (True, "true"),
    )
    for string, expected in escape_tests:
        escaped = util.escapeJavaScript(string)
        assert escaped == expected
        # we should be able to json.loads in all of the cases!
        json.loads(escaped)
def test_render_template_values():
    util = VtlTemplate.VelocityUtil()

    encoded = util.urlEncode("x=a+b")
    assert encoded == "x%3Da%2Bb"

    decoded = util.urlDecode("x=a+b")
    assert decoded == "x=a b"

    escape_tests = (
        ("it's", '"it\'s"'),
        ("0010", "10"),
        ("true", "true"),
        ("True", '"True"'),
        ("1.021", "1.021"),
        ("'''", "\"'''\""),
        ('""', '""'),
        ('"""', '"\\"\\"\\""'),
        ('{"foo": 123}', '{"foo": 123}'),
        ('{"foo"": 123}', '"{\\"foo\\"\\": 123}"'),
        (1, "1"),
        (True, "true"),
    )
    for string, expected in escape_tests:
        escaped = util.escapeJavaScript(string)
        assert escaped == expected
        # we should be able to json.loads in all of the cases!
        json.loads(escaped)
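To make the escaping contract in the table above concrete: each expected string is itself a valid JSON document, which is why json.loads() is called on every escaped result. A standalone check, independent of the VelocityUtil implementation, for one of the trickier rows:

import json

# escapeJavaScript('{"foo"": 123}') is expected to produce a JSON string
# literal, so json.loads() round-trips it back to the raw input text
escaped = '"{\\"foo\\"\\": 123}"'
assert json.loads(escaped) == '{"foo"": 123}'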