Dataset columns:
- id: int64, values from 11 to 59.9k
- original: string, lengths from 33 to 150k characters
- modified: string, lengths from 37 to 150k characters
53,166
def get_check_logger(default_logger=None): """ Search the current AgentCheck log starting from closest stack frame. Caveat: Frame lookup have a cost. GOOD: Call `get_check_logger` in a check config class __init__ at check instance creation. BAD: Call `get_check_logger` at every check run. """ from datadog_checks.base import AgentCheck for i in range(LOGGER_FRAME_SEARCH_MAX_DEPTH): try: frame = sys._getframe(i) except ValueError: break if 'self' in frame.f_locals: check = frame.f_locals['self'] if isinstance(check, AgentCheck): return check.log if default_logger is not None: return default_logger return DEFAULT_FALLBACK_LOGGER
def get_check_logger(default_logger=None): """ Search the current AgentCheck log starting from closest stack frame. Caveat: Frame lookup has a cost so the recommended usage is to retrieve and store the logger once and avoid calling this method on every check run. """ from datadog_checks.base import AgentCheck for i in range(LOGGER_FRAME_SEARCH_MAX_DEPTH): try: frame = sys._getframe(i) except ValueError: break if 'self' in frame.f_locals: check = frame.f_locals['self'] if isinstance(check, AgentCheck): return check.log if default_logger is not None: return default_logger return DEFAULT_FALLBACK_LOGGER
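The recommended pattern from the docstring, resolving the logger once at instance creation instead of on every run, might look like the following sketch. The config-class name is hypothetical and the `datadog_checks.base.log` import path is an assumption for illustration.

```python
# Hypothetical config helper; get_check_logger is assumed to be importable
# from datadog_checks.base.log.
from datadog_checks.base.log import get_check_logger


class HTTPCheckConfig:
    def __init__(self, instance):
        self.instance = instance
        # Frame lookup has a cost, so resolve the check logger once here
        # rather than calling get_check_logger() on every check run.
        self.log = get_check_logger()

    def validate(self):
        self.log.debug("validating instance configuration: %s", self.instance)
```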
14,155
def build(build_dir="cocotb_build"): logger = logging.getLogger(__name__) distutils.log.set_verbosity(0) # Disable logging comiliation commands in disutils # distutils.log.set_verbosity(distutils.log.DEBUG) # Set DEBUG level cfg_vars = distutils.sysconfig.get_config_vars() for key, value in cfg_vars.items(): if type(value) == str: cfg_vars[key] = value.replace("-Wstrict-prototypes", "") if sys.platform == "darwin": cfg_vars["LDSHARED"] = cfg_vars["LDSHARED"].replace("-bundle", "-dynamiclib") share_dir = os.path.join(os.path.dirname(__file__), "share") share_lib_dir = os.path.join(share_dir, "lib") build_dir = os.path.abspath(build_dir) include_dir = os.path.join(share_dir, "include") dist = Distribution() dist.parse_config_files() # # Icarus Verilog # logger.warning("Compiling interface libraries for Icarus Verilog ...") icarus_build_dir = os.path.join(build_dir, "icarus") icarus_compile = True icarus_extra_lib = [] icarus_extra_lib_path = [] if os.name == "nt": iverilog_path = find_executable("iverilog") if iverilog_path is None: logger.warning( "Icarus Verilog executable not found. VPI interface will not be avaliable." ) icarus_compile = False else: icarus_path = os.path.dirname(os.path.dirname(iverilog_path)) icarus_extra_lib = ["vpi"] icarus_extra_lib_path = [os.path.join(icarus_path, "lib")] if icarus_compile: build_common_libs(icarus_build_dir, include_dir, share_lib_dir, dist) icarus_vpi_lib_name = build_vpi_lib( build_dir=icarus_build_dir, include_dir=include_dir, share_lib_dir=share_lib_dir, dist=dist, sim_define="ICARUS", extra_lib=icarus_extra_lib, extra_lib_dir=icarus_extra_lib_path, ) _rename_safe( os.path.join(icarus_build_dir, icarus_vpi_lib_name), os.path.join(icarus_build_dir, "gpivpi.vpl"), ) # # Modelsim/Questa # logger.warning("Compiling interface libraries for Modelsim/Questa ...") vsim_path = find_executable("vopt") modelsim_build_dir = os.path.join(build_dir, "modelsi") modelsim_compile = True modelsim_extra_lib = [] modelsim_extra_lib_path = [] if os.name == "nt": if vsim_path is None: logger.warning( "Modelsim/Questa executable (vopt) not found. VPI interface will not be avaliable." ) modelsim_compile = False else: modelsim_bin_dir = os.path.dirname(vsim_path) modelsim_extra_lib = ["mtipli"] modelsim_extra_lib_path = [modelsim_bin_dir] if modelsim_compile: build_common_libs(modelsim_build_dir, include_dir, share_lib_dir, dist) build_vpi_lib( build_dir=modelsim_build_dir, include_dir=include_dir, share_lib_dir=share_lib_dir, dist=dist, sim_define="MODELSIM", extra_lib=modelsim_extra_lib, extra_lib_dir=modelsim_extra_lib_path, ) if vsim_path is None: logger.warning( "Modelsim/Questa executable (vopt) executable not found. FLI interface will not be avaliable." ) else: modelsim_dir = os.path.dirname(os.path.dirname(vsim_path)) libfli = Extension( "libfli", include_dirs=[include_dir, os.path.join(modelsim_dir, "include")], libraries=["gpi", "gpilog", "stdc++"] + modelsim_extra_lib, library_dirs=[modelsim_build_dir] + modelsim_extra_lib_path, sources=[ os.path.join(share_lib_dir, "fli", "FliImpl.cpp"), os.path.join(share_lib_dir, "fli", "FliCbHdl.cpp"), os.path.join(share_lib_dir, "fli", "FliObjHdl.cpp"), ], extra_link_args=["-Wl,-rpath,$ORIGIN"], ) try: _build_lib(libfli, dist, modelsim_build_dir) except: # noqa: E722 logger.warning( "Building FLI intercae for Modelsim faild!" ) # some Modelsim version doesn not include FLI? 
# # GHDL # if os.name == "posix": logger.warning("Compiling interface libraries for GHDL ...") ghdl_build_dir = os.path.join(build_dir, "ghdl") build_common_libs(ghdl_build_dir, include_dir, share_lib_dir, dist) build_vpi_lib( build_dir=ghdl_build_dir, include_dir=include_dir, share_lib_dir=share_lib_dir, dist=dist, sim_define="GHDL", ) # # IUS # if os.name == "posix": logger.warning("Compiling interface libraries for IUS ...") ius_build_dir = os.path.join(build_dir, "ius") build_common_libs(ius_build_dir, include_dir, share_lib_dir, dist) build_vpi_lib( build_dir=ius_build_dir, include_dir=include_dir, share_lib_dir=share_lib_dir, dist=dist, sim_define="IUS", ) build_vhpi_lib( build_dir=ius_build_dir, include_dir=include_dir, share_lib_dir=share_lib_dir, dist=dist, sim_define="IUS", ) # # VCS # if os.name == "posix": logger.warning("Compiling interface libraries for VCS ...") vcs_build_dir = os.path.join(build_dir, "vcs") build_common_libs(vcs_build_dir, include_dir, share_lib_dir, dist) build_vpi_lib( build_dir=vcs_build_dir, include_dir=include_dir, share_lib_dir=share_lib_dir, dist=dist, sim_define="VCS", ) # # Aldec # vsimsa_path = find_executable("vsimsa") if vsimsa_path is None: logger.warning( "Riviera executable not found. No VPI/VHPI interface will not be avaliable." ) else: logger.warning("Compiling interface libraries for Aldec ...") aldec_build_dir = os.path.join(build_dir, "aldec") aldec_path = os.path.dirname(vsimsa_path) aldec_extra_lib = ["aldecpli"] aldec_extra_lib_path = [aldec_path] build_common_libs(aldec_build_dir, include_dir, share_lib_dir, dist) build_vpi_lib( build_dir=aldec_build_dir, include_dir=include_dir, share_lib_dir=share_lib_dir, dist=dist, sim_define="ALDEC", extra_lib=aldec_extra_lib, extra_lib_dir=aldec_extra_lib_path, ) build_vhpi_lib( build_dir=aldec_build_dir, include_dir=include_dir, share_lib_dir=share_lib_dir, dist=dist, sim_define="ALDEC", extra_lib=aldec_extra_lib, extra_lib_dir=aldec_extra_lib_path, ) # # Verilator # if os.name == "posix": logger.warning("Compiling interface libraries for Verilator ...") vcs_build_dir = os.path.join(build_dir, "verilator") build_common_libs(vcs_build_dir, include_dir, share_lib_dir, dist) build_vpi_lib( build_dir=vcs_build_dir, include_dir=include_dir, share_lib_dir=share_lib_dir, dist=dist, sim_define="VERILATOR", ) return
def build(build_dir="cocotb_build"): logger = logging.getLogger(__name__) distutils.log.set_verbosity(0) # Disable logging comiliation commands in disutils # distutils.log.set_verbosity(distutils.log.DEBUG) # Set DEBUG level cfg_vars = distutils.sysconfig.get_config_vars() for key, value in cfg_vars.items(): if type(value) == str: cfg_vars[key] = value.replace("-Wstrict-prototypes", "") if sys.platform == "darwin": cfg_vars["LDSHARED"] = cfg_vars["LDSHARED"].replace("-bundle", "-dynamiclib") share_dir = os.path.join(os.path.dirname(__file__), "share") share_lib_dir = os.path.join(share_dir, "lib") build_dir = os.path.abspath(build_dir) include_dir = os.path.join(share_dir, "include") dist = Distribution() dist.parse_config_files() # # Icarus Verilog # logger.warning("Compiling interface libraries for Icarus Verilog ...") icarus_build_dir = os.path.join(build_dir, "icarus") icarus_compile = True icarus_extra_lib = [] icarus_extra_lib_path = [] if os.name == "nt": iverilog_path = find_executable("iverilog") if iverilog_path is None: logger.warning( "Icarus Verilog executable not found. VPI interface will not be avaliable." ) icarus_compile = False else: icarus_path = os.path.dirname(os.path.dirname(iverilog_path)) icarus_extra_lib = ["vpi"] icarus_extra_lib_path = [os.path.join(icarus_path, "lib")] if icarus_compile: build_common_libs(icarus_build_dir, include_dir, share_lib_dir, dist) icarus_vpi_lib_name = build_vpi_lib( build_dir=icarus_build_dir, include_dir=include_dir, share_lib_dir=share_lib_dir, dist=dist, sim_define="ICARUS", extra_lib=icarus_extra_lib, extra_lib_dir=icarus_extra_lib_path, ) _rename_safe( os.path.join(icarus_build_dir, icarus_vpi_lib_name), os.path.join(icarus_build_dir, "gpivpi.vpl"), ) # # Modelsim/Questa # logger.warning("Compiling interface libraries for Modelsim/Questa ...") vsim_path = find_executable("vopt") modelsim_build_dir = os.path.join(build_dir, "modelsi") modelsim_compile = True modelsim_extra_lib = [] modelsim_extra_lib_path = [] if os.name == "nt": if vsim_path is None: logger.warning( "Modelsim/Questa executable (vopt) not found. VPI interface will not be avaliable." ) modelsim_compile = False else: modelsim_bin_dir = os.path.dirname(vsim_path) modelsim_extra_lib = ["mtipli"] modelsim_extra_lib_path = [modelsim_bin_dir] if modelsim_compile: build_common_libs(modelsim_build_dir, include_dir, share_lib_dir, dist) build_vpi_lib( build_dir=modelsim_build_dir, include_dir=include_dir, share_lib_dir=share_lib_dir, dist=dist, sim_define="MODELSIM", extra_lib=modelsim_extra_lib, extra_lib_dir=modelsim_extra_lib_path, ) if vsim_path is None: logger.warning( "Modelsim/Questa executable (vopt) executable not found. FLI interface will not be available." ) else: modelsim_dir = os.path.dirname(os.path.dirname(vsim_path)) libfli = Extension( "libfli", include_dirs=[include_dir, os.path.join(modelsim_dir, "include")], libraries=["gpi", "gpilog", "stdc++"] + modelsim_extra_lib, library_dirs=[modelsim_build_dir] + modelsim_extra_lib_path, sources=[ os.path.join(share_lib_dir, "fli", "FliImpl.cpp"), os.path.join(share_lib_dir, "fli", "FliCbHdl.cpp"), os.path.join(share_lib_dir, "fli", "FliObjHdl.cpp"), ], extra_link_args=["-Wl,-rpath,$ORIGIN"], ) try: _build_lib(libfli, dist, modelsim_build_dir) except: # noqa: E722 logger.warning( "Building FLI intercae for Modelsim faild!" ) # some Modelsim version doesn not include FLI? 
# # GHDL # if os.name == "posix": logger.warning("Compiling interface libraries for GHDL ...") ghdl_build_dir = os.path.join(build_dir, "ghdl") build_common_libs(ghdl_build_dir, include_dir, share_lib_dir, dist) build_vpi_lib( build_dir=ghdl_build_dir, include_dir=include_dir, share_lib_dir=share_lib_dir, dist=dist, sim_define="GHDL", ) # # IUS # if os.name == "posix": logger.warning("Compiling interface libraries for IUS ...") ius_build_dir = os.path.join(build_dir, "ius") build_common_libs(ius_build_dir, include_dir, share_lib_dir, dist) build_vpi_lib( build_dir=ius_build_dir, include_dir=include_dir, share_lib_dir=share_lib_dir, dist=dist, sim_define="IUS", ) build_vhpi_lib( build_dir=ius_build_dir, include_dir=include_dir, share_lib_dir=share_lib_dir, dist=dist, sim_define="IUS", ) # # VCS # if os.name == "posix": logger.warning("Compiling interface libraries for VCS ...") vcs_build_dir = os.path.join(build_dir, "vcs") build_common_libs(vcs_build_dir, include_dir, share_lib_dir, dist) build_vpi_lib( build_dir=vcs_build_dir, include_dir=include_dir, share_lib_dir=share_lib_dir, dist=dist, sim_define="VCS", ) # # Aldec # vsimsa_path = find_executable("vsimsa") if vsimsa_path is None: logger.warning( "Riviera executable not found. No VPI/VHPI interface will not be avaliable." ) else: logger.warning("Compiling interface libraries for Aldec ...") aldec_build_dir = os.path.join(build_dir, "aldec") aldec_path = os.path.dirname(vsimsa_path) aldec_extra_lib = ["aldecpli"] aldec_extra_lib_path = [aldec_path] build_common_libs(aldec_build_dir, include_dir, share_lib_dir, dist) build_vpi_lib( build_dir=aldec_build_dir, include_dir=include_dir, share_lib_dir=share_lib_dir, dist=dist, sim_define="ALDEC", extra_lib=aldec_extra_lib, extra_lib_dir=aldec_extra_lib_path, ) build_vhpi_lib( build_dir=aldec_build_dir, include_dir=include_dir, share_lib_dir=share_lib_dir, dist=dist, sim_define="ALDEC", extra_lib=aldec_extra_lib, extra_lib_dir=aldec_extra_lib_path, ) # # Verilator # if os.name == "posix": logger.warning("Compiling interface libraries for Verilator ...") vcs_build_dir = os.path.join(build_dir, "verilator") build_common_libs(vcs_build_dir, include_dir, share_lib_dir, dist) build_vpi_lib( build_dir=vcs_build_dir, include_dir=include_dir, share_lib_dir=share_lib_dir, dist=dist, sim_define="VERILATOR", ) return
38,514
def star_shape_cell_centers(g: "pp.Grid", as_nan: bool = False) -> np.ndarray: """ For a given grid compute the star shape center for each cell. The algorithm computes the half space intersections, by using the above method half_space_pt, of the spaces defined by the cell faces and the face normals. This is a wrapper method that operate on a grid. Parameters ---------- g: pp.Grid the grid as_nan: bool, optional Decide whether, in case some cells are not star-shaped return nan as new center. Otherwise an exception is raised (default behaviour). Returns ------- np.ndarray The new cell centers. """ # no need for 1d or 0d grids if g.dim < 2: return g.cell_centers # retrieve the faces and nodes faces, _, sgn = sps.find(g.cell_faces) nodes, _, _ = sps.find(g.face_nodes) # shift the nodes close to the origin, to avoid numerical problems when coordinates are # too big xn = g.nodes.copy() xn_shift = np.average(xn, axis=1) xn -= np.tile(xn_shift, (xn.shape[1], 1)).T # compute the star shape cell centers by constructing the half spaces of each cell # given by its faces and related normals cell_centers = np.zeros((3, g.num_cells)) for c in np.arange(g.num_cells): loc = slice(g.cell_faces.indptr[c], g.cell_faces.indptr[c + 1]) faces_loc = faces[loc] loc_n = g.face_nodes.indptr[faces_loc] # make the normals coherent normal = np.multiply( sgn[loc], np.divide(g.face_normals[:, faces_loc], g.face_areas[faces_loc]) ) x0, x1 = xn[:, nodes[loc_n]], xn[:, nodes[loc_n + 1]] coords = np.concatenate((x0, x1), axis=1) # compute a point in the half space intersection of all cell faces try: cell_centers[:, c] = pp.half_space.half_space_interior_point( normal, (x1 + x0) / 2.0, coords ) except ValueError: # the cell is not star-shaped if as_nan: cell_centers[:, c] = np.array([np.nan, np.nan, np.nan]) else: raise ValueError( "Cell not star-shaped impossible to compute the centre" ) # shift back the computed cell centers and return them return cell_centers + np.tile(xn_shift, (g.num_cells, 1)).T
def star_shape_cell_centers(g: "pp.Grid", as_nan: bool = False) -> np.ndarray: """ For a given grid compute the star shape center for each cell. The algorithm computes the half space intersections, by using the above method half_space_pt, of the spaces defined by the cell faces and the face normals. This is a wrapper method that operate on a grid. Parameters ---------- g: pp.Grid the grid as_nan: bool, optional Decide whether, in case some cells are not star-shaped return nan as star-shaped. Otherwise an exception is raised (default behaviour). Returns ------- np.ndarray The new cell centers. """ # no need for 1d or 0d grids if g.dim < 2: return g.cell_centers # retrieve the faces and nodes faces, _, sgn = sps.find(g.cell_faces) nodes, _, _ = sps.find(g.face_nodes) # shift the nodes close to the origin, to avoid numerical problems when coordinates are # too big xn = g.nodes.copy() xn_shift = np.average(xn, axis=1) xn -= np.tile(xn_shift, (xn.shape[1], 1)).T # compute the star shape cell centers by constructing the half spaces of each cell # given by its faces and related normals cell_centers = np.zeros((3, g.num_cells)) for c in np.arange(g.num_cells): loc = slice(g.cell_faces.indptr[c], g.cell_faces.indptr[c + 1]) faces_loc = faces[loc] loc_n = g.face_nodes.indptr[faces_loc] # make the normals coherent normal = np.multiply( sgn[loc], np.divide(g.face_normals[:, faces_loc], g.face_areas[faces_loc]) ) x0, x1 = xn[:, nodes[loc_n]], xn[:, nodes[loc_n + 1]] coords = np.concatenate((x0, x1), axis=1) # compute a point in the half space intersection of all cell faces try: cell_centers[:, c] = pp.half_space.half_space_interior_point( normal, (x1 + x0) / 2.0, coords ) except ValueError: # the cell is not star-shaped if as_nan: cell_centers[:, c] = np.array([np.nan, np.nan, np.nan]) else: raise ValueError( "Cell not star-shaped impossible to compute the centre" ) # shift back the computed cell centers and return them return cell_centers + np.tile(xn_shift, (g.num_cells, 1)).T
16,115
def get_type(cloud_id, install_code): """Try API call 'get_network_info' to see if target device is Legacy or Eagle-200.""" reader = FixedLegacyReader(cloud_id, install_code) try: response = reader.get_network_info() except UPDATE_ERRORS as error: _LOGGER.error("Failed to connect during setup: %s", error) raise CannotConnect from error # Branch to test if target is Legacy Model if ( "NetworkInfo" in response and response["NetworkInfo"].get("ModelId", None) == "Z109-EAGLE" ): return TYPE_LEGACY # Branch to test if target is Eagle-200 Model if ( "Response" in response and response["Response"].get("Command", None) == "get_network_info" ): return TYPE_EAGLE_200 # Catch-all if hardware ID tests fail return None
def get_type(cloud_id, install_code): """Try API call 'get_network_info' to see if target device is Legacy or Eagle-200.""" reader = FixedLegacyReader(cloud_id, install_code) try: response = reader.get_network_info() except UPDATE_ERRORS as error: _LOGGER.error("Failed to connect during setup: %s", error) raise CannotConnect from error # Branch to test if target is Legacy Model if ( "NetworkInfo" in response and response["NetworkInfo"].get("ModelId") == "Z109-EAGLE" ): return TYPE_LEGACY # Branch to test if target is Eagle-200 Model if ( "Response" in response and response["Response"].get("Command", None) == "get_network_info" ): return TYPE_EAGLE_200 # Catch-all if hardware ID tests fail return None
10,818
def check(obj): """Check if *obj* can be serialized. Parameters ---------- obj : object Returns -------- can_serialize : bool """ with io.BytesIO() as fout: pickler = NumbaPickler(fout) try: pickler.dump(obj) except pickle.PicklingError: return False else: return True
def check_serializable(obj): """Check if *obj* can be serialized. Parameters ---------- obj : object Returns -------- can_serialize : bool """ with io.BytesIO() as fout: pickler = NumbaPickler(fout) try: pickler.dump(obj) except pickle.PicklingError: return False else: return True
45,659
def layout(): return html.Div(id='oncoprint-body', children=[ dash_bio.OncoPrint( id='oncoprint-chart', height=550, data=[] ), html.Div(id='oncoprint-control-tabs', children=[ dcc.Tabs( id='oncoprint-tabs', children=[ dcc.Tab( label='About', value='what-is', children=html.Div(className='oncoprint-tab', children=[ html.H4( "What is OncoPrint?" ), html.P( """ The OncoPrint component is used to view multiple genetic alteration events through an interactive and zoomable heatmap. It is a React/Dash port of the popular oncoPrint() function from the BioConductor R package. Under the hood, the rendering is done using Plotly.js built upon D3. Plotly's interactivity allows the user to bind clicks and hovers to genetic events, allowing the user to create complex bioinformatic apps or workflows that rely on crossfiltering. """ ), html.P( """ Read more about the component here: https://github.com/plotly/react-oncoprint """ ) ]) ), dcc.Tab( label='Data', value='data', children=html.Div(className='oncoprint-tab', children=[ html.Div([ html.Div( className='oncoprint-option-name', children='Select dataset' ), dcc.Dropdown( id='oncoprint-dropdown', className='oncoprint-select', options=[ { 'label': '{}.json'.format(ds), 'value': ds } for ds in DATASETS ], value='cBioPortalData', ), ]), html.Hr( className='oncoprint-separator' ), html.Div([ html.H4('Hover, click, or event data'), html.Div( id='oncoprint-events' ), ]) ]) ), dcc.Tab( label='View', value='view', children=html.Div(className='oncoprint-tab', children=[ html.H4('Layout'), html.Div( children=[ html.Div( className='oncoprint-option-name', children='Overview' ), daq.ToggleSwitch( id='oncoprint-show-overview', label=['hide', 'show'], color='#009DFF', size=35, value=True ), ], ), html.Div( children=[ html.Div( className='oncoprint-option-name', children='Legend' ), daq.ToggleSwitch( id='oncoprint-show-legend', label=['hide', 'show'], color='#009DFF', size=35, value=True ), ], ), html.Div( children=[ html.Div( className='oncoprint-option-name', children='Padding' ), dcc.Slider( className='oncoprint-slider', id='oncoprint-padding-input', value=0.05, min=0, max=0.1, step=0.01, marks={ '0': '0', '0.02': '0.02', '0.04': '0.04', '0.06': '0.06', '0.08': '0.08', '0.1': '0.1', }, ), html.Br(), html.Div( 'Adjust the padding (as percentage) ' 'between two tracks.' ), ], ), html.Hr(className='oncoprint-separator'), html.Div([ html.H4('Colors'), html.Div( children=[ html.Div( className='oncoprint-option-name', children='Track color' ), html.P( 'Change the default background ' 'color for the tracks.' ), daq.ColorPicker( id='oncoprint-tracks-color', value={'hex': '#AAAAAA'} ), ], ), html.Hr(className='oncoprint-separator'), html.H6("Mutation colors"), html.P( "Select a mutation type and a color " "to customize its look." ), html.Div(children=[ html.Div( children=[ html.Div( className='oncoprint-option-name', children='Mutation type' ), dcc.Dropdown( id='oncoprint-colorscale-mutation-dropdown', options=[ {'label': mut_type, 'value': mut_type} for mut_type in COLORSCALE_MUTATIONS_OPT ], value=COLORSCALE_MUTATIONS_OPT[0], ), ], ), html.Div( children=[ html.Div( className='oncoprint-option-name', children='Mutation color' ), daq.ColorPicker( id='oncoprint-mutation-color', value={'hex': COLORSCALE_COLORS_OPT[0]} ) ], ), ]) ]) ]) ) ] ) ]), dcc.Store(id='oncoprint-store'), ]),
def layout(): return html.Div(id='oncoprint-body', children=[ dash_bio.OncoPrint( id='oncoprint-chart', height=550, data=[] ), html.Div(id='oncoprint-control-tabs', children=[ dcc.Tabs( id='oncoprint-tabs', children=[ dcc.Tab( label='About', value='what-is', children=html.Div(className='oncoprint-tab', children=[ html.H4( "What is OncoPrint?" ), html.P( """ The OncoPrint component is used to view multiple genetic alteration events through an interactive and zoomable heatmap. It is a React/Dash port of the popular oncoPrint() function from the BioConductor R package. Under the hood, the rendering is done using Plotly.js built upon D3. Plotly's interactivity allows the user to bind clicks and hovers to genetic events, letting you create complex bioinformatics apps or workflows that rely on crossfiltering. """ ), html.P( """ Read more about the component here: https://github.com/plotly/react-oncoprint """ ) ]) ), dcc.Tab( label='Data', value='data', children=html.Div(className='oncoprint-tab', children=[ html.Div([ html.Div( className='oncoprint-option-name', children='Select dataset' ), dcc.Dropdown( id='oncoprint-dropdown', className='oncoprint-select', options=[ { 'label': '{}.json'.format(ds), 'value': ds } for ds in DATASETS ], value='cBioPortalData', ), ]), html.Hr( className='oncoprint-separator' ), html.Div([ html.H4('Hover, click, or event data'), html.Div( id='oncoprint-events' ), ]) ]) ), dcc.Tab( label='View', value='view', children=html.Div(className='oncoprint-tab', children=[ html.H4('Layout'), html.Div( children=[ html.Div( className='oncoprint-option-name', children='Overview' ), daq.ToggleSwitch( id='oncoprint-show-overview', label=['hide', 'show'], color='#009DFF', size=35, value=True ), ], ), html.Div( children=[ html.Div( className='oncoprint-option-name', children='Legend' ), daq.ToggleSwitch( id='oncoprint-show-legend', label=['hide', 'show'], color='#009DFF', size=35, value=True ), ], ), html.Div( children=[ html.Div( className='oncoprint-option-name', children='Padding' ), dcc.Slider( className='oncoprint-slider', id='oncoprint-padding-input', value=0.05, min=0, max=0.1, step=0.01, marks={ '0': '0', '0.02': '0.02', '0.04': '0.04', '0.06': '0.06', '0.08': '0.08', '0.1': '0.1', }, ), html.Br(), html.Div( 'Adjust the padding (as percentage) ' 'between two tracks.' ), ], ), html.Hr(className='oncoprint-separator'), html.Div([ html.H4('Colors'), html.Div( children=[ html.Div( className='oncoprint-option-name', children='Track color' ), html.P( 'Change the default background ' 'color for the tracks.' ), daq.ColorPicker( id='oncoprint-tracks-color', value={'hex': '#AAAAAA'} ), ], ), html.Hr(className='oncoprint-separator'), html.H6("Mutation colors"), html.P( "Select a mutation type and a color " "to customize its look." ), html.Div(children=[ html.Div( children=[ html.Div( className='oncoprint-option-name', children='Mutation type' ), dcc.Dropdown( id='oncoprint-colorscale-mutation-dropdown', options=[ {'label': mut_type, 'value': mut_type} for mut_type in COLORSCALE_MUTATIONS_OPT ], value=COLORSCALE_MUTATIONS_OPT[0], ), ], ), html.Div( children=[ html.Div( className='oncoprint-option-name', children='Mutation color' ), daq.ColorPicker( id='oncoprint-mutation-color', value={'hex': COLORSCALE_COLORS_OPT[0]} ) ], ), ]) ]) ]) ) ] ) ]), dcc.Store(id='oncoprint-store'), ]),
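As a minimal sketch of how a `layout()` factory like the one above is typically mounted in a Dash app (the app wiring and entry point are assumptions, not taken from the source):

```python
# Hypothetical wiring; assumes layout() above and its dash_bio/dcc/html/daq
# imports are available in the same module.
import dash

app = dash.Dash(__name__)
app.layout = layout()  # the oncoprint Div returned above becomes the page body

if __name__ == "__main__":
    app.run_server(debug=True)
```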
25,755
def assign_solution(n, sns, variables_sol, constraints_dual, keep_references=False, keep_shadowprices=None): """ Helper function. Assigns the solution of a succesful optimization to the network. """ def set_from_frame(pnl, attr, df): if attr not in pnl: #use this for subnetworks_t pnl[attr] = df.reindex(n.snapshots) elif pnl[attr].empty: pnl[attr] = df.reindex(n.snapshots) else: pnl[attr].loc[sns, :] = df.reindex(columns=pnl[attr].columns) pop = not keep_references def map_solution(c, attr): variables = get_var(n, c, attr, pop=pop) predefined = True if (c, attr) not in lookup.index: predefined = False n.sols[c] = n.sols[c] if c in n.sols else Dict(df=pd.DataFrame(), pnl={}) n.solutions.at[(c, attr), 'in_comp'] = predefined if isinstance(variables, pd.DataFrame): # case that variables are timedependent n.solutions.at[(c, attr), 'pnl'] = True pnl = n.pnl(c) if predefined else n.sols[c].pnl values = variables.stack().map(variables_sol).unstack() if c in n.passive_branch_components: set_from_frame(pnl, 'p0', values) set_from_frame(pnl, 'p1', - values) elif c == 'Link': set_from_frame(pnl, 'p0', values) for i in ['1'] + additional_linkports(n): i_eff = '' if i == '1' else i eff = get_as_dense(n, 'Link', f'efficiency{i_eff}', sns) set_from_frame(pnl, f'p{i}', - values * eff) else: set_from_frame(pnl, attr, values) else: # case that variables are static n.solutions.at[(c, attr), 'pnl'] = False sol = variables.map(variables_sol) if predefined: non_ext = n.df(c)[attr] n.df(c)[attr + '_opt'] = sol.reindex(non_ext.index).fillna(non_ext) else: n.sols[c].df[attr] = sol n.sols = Dict() n.solutions = pd.DataFrame(index=n.variables.index, columns=['in_comp', 'pnl']) for c, attr in n.variables.index: map_solution(c, attr) # if nominal capcity was no variable set optimal value to nominal for c, attr in lookup.query('nominal').index.difference(n.variables.index): n.df(c)[attr+'_opt'] = n.df(c)[attr] # recalculate storageunit net dispatch if not n.df('StorageUnit').empty: c = 'StorageUnit' n.pnl(c)['p'] = n.pnl(c)['p_dispatch'] - n.pnl(c)['p_store'] # duals if keep_shadowprices == False: keep_shadowprices = [] sp = n.constraints.index if isinstance(keep_shadowprices, list): sp = sp[sp.isin(keep_shadowprices, level=0)] def map_dual(c, attr): # If c is a pypsa component name the dual is store at n.pnl(c) # or n.df(c). 
For the second case the index of the constraints have to # be a subset of n.df(c).index otherwise the dual is stored at # n.duals[c].df constraints = get_con(n, c, attr, pop=pop) is_pnl = isinstance(constraints, pd.DataFrame) # TODO: setting the sign is not very clear sign = 1 if 'upper' in attr or attr == 'marginal_price' else -1 n.dualvalues.at[(c, attr), 'pnl'] = is_pnl to_component = c in n.all_components if is_pnl: n.dualvalues.at[(c, attr), 'in_comp'] = to_component duals = constraints.stack().map(sign * constraints_dual).unstack() if c not in n.duals and not to_component: n.duals[c] = Dict(df=pd.DataFrame(), pnl={}) pnl = n.pnl(c) if to_component else n.duals[c].pnl set_from_frame(pnl, attr, duals) else: # here to_component can change duals = constraints.map(sign * constraints_dual) if to_component: to_component = (duals.index.isin(n.df(c).index).all()) n.dualvalues.at[(c, attr), 'in_comp'] = to_component if c not in n.duals and not to_component: n.duals[c] = Dict(df=pd.DataFrame(), pnl={}) df = n.df(c) if to_component else n.duals[c].df df[attr] = duals n.duals = Dict() n.dualvalues = pd.DataFrame(index=sp, columns=['in_comp', 'pnl']) # extract shadow prices attached to components for c, attr in sp: map_dual(c, attr) #correct prices for snapshot weightings n.buses_t.marginal_price.loc[sns] = n.buses_t.marginal_price.loc[sns].divide(n.snapshot_weightings.loc[sns],axis=0) # discard remaining if wanted if not keep_references: for c, attr in n.constraints.index.difference(sp): get_con(n, c, attr, pop) #load if len(n.loads): set_from_frame(n.pnl('Load'), 'p', get_as_dense(n, 'Load', 'p_set', sns)) #clean up vars and cons for c in list(n.vars): if n.vars[c].df.empty and n.vars[c].pnl == {}: n.vars.pop(c) for c in list(n.cons): if n.cons[c].df.empty and n.cons[c].pnl == {}: n.cons.pop(c) # recalculate injection ca = [('Generator', 'p', 'bus' ), ('Store', 'p', 'bus'), ('Load', 'p', 'bus'), ('StorageUnit', 'p', 'bus'), ('Link', 'p0', 'bus0'), ('Link', 'p1', 'bus1')] for i in additional_linkports(n): ca.append(('Link', f'p{i}', f'bus{i}')) sign = lambda c: n.df(c).sign if 'sign' in n.df(c) else -1 #sign for 'Link' n.buses_t.p = pd.concat( [n.pnl(c)[attr].mul(sign(c)).rename(columns=n.df(c)[group]) for c, attr, group in ca], axis=1).groupby(level=0, axis=1).sum()\ .reindex(columns=n.buses.index, fill_value=0) def v_ang_for_(sub): buses_i = sub.buses_o if len(buses_i) == 1: return pd.DataFrame(0, index=sns, columns=buses_i) sub.calculate_B_H(skip_pre=True) Z = pd.DataFrame(np.linalg.pinv((sub.B).todense()), buses_i, buses_i) Z -= Z[sub.slack_bus] return n.buses_t.p.reindex(columns=buses_i) @ Z n.buses_t.v_ang = (pd.concat([v_ang_for_(sub) for sub in n.sub_networks.obj], axis=1) .reindex(columns=n.buses.index, fill_value=0))
def assign_solution(n, sns, variables_sol, constraints_dual, keep_references=False, keep_shadowprices=None): """ Helper function. Assigns the solution of a succesful optimization to the network. """ def set_from_frame(pnl, attr, df): if attr not in pnl: #use this for subnetworks_t pnl[attr] = df.reindex(n.snapshots) elif pnl[attr].empty: pnl[attr] = df.reindex(n.snapshots) else: pnl[attr].loc[sns, :] = df.reindex(columns=pnl[attr].columns) pop = not keep_references def map_solution(c, attr): variables = get_var(n, c, attr, pop=pop) predefined = True if (c, attr) not in lookup.index: predefined = False n.sols[c] = n.sols[c] if c in n.sols else Dict(df=pd.DataFrame(), pnl={}) n.solutions.at[(c, attr), 'in_comp'] = predefined if isinstance(variables, pd.DataFrame): # case that variables are timedependent n.solutions.at[(c, attr), 'pnl'] = True pnl = n.pnl(c) if predefined else n.sols[c].pnl values = variables.stack().map(variables_sol).unstack() if c in n.passive_branch_components: set_from_frame(pnl, 'p0', values) set_from_frame(pnl, 'p1', - values) elif c == 'Link': set_from_frame(pnl, 'p0', values) for i in ['1'] + additional_linkports(n): i_eff = '' if i == '1' else i eff = get_as_dense(n, 'Link', f'efficiency{i_eff}', sns) set_from_frame(pnl, f'p{i}', - values * eff) else: set_from_frame(pnl, attr, values) else: # case that variables are static n.solutions.at[(c, attr), 'pnl'] = False sol = variables.map(variables_sol) if predefined: non_ext = n.df(c)[attr] n.df(c)[attr + '_opt'] = sol.reindex(non_ext.index).fillna(non_ext) else: n.sols[c].df[attr] = sol n.sols = Dict() n.solutions = pd.DataFrame(index=n.variables.index, columns=['in_comp', 'pnl']) for c, attr in n.variables.index: map_solution(c, attr) # if nominal capcity was no variable set optimal value to nominal for c, attr in lookup.query('nominal').index.difference(n.variables.index): n.df(c)[attr+'_opt'] = n.df(c)[attr] # recalculate storageunit net dispatch if not n.df('StorageUnit').empty: c = 'StorageUnit' n.pnl(c)['p'] = n.pnl(c)['p_dispatch'] - n.pnl(c)['p_store'] # duals if keep_shadowprices == False: keep_shadowprices = [] sp = n.constraints.index if isinstance(keep_shadowprices, list): sp = sp[sp.isin(keep_shadowprices, level=0)] def map_dual(c, attr): # If c is a pypsa component name the dual is store at n.pnl(c) # or n.df(c). 
For the second case the index of the constraints have to # be a subset of n.df(c).index otherwise the dual is stored at # n.duals[c].df constraints = get_con(n, c, attr, pop=pop) is_pnl = isinstance(constraints, pd.DataFrame) # TODO: setting the sign is not very clear sign = 1 if 'upper' in attr or attr == 'marginal_price' else -1 n.dualvalues.at[(c, attr), 'pnl'] = is_pnl to_component = c in n.all_components if is_pnl: n.dualvalues.at[(c, attr), 'in_comp'] = to_component duals = constraints.stack().map(sign * constraints_dual).unstack() if c not in n.duals and not to_component: n.duals[c] = Dict(df=pd.DataFrame(), pnl={}) pnl = n.pnl(c) if to_component else n.duals[c].pnl set_from_frame(pnl, attr, duals) else: # here to_component can change duals = constraints.map(sign * constraints_dual) if to_component: to_component = (duals.index.isin(n.df(c).index).all()) n.dualvalues.at[(c, attr), 'in_comp'] = to_component if c not in n.duals and not to_component: n.duals[c] = Dict(df=pd.DataFrame(), pnl={}) df = n.df(c) if to_component else n.duals[c].df df[attr] = duals n.duals = Dict() n.dualvalues = pd.DataFrame(index=sp, columns=['in_comp', 'pnl']) # extract shadow prices attached to components for c, attr in sp: map_dual(c, attr) #correct prices for snapshot weightings n.buses_t.marginal_price.loc[sns] = n.buses_t.marginal_price.loc[sns].divide(n.snapshot_weightings.loc[sns], axis=0) # discard remaining if wanted if not keep_references: for c, attr in n.constraints.index.difference(sp): get_con(n, c, attr, pop) #load if len(n.loads): set_from_frame(n.pnl('Load'), 'p', get_as_dense(n, 'Load', 'p_set', sns)) #clean up vars and cons for c in list(n.vars): if n.vars[c].df.empty and n.vars[c].pnl == {}: n.vars.pop(c) for c in list(n.cons): if n.cons[c].df.empty and n.cons[c].pnl == {}: n.cons.pop(c) # recalculate injection ca = [('Generator', 'p', 'bus' ), ('Store', 'p', 'bus'), ('Load', 'p', 'bus'), ('StorageUnit', 'p', 'bus'), ('Link', 'p0', 'bus0'), ('Link', 'p1', 'bus1')] for i in additional_linkports(n): ca.append(('Link', f'p{i}', f'bus{i}')) sign = lambda c: n.df(c).sign if 'sign' in n.df(c) else -1 #sign for 'Link' n.buses_t.p = pd.concat( [n.pnl(c)[attr].mul(sign(c)).rename(columns=n.df(c)[group]) for c, attr, group in ca], axis=1).groupby(level=0, axis=1).sum()\ .reindex(columns=n.buses.index, fill_value=0) def v_ang_for_(sub): buses_i = sub.buses_o if len(buses_i) == 1: return pd.DataFrame(0, index=sns, columns=buses_i) sub.calculate_B_H(skip_pre=True) Z = pd.DataFrame(np.linalg.pinv((sub.B).todense()), buses_i, buses_i) Z -= Z[sub.slack_bus] return n.buses_t.p.reindex(columns=buses_i) @ Z n.buses_t.v_ang = (pd.concat([v_ang_for_(sub) for sub in n.sub_networks.obj], axis=1) .reindex(columns=n.buses.index, fill_value=0))
5,203
def demo_rgb2(): fig, ax = plt.subplots() ax_r, ax_g, ax_b = make_rgb_axes(ax, pad=0.02) r, g, b = get_rgb() im_r, im_g, im_b, im_rgb = make_cube(r, g, b) ax.imshow(im_rgb) ax_r.imshow(im_r) ax_g.imshow(im_g) ax_b.imshow(im_b) for ax in fig.axes: ax.tick_params(axis='both', direction='in', color='w') ax.spines[:].set_color("w")
def demo_rgb2(): fig, ax = plt.subplots() ax_r, ax_g, ax_b = make_rgb_axes(ax, pad=0.02) r, g, b = get_rgb() im_r, im_g, im_b, im_rgb = make_cube(r, g, b) ax.imshow(im_rgb) ax_r.imshow(im_r) ax_g.imshow(im_g) ax_b.imshow(im_b) for ax in fig.axes: ax.tick_params(direction='in', color='w') ax.spines[:].set_color("w")
45,844
def rgba_to_rgb(image: torch.Tensor, bg_rgb: Tuple = (0, 0, 0)) -> torch.Tensor: r"""Convert an image from RGBA to RGB. Args: image: RGBA Image to be converted to RGB of shape :math:`(*,4,H,W)`. bg_rgb: background RGB. Returns: RGB version of the image with shape :math:`(*,3,H,W)`. Example: >>> input = torch.rand(2, 4, 4, 5) >>> output = rgba_to_rgb(input) # 2x3x4x5 """ if not isinstance(image, torch.Tensor): raise TypeError(f"Input type is not a torch.Tensor. Got {type(image)}") if len(image.shape) < 3 or image.shape[-3] != 4: raise ValueError(f"Input size must have a shape of (*, 4, H, W).Got {image.shape}") # unpack channels r, g, b, a = torch.chunk(image, image.shape[-3], dim=-3) r_bg, g_bg, b_bg = bg_rgb # compute new channels a_one = torch.tensor(1.0) - a r_new: torch.Tensor = a_one * r_bg + a * r g_new: torch.Tensor = a_one * g_bg + a * g b_new: torch.Tensor = a_one * b_bg + a * b return torch.cat([r_new, g_new, b_new], dim=-3)
def rgba_to_rgb(image: torch.Tensor, bg_rgb: Tuple = (0, 0, 0)) -> torch.Tensor: r"""Convert an image from RGBA to RGB. Args: image: RGBA Image to be converted to RGB of shape :math:`(*,4,H,W)`. bg_rgb: background RGB. Returns: RGB version of the image with shape :math:`(*,3,H,W)`. Example: >>> input = torch.rand(2, 4, 4, 5) >>> output = rgba_to_rgb(input) # 2x3x4x5 """ if not isinstance(image, torch.Tensor): raise TypeError(f"Input type is not a torch.Tensor. Got {type(image)}") if len(image.shape) < 3 or image.shape[-3] != 4: raise ValueError(f"Input size must have a shape of (*, 4, H, W).Got {image.shape}") # unpack channels r, g, b, a = torch.chunk(image, image.shape[-3], dim=-3) r_bg, g_bg, b_bg = bg_rgb # compute new channels a_one = torch.tensor(1.0, device=image.device, dtype=image.dtype) - a r_new: torch.Tensor = a_one * r_bg + a * r g_new: torch.Tensor = a_one * g_bg + a * g b_new: torch.Tensor = a_one * b_bg + a * b return torch.cat([r_new, g_new, b_new], dim=-3)
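A small usage sketch following the docstring's shape convention. The white-background call and the half-precision case are illustrative assumptions, with the latter relying on the modified version that builds the blend constant on the input's device and dtype.

```python
# Assumes the rgba_to_rgb shown above is in scope (e.g. kornia.color).
import torch

rgba = torch.rand(2, 4, 4, 5)                          # (*, 4, H, W)
rgb_black = rgba_to_rgb(rgba)                          # composited over black
rgb_white = rgba_to_rgb(rgba, bg_rgb=(1.0, 1.0, 1.0))  # composited over white
print(rgb_black.shape)                                 # torch.Size([2, 3, 4, 5])

if torch.cuda.is_available():
    # With the modified version the constant follows the input, so a
    # half-precision CUDA tensor no longer mixes devices/dtypes.
    rgb_gpu = rgba_to_rgb(rgba.cuda().half())
```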
33,083
def test_matcher_on_instance(): """Test that matchers get registered on an object instance, not just on the class""" skill = _TestSkill(None, None) assert hasattr(skill.hello_skill, "matchers")
def test_matcher_on_instance(): """Test that matchers get registered on an object instance, not just on the class.""" skill = _TestSkill(None, None) assert hasattr(skill.hello_skill, "matchers")
48,625
def parse_link(line, d): split_content = line.split() d["type"] = split_content[0].split("/")[1] if "peer" in line and len(split_content) >= 3: d["peer_ip"] = split_content[1] d["peer"] = split_content[3] elif len(split_content) >= 2: d["mac"] = split_content[1] for item in split_content: if item == "promiscuity": index = split_content.index(item) d["promiscuity_mode"] = split_content[index + 1]
def parse_link(line, d): split_content = line.split() d["type"] = split_content[0].split("/")[1] if "peer" in line and len(split_content) >= 3: d["peer_ip"] = split_content[1] d["peer"] = split_content[3] elif len(split_content) >= 2: d["mac"] = split_content[1] if "promiscuity" in split_content: d["promiscuity"] = split_content[split_content.index('promiscuity') + 1]
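For context, a hedged example of what the modified parser produces for a detail line in the style of `ip -d link show` output; the sample line itself is an assumption, not taken from the source.

```python
# Hypothetical input; field layout mimics typical `ip -d link show` output.
d = {}
parse_link("link/ether 52:54:00:12:34:56 brd ff:ff:ff:ff:ff:ff promiscuity 0", d)
print(d)  # {'type': 'ether', 'mac': '52:54:00:12:34:56', 'promiscuity': '0'}
```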
49,885
def get_total_irradiance(surface_tilt, surface_azimuth, solar_zenith, solar_azimuth, dni, ghi, dhi, dni_extra=None, airmass=None, albedo=.25, surface_type=None, model='isotropic', model_perez='allsitescomposite1990', **kwargs): r""" Determine total in-plane irradiance and its beam, sky diffuse and ground reflected components, using the specified sky diffuse irradiance model. .. math:: I_{tot} = I_{beam} + I_{sky diffuse} + I_{ground} Sky diffuse models include: * isotropic (default) * klucher * haydavies * reindl * king * perez Parameters ---------- surface_tilt : numeric Panel tilt from horizontal.[degree] surface_azimuth : numeric Panel azimuth from north. [degree] solar_zenith : numeric Solar zenith angle. [degree] solar_azimuth : numeric Solar azimuth angle. [degree] dni : numeric Direct Normal Irradiance. [W/m2] ghi : numeric Global horizontal irradiance. [W/m2] dhi : numeric Diffuse horizontal irradiance. [W/m2] dni_extra : None or numeric, default None Extraterrestrial direct normal irradiance. [W/m2] airmass : None or numeric, default None Relative airmass (not adjusted for pressure). [unitless] albedo : numeric, default 0.25 Surface albedo. [unitless] surface_type : None or String, default None Surface type. See :py:func:`~pvlib.irradiance.grounddiffuse` for the list of accepted values. model : String, default 'isotropic' Irradiance model. Can be one of 'isotropic', 'klucher', 'haydavies', 'reindl', 'king', 'perez'. model_perez : String, default 'allsitescomposite1990' Used only if model='perez'. See :py:func:`~pvlib.irradiance.perez`. Returns ------- total_irrad : OrderedDict or DataFrame Contains keys/columns ``'poa_global', 'poa_direct', 'poa_diffuse', 'poa_sky_diffuse', 'poa_ground_diffuse'``. Notes ----- Models 'haydavies', 'reindl', or 'perez' require 'dni_extra'. Values can be calculated using :py:func:`~pvlib.irradiance.get_extra_radiation`. The 'perez' model requires relative airmass ('airmass') as input. If 'airmass' is not provided, it is calculated usign the defaults in :py:func:`~pvlib.irradiance.get_relative_airmass`. """ poa_sky_diffuse = get_sky_diffuse( surface_tilt, surface_azimuth, solar_zenith, solar_azimuth, dni, ghi, dhi, dni_extra=dni_extra, airmass=airmass, model=model, model_perez=model_perez) poa_ground_diffuse = get_ground_diffuse(surface_tilt, ghi, albedo, surface_type) aoi_ = aoi(surface_tilt, surface_azimuth, solar_zenith, solar_azimuth) irrads = poa_components(aoi_, dni, poa_sky_diffuse, poa_ground_diffuse) return irrads
def get_total_irradiance(surface_tilt, surface_azimuth, solar_zenith, solar_azimuth, dni, ghi, dhi, dni_extra=None, airmass=None, albedo=.25, surface_type=None, model='isotropic', model_perez='allsitescomposite1990', **kwargs): r""" Determine total in-plane irradiance and its beam, sky diffuse and ground reflected components, using the specified sky diffuse irradiance model. .. math:: I_{tot} = I_{beam} + I_{sky diffuse} + I_{ground} Sky diffuse models include: * isotropic (default) * klucher * haydavies * reindl * king * perez Parameters ---------- surface_tilt : numeric Panel tilt from horizontal.[degree] surface_azimuth : numeric Panel azimuth from north. [degree] solar_zenith : numeric Solar zenith angle. [degree] solar_azimuth : numeric Solar azimuth angle. [degree] dni : numeric Direct Normal Irradiance. [W/m2] ghi : numeric Global horizontal irradiance. [W/m2] dhi : numeric Diffuse horizontal irradiance. [W/m2] dni_extra : None or numeric, default None Extraterrestrial direct normal irradiance. [W/m2] airmass : None or numeric, default None Relative airmass (not adjusted for pressure). [unitless] albedo : numeric, default 0.25 Surface albedo. [unitless] surface_type : None or String, default None Surface type. See :py:func:`~pvlib.irradiance.grounddiffuse` for the list of accepted values. model : String, default 'isotropic' Irradiance model. Can be one of 'isotropic', 'klucher', 'haydavies', 'reindl', 'king', 'perez'. model_perez : String, default 'allsitescomposite1990' Used only if model='perez'. See :py:func:`~pvlib.irradiance.perez`. Returns ------- total_irrad : OrderedDict or DataFrame Contains keys/columns ``'poa_global', 'poa_direct', 'poa_diffuse', 'poa_sky_diffuse', 'poa_ground_diffuse'``. Raises ------ ValueError If model is one of ``'haydavies'``, ``'reindl'``, or ``'perez'`` and ``dni_extra`` is ``None``. Notes ----- Models 'haydavies', 'reindl', or 'perez' require 'dni_extra'. Values can be calculated using :py:func:`~pvlib.irradiance.get_extra_radiation`. The 'perez' model requires relative airmass ('airmass') as input. If 'airmass' is not provided, it is calculated usign the defaults in :py:func:`~pvlib.irradiance.get_relative_airmass`. """ poa_sky_diffuse = get_sky_diffuse( surface_tilt, surface_azimuth, solar_zenith, solar_azimuth, dni, ghi, dhi, dni_extra=dni_extra, airmass=airmass, model=model, model_perez=model_perez) poa_ground_diffuse = get_ground_diffuse(surface_tilt, ghi, albedo, surface_type) aoi_ = aoi(surface_tilt, surface_azimuth, solar_zenith, solar_azimuth) irrads = poa_components(aoi_, dni, poa_sky_diffuse, poa_ground_diffuse) return irrads
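A minimal call sketch using only parameters documented above; the scalar inputs are illustrative values, not measured data, and the isotropic model is chosen because it needs neither `dni_extra` nor `airmass`.

```python
# Assumes pvlib is installed; the function above lives in pvlib.irradiance.
from pvlib import irradiance

total = irradiance.get_total_irradiance(
    surface_tilt=30, surface_azimuth=180,      # south-facing, 30 degree tilt
    solar_zenith=45, solar_azimuth=160,
    dni=800, ghi=600, dhi=100,                 # W/m2, illustrative only
    albedo=0.25, model='isotropic',
)
print(total['poa_global'], total['poa_direct'])
```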
33,235
def auto_model(layout, scan_length=None, one_vs_rest=False): """Create a simple default model for each of the tasks in a BIDSLayout. Contrasts each trial type against all other trial types and trial types at the run level and then uses dummy contrasts at each other level present to aggregate these results up. Parameters ---------- layout : :obj:`bids.layout.BIDSLayout` A BIDSLayout instance scan_length : int Scan length for loading event variables in cases where the scan length can not be read from the nifti. Primarily for testing. one_vs_rest : bool Set to True if you would like to autogenerate contrasts of each trial type against everyother trialtype. Returns ------- list list of model dictionaries for each task """ base_name = layout._root.name tasks = layout.entities['task'].unique() task_models = [] for task_name in tasks: # Populate model meta-data model = OrderedDict() model["Name"] = "_".join([base_name, task_name]) model["Description"] = ("Autogenerated model for the %s task from %s" % (task_name, base_name)) model["BIDSModelVersion"]= "1.0.0" model["Input"] = {"task": [task_name]} nodes = [] # Make run level block transformations = OrderedDict( Transformer='pybids-transforms-v1', Instructions=[ OrderedDict( Name='Factor', Input='trial_type' ) ] ) run = OrderedDict(Level='Run', Name='Run', GroupBy=['run', 'subject'], Transformations=transformations) # Get trial types run_nodes = load_variables(layout, task=task_name, levels=['run'], scan_length=scan_length) evs = [] for n in run_nodes.nodes: evs.extend(n.variables['trial_type'].values.values) trial_types = np.unique(evs) trial_type_factors = ["trial_type." + tt for tt in trial_types] run_model = OrderedDict(Type='glm', X=trial_type_factors) # Add HRF run_model['HRF'] = OrderedDict( Variables=trial_type_factors, Model="DoubleGamma", Parameters=OrderedDict( PeakDelay=3, PeakDispersion=6, UndershootDelay=10, UndershootDispersion=12, PeakUndershootRatio=0.2 ) ) run["Model"] = run_model if one_vs_rest: # If there are multiple trial types, build contrasts contrasts = [] for tt in trial_types: cdict = OrderedDict() if len(trial_types) > 1: cdict["Name"] = "run_" + tt + "_vs_others" else: cdict["Name"] = "run_" + tt cdict["ConditionList"] = trial_type_factors # Calculate weights for contrast weights = np.ones(len(trial_types)) try: weights[trial_types != tt] = -1.0 / (len(trial_types) - 1) except ZeroDivisionError: pass cdict["Weights"] = list(weights) cdict["Test"] = "t" contrasts.append(cdict) run["Contrasts"] = contrasts nodes.append(run) if one_vs_rest: # if there are multiple sessions, t-test run level contrasts at # session level sessions = layout.get_sessions() if len(sessions) > 1: # get contrasts names from previous block contrast_names = [cc["Name"] for cc in nodes[-1]["Contrasts"]] nodes.append(_make_passthrough_contrast( "Session", contrast_names)) subjects = layout.get_subjects() if len(subjects) > 1: # get contrasts names from previous block contrast_names = [cc["Name"] for cc in nodes[-1]["Contrasts"]] nodes.append(_make_passthrough_contrast( "Subject", contrast_names)) # get contrasts names from previous block contrast_names = [cc["Name"] for cc in nodes[-1]["Contrasts"]] nodes.append(_make_passthrough_contrast( "Dataset", contrast_names)) model["Nodes"] = nodes task_models.append(model) return task_models
def auto_model(layout, scan_length=None, one_vs_rest=False): """Create a simple default model for each of the tasks in a BIDSLayout. Contrasts each trial type against all other trial types and trial types at the run level and then uses dummy contrasts at each other level present to aggregate these results up. Parameters ---------- layout : :obj:`bids.layout.BIDSLayout` A BIDSLayout instance scan_length : int Scan length for loading event variables in cases where the scan length can not be read from the nifti. Primarily for testing. one_vs_rest : bool Set to True if you would like to autogenerate contrasts of each trial type against everyother trialtype. Returns ------- list list of model dictionaries for each task """ base_name = layout._root.name tasks = layout.entities['task'].unique() task_models = [] for task_name in tasks: # Populate model meta-data model = OrderedDict() model["Name"] = "_".join([base_name, task_name]) model["Description"] = ("Autogenerated model for the %s task from %s" % (task_name, base_name)) model["BIDSModelVersion"]= "1.0.0" model["Input"] = {"task": [task_name]} nodes = [] # Make run level block transformations = OrderedDict( Transformer='pybids-transforms-v1', Instructions=[ OrderedDict( Name='Factor', Input='trial_type' ) ] ) run = OrderedDict(Level='Run', Name='Run', GroupBy=['run', 'subject'], Transformations=transformations) # Get trial types run_nodes = load_variables(layout, task=task_name, levels=['run'], scan_length=scan_length) evs = [] for n in run_nodes.nodes: evs.extend(n.variables['trial_type'].values.values) trial_types = np.unique(evs) trial_type_factors = ["trial_type." + tt for tt in trial_types] run_model = OrderedDict(Type='glm', X=trial_type_factors) # Add HRF run_model['HRF'] = OrderedDict( Variables=trial_type_factors, Model="DoubleGamma", Parameters=OrderedDict( PeakDelay=3, PeakDispersion=6, UndershootDelay=10, UndershootDispersion=12, PeakUndershootRatio=0.2 ) ) run["Model"] = run_model if one_vs_rest: # If there are multiple trial types, build contrasts contrasts = [] for tt in trial_types: cdict = OrderedDict() if len(trial_types) > 1: cdict["Name"] = "run_" + tt + "_vs_others" else: cdict["Name"] = "run_" + tt cdict["ConditionList"] = trial_type_factors # Calculate weights for contrast weights = np.ones(len(trial_types)) try: weights[trial_types != tt] = -1.0 / (len(trial_types) - 1) except ZeroDivisionError: pass cdict["Weights"] = list(weights) cdict["Test"] = "t" contrasts.append(cdict) run["Contrasts"] = contrasts nodes.append(run) if one_vs_rest: # if there are multiple sessions, t-test run level contrasts at # session level sessions = layout.get_sessions() if len(sessions) > 1: # get contrasts names from previous block contrast_names = [cc["Name"] for cc in nodes[-1]["Contrasts"]] nodes.append(_make_passthrough_contrast( "Session", contrast_names)) subjects = layout.get_subjects() if len(subjects) > 1: # get contrasts names from previous block contrast_names = [cc["Name"] for cc in nodes[-1]["Contrasts"]] nodes.append(_make_passthrough_contrast( "Subject", contrast_names, "meta")) # get contrasts names from previous block contrast_names = [cc["Name"] for cc in nodes[-1]["Contrasts"]] nodes.append(_make_passthrough_contrast( "Dataset", contrast_names)) model["Nodes"] = nodes task_models.append(model) return task_models
53,778
def get_bounds_pu(n, c, sns, index=slice(None), attr=None): """ Getter function to retrieve the per unit bounds of a given compoent for given snapshots and possible subset of elements (e.g. non-extendables). Depending on the attr you can further specify the bounds of the variable you are looking at, e.g. p_store for storage units. Parameters ---------- n : pypsa.Network c : string Component name, e.g. "Generator", "Line". sns : pandas.Index/pandas.DateTimeIndex set of snapshots for the bounds index : pd.Index, default None Subset of the component elements. If None (default) bounds of all elements are returned. attr : string, default None attribute name for the bounds, e.g. "p", "s", "p_store" """ min_pu_str = nominal_attrs[c].replace('nom', 'min_pu') max_pu_str = nominal_attrs[c].replace('nom', 'max_pu') max_pu = get_switchable_as_dense(n, c, max_pu_str, sns) if c in n.passive_branch_components: min_pu = - max_pu elif c == 'StorageUnit': min_pu = pd.DataFrame(0, max_pu.index, max_pu.columns) if attr == 'p_store': max_pu = - get_switchable_as_dense(n, c, min_pu_str, sns) if attr == 'state_of_charge': max_pu = expand_series(n.df(c).max_hours, sns).T min_pu = pd.DataFrame(0, *max_pu.axes) else: min_pu = get_switchable_as_dense(n, c, min_pu_str, sns) # set to zero if not active if isinstance(sns, pd.MultiIndex): for inv_p in sns.levels[0]: max_pu.loc[inv_p][max_pu.columns[~get_active_assets(n,c,inv_p,sns)]] = 0 min_pu.loc[inv_p][max_pu.columns[~get_active_assets(n,c,inv_p,sns)]] = 0 return min_pu[index], max_pu[index]
def get_bounds_pu(n, c, sns, index=slice(None), attr=None): """ Getter function to retrieve the per unit bounds of a given compoent for given snapshots and possible subset of elements (e.g. non-extendables). Depending on the attr you can further specify the bounds of the variable you are looking at, e.g. p_store for storage units. Parameters ---------- n : pypsa.Network c : string Component name, e.g. "Generator", "Line". sns : pandas.Index/pandas.DateTimeIndex set of snapshots for the bounds index : pd.Index, default None Subset of the component elements. If None (default) bounds of all elements are returned. attr : string, default None attribute name for the bounds, e.g. "p", "s", "p_store" """ min_pu_str = nominal_attrs[c].replace('nom', 'min_pu') max_pu_str = nominal_attrs[c].replace('nom', 'max_pu') max_pu = get_switchable_as_dense(n, c, max_pu_str, sns) if c in n.passive_branch_components: min_pu = - max_pu elif c == 'StorageUnit': min_pu = pd.DataFrame(0, max_pu.index, max_pu.columns) if attr == 'p_store': max_pu = - get_switchable_as_dense(n, c, min_pu_str, sns) if attr == 'state_of_charge': max_pu = expand_series(n.df(c).max_hours, sns).T min_pu = pd.DataFrame(0, *max_pu.axes) else: min_pu = get_switchable_as_dense(n, c, min_pu_str, sns) # set to zero if not active if isinstance(sns, pd.MultiIndex): for inv_p in sns.levels[0]: inactive = ~get_active_assets(n, c, inv_p, sns) max_pu.loc[inv_p, inactive] = 0 min_pu.loc[inv_p, inactive] = 0 return min_pu[index], max_pu[index]
27,701
def test_stderr_write_returns_len(): """Write on Encoded files, namely captured stderr, should return number of characters written.""" assert sys.stderr.write("Foo") == 3
def test_stderr_write_returns_len(capsys): """Write on Encoded files, namely captured stderr, should return number of characters written.""" assert sys.stderr.write("Foo") == 3
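The modified test requests pytest's `capsys` fixture, which swaps `sys.stderr` for a capturing stream. A small sketch of that interaction; the extra test below is hypothetical and not part of the source.

```python
import sys


def test_stderr_capture_roundtrip(capsys):
    # With capsys active, sys.stderr.write returns the number of characters
    # written, and the captured text can be read back via readouterr().
    assert sys.stderr.write("Foo") == 3
    assert capsys.readouterr().err == "Foo"
```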
14,035
def plot_dataframe( df, column=None, cmap=None, color=None, ax=None, cax=None, categorical=False, legend=False, scheme=None, k=5, vmin=None, vmax=None, markersize=None, figsize=None, legend_kwds=None, categories=None, classification_kwds=None, missing_kwds=None, aspect="auto", **style_kwds ): """ Plot a GeoDataFrame. Generate a plot of a GeoDataFrame with matplotlib. If a column is specified, the plot coloring will be based on values in that column. Parameters ---------- df : GeoDataFrame The GeoDataFrame to be plotted. Currently Polygon, MultiPolygon, LineString, MultiLineString and Point geometries can be plotted. column : str, np.array, pd.Series (default None) The name of the dataframe column, np.array, or pd.Series to be plotted. If np.array or pd.Series are used then it must have same length as dataframe. Values are used to color the plot. Ignored if `color` is also set. cmap : str (default None) The name of a colormap recognized by matplotlib. color : str (default None) If specified, all objects will be colored uniformly. ax : matplotlib.pyplot.Artist (default None) axes on which to draw the plot cax : matplotlib.pyplot Artist (default None) axes on which to draw the legend in case of color map. categorical : bool (default False) If False, cmap will reflect numerical values of the column being plotted. For non-numerical columns, this will be set to True. legend : bool (default False) Plot a legend. Ignored if no `column` is given, or if `color` is given. scheme : str (default None) Name of a choropleth classification scheme (requires mapclassify). A mapclassify.MapClassifier object will be used under the hood. Supported are all schemes provided by mapclassify (e.g. 'BoxPlot', 'EqualInterval', 'FisherJenks', 'FisherJenksSampled', 'HeadTailBreaks', 'JenksCaspall', 'JenksCaspallForced', 'JenksCaspallSampled', 'MaxP', 'MaximumBreaks', 'NaturalBreaks', 'Quantiles', 'Percentiles', 'StdMean', 'UserDefined'). Arguments can be passed in classification_kwds. k : int (default 5) Number of classes (ignored if scheme is None) vmin : None or float (default None) Minimum value of cmap. If None, the minimum data value in the column to be plotted is used. vmax : None or float (default None) Maximum value of cmap. If None, the maximum data value in the column to be plotted is used. markersize : str or float or sequence (default None) Only applies to point geometries within a frame. If a str, will use the values in the column of the frame specified by markersize to set the size of markers. Otherwise can be a value to apply to all points, or a sequence of the same length as the number of points. figsize : tuple of integers (default None) Size of the resulting matplotlib.figure.Figure. If the argument axes is given explicitly, figsize is ignored. legend_kwds : dict (default None) Keyword arguments to pass to matplotlib.pyplot.legend() or matplotlib.pyplot.colorbar(). Additional accepted keywords when `scheme` is specified: fmt : string A formatting specification for the bin edges of the classes in the legend. For example, to have no decimals: ``{"fmt": "{:.0f}"}``. labels : list-like A list of legend labels to override the auto-generated labels. Needs to have the same number of elements as the number of classes (`k`). categories : list-like Ordered list-like object of categories to be used for categorical plot. 
classification_kwds : dict (default None) Keyword arguments to pass to mapclassify missing_kwds : dict (default None) Keyword arguments specifying color options (as style_kwds) to be passed on to geometries with missing values in addition to or overwriting other style kwds. If None, geometries with missing values are not plotted. aspect : 'auto', 'equal', None or float (default 'auto') Set aspect of axis. If 'auto', the default aspect for map plots is 'equal'; if however data are not projected (coordinates are long/lat), the aspect is by default set to 1/cos(df_y * pi/180) with df_y the y coordinate of the middle of the GeoDataFrame (the mean of the y range of bounding box) so that a long/lat square appears square in the middle of the plot. This implies an Equirectangular projection. If None, the aspect of ax won't be changed, useful when passing in an existing ax and needing to keep the original aspect. It can also be set manually (float) as the ratio of y-unit to x-unit. **style_kwds : dict Style options to be passed on to the actual plot function, such as ``edgecolor``, ``facecolor``, ``linewidth``, ``markersize``, ``alpha``. Returns ------- ax : matplotlib axes instance """ if "colormap" in style_kwds: warnings.warn( "'colormap' is deprecated, please use 'cmap' instead " "(for consistency with matplotlib)", FutureWarning, ) cmap = style_kwds.pop("colormap") if "axes" in style_kwds: warnings.warn( "'axes' is deprecated, please use 'ax' instead " "(for consistency with pandas)", FutureWarning, ) ax = style_kwds.pop("axes") if column is not None and color is not None: warnings.warn( "Only specify one of 'column' or 'color'. Using 'color'.", UserWarning ) column = None try: import matplotlib.pyplot as plt except ImportError: raise ImportError( "The matplotlib package is required for plotting in geopandas. " "You can install it using 'conda install -c conda-forge matplotlib' or " "'pip install matplotlib'." ) if ax is None: if cax is not None: raise ValueError("'ax' can not be None if 'cax' is not.") fig, ax = plt.subplots(figsize=figsize) if aspect == "auto": if df.crs and df.crs.is_geographic: bounds = df.total_bounds y_coord = np.mean([bounds[1], bounds[3]]) ax.set_aspect(1 / np.cos(y_coord * np.pi / 180)) # formula ported from R package sp # https://github.com/edzer/sp/blob/master/R/mapasp.R else: ax.set_aspect("equal") elif aspect == "equal": ax.set_aspect("equal") elif aspect is not None: ax.set_aspect(aspect) if df.empty: warnings.warn( "The GeoDataFrame you are attempting to plot is " "empty. Nothing has been displayed.", UserWarning, ) return ax if isinstance(markersize, str): markersize = df[markersize].values if column is None: return plot_series( df.geometry, cmap=cmap, color=color, ax=ax, figsize=figsize, markersize=markersize, aspect=aspect, **style_kwds ) # To accept pd.Series and np.arrays as column if isinstance(column, (np.ndarray, pd.Series)): if column.shape[0] != df.shape[0]: raise ValueError( "The dataframe and given column have different number of rows." 
) else: values = column else: values = df[column] if pd.api.types.is_categorical_dtype(values.dtype): if categories is not None: raise ValueError( "Cannot specify 'categories' when column has categorical dtype" ) categorical = True elif values.dtype is np.dtype("O") or categories: categorical = True nan_idx = np.asarray(pd.isna(values), dtype="bool") # Define `values` as a Series if categorical: if cmap is None: cmap = "tab10" cat = pd.Categorical(values, categories=categories) categories = list(cat.categories) # values missing in the Categorical but not in original values missing = list(np.unique(values[~nan_idx & cat.isna()])) if missing: raise ValueError( "Column contains values not listed in categories. " "Missing categories: {}.".format(missing) ) values = cat.codes[~nan_idx] vmin = 0 if vmin is None else vmin vmax = len(categories) - 1 if vmax is None else vmax if scheme is not None: if classification_kwds is None: classification_kwds = {} if "k" not in classification_kwds: classification_kwds["k"] = k binning = _mapclassify_choro(values[~nan_idx], scheme, **classification_kwds) # set categorical to True for creating the legend categorical = True if legend_kwds is not None and "labels" in legend_kwds: if len(legend_kwds["labels"]) != binning.k: raise ValueError( "Number of labels must match number of bins, " "received {} labels for {} bins".format( len(legend_kwds["labels"]), binning.k ) ) else: categories = list(legend_kwds.pop("labels")) else: fmt = "{:.2f}" if legend_kwds is not None and "fmt" in legend_kwds: fmt = legend_kwds.pop("fmt") categories = binning.get_legend_classes(fmt) values = np.array(binning.yb) # fill values with placeholder where were NaNs originally to map them properly # (after removing them in categorical or scheme) if categorical: for n in np.where(nan_idx)[0]: values = np.insert(values, n, values[0]) mn = values[~np.isnan(values)].min() if vmin is None else vmin mx = values[~np.isnan(values)].max() if vmax is None else vmax # decompose GeometryCollections geoms, multiindex = _flatten_multi_geoms(df.geometry, prefix="Geom") values = np.take(values, multiindex, axis=0) nan_idx = np.take(nan_idx, multiindex, axis=0) expl_series = geopandas.GeoSeries(geoms) geom_types = expl_series.type poly_idx = np.asarray((geom_types == "Polygon") | (geom_types == "MultiPolygon")) line_idx = np.asarray( (geom_types == "LineString") | (geom_types == "MultiLineString") | (geom_types == "LinearRing") ) point_idx = np.asarray((geom_types == "Point") | (geom_types == "MultiPoint")) # plot all Polygons and all MultiPolygon components in the same collection polys = expl_series[poly_idx & np.invert(nan_idx)] subset = values[poly_idx & np.invert(nan_idx)] if not polys.empty: _plot_polygon_collection( ax, polys, subset, vmin=mn, vmax=mx, cmap=cmap, **style_kwds ) # plot all LineStrings and MultiLineString components in same collection lines = expl_series[line_idx & np.invert(nan_idx)] subset = values[line_idx & np.invert(nan_idx)] if not lines.empty: _plot_linestring_collection( ax, lines, subset, vmin=mn, vmax=mx, cmap=cmap, **style_kwds ) # plot all Points in the same collection points = expl_series[point_idx & np.invert(nan_idx)] subset = values[point_idx & np.invert(nan_idx)] if not points.empty: if isinstance(markersize, np.ndarray): markersize = np.take(markersize, multiindex, axis=0) markersize = markersize[point_idx & np.invert(nan_idx)] _plot_point_collection( ax, points, subset, vmin=mn, vmax=mx, markersize=markersize, cmap=cmap, **style_kwds ) if missing_kwds is not None: if 
color: if "color" not in missing_kwds: missing_kwds["color"] = color merged_kwds = style_kwds.copy() merged_kwds.update(missing_kwds) plot_series(expl_series[nan_idx], ax=ax, **merged_kwds) if legend and not color: if legend_kwds is None: legend_kwds = {} if "fmt" in legend_kwds: legend_kwds.pop("fmt") from matplotlib.lines import Line2D from matplotlib.colors import Normalize from matplotlib import cm norm = style_kwds.get("norm", None) if not norm: norm = Normalize(vmin=mn, vmax=mx) n_cmap = cm.ScalarMappable(norm=norm, cmap=cmap) if categorical: patches = [] for value, cat in enumerate(categories): patches.append( Line2D( [0], [0], linestyle="none", marker="o", alpha=style_kwds.get("alpha", 1), markersize=10, markerfacecolor=n_cmap.to_rgba(value), markeredgewidth=0, ) ) if missing_kwds is not None: if "color" in merged_kwds: merged_kwds["facecolor"] = merged_kwds["color"] patches.append( Line2D( [0], [0], linestyle="none", marker="o", alpha=merged_kwds.get("alpha", 1), markersize=10, markerfacecolor=merged_kwds.get("facecolor", None), markeredgecolor=merged_kwds.get("edgecolor", None), markeredgewidth=merged_kwds.get( "linewidth", 1 if merged_kwds.get("edgecolor", False) else 0 ), ) ) categories.append(merged_kwds.get("label", "NaN")) legend_kwds.setdefault("numpoints", 1) legend_kwds.setdefault("loc", "best") ax.legend(patches, categories, **legend_kwds) else: if cax is not None: legend_kwds.setdefault("cax", cax) else: legend_kwds.setdefault("ax", ax) n_cmap.set_array([]) ax.get_figure().colorbar(n_cmap, **legend_kwds) plt.draw() return ax
def plot_dataframe( df, column=None, cmap=None, color=None, ax=None, cax=None, categorical=False, legend=False, scheme=None, k=5, vmin=None, vmax=None, markersize=None, figsize=None, legend_kwds=None, categories=None, classification_kwds=None, missing_kwds=None, aspect="auto", **style_kwds ): """ Plot a GeoDataFrame. Generate a plot of a GeoDataFrame with matplotlib. If a column is specified, the plot coloring will be based on values in that column. Parameters ---------- df : GeoDataFrame The GeoDataFrame to be plotted. Currently Polygon, MultiPolygon, LineString, MultiLineString and Point geometries can be plotted. column : str, np.array, pd.Series (default None) The name of the dataframe column, np.array, or pd.Series to be plotted. If np.array or pd.Series are used then it must have same length as dataframe. Values are used to color the plot. Ignored if `color` is also set. cmap : str (default None) The name of a colormap recognized by matplotlib. color : str (default None) If specified, all objects will be colored uniformly. ax : matplotlib.pyplot.Artist (default None) axes on which to draw the plot cax : matplotlib.pyplot Artist (default None) axes on which to draw the legend in case of color map. categorical : bool (default False) If False, cmap will reflect numerical values of the column being plotted. For non-numerical columns, this will be set to True. legend : bool (default False) Plot a legend. Ignored if no `column` is given, or if `color` is given. scheme : str (default None) Name of a choropleth classification scheme (requires mapclassify). A mapclassify.MapClassifier object will be used under the hood. Supported are all schemes provided by mapclassify (e.g. 'BoxPlot', 'EqualInterval', 'FisherJenks', 'FisherJenksSampled', 'HeadTailBreaks', 'JenksCaspall', 'JenksCaspallForced', 'JenksCaspallSampled', 'MaxP', 'MaximumBreaks', 'NaturalBreaks', 'Quantiles', 'Percentiles', 'StdMean', 'UserDefined'). Arguments can be passed in classification_kwds. k : int (default 5) Number of classes (ignored if scheme is None) vmin : None or float (default None) Minimum value of cmap. If None, the minimum data value in the column to be plotted is used. vmax : None or float (default None) Maximum value of cmap. If None, the maximum data value in the column to be plotted is used. markersize : str or float or sequence (default None) Only applies to point geometries within a frame. If a str, will use the values in the column of the frame specified by markersize to set the size of markers. Otherwise can be a value to apply to all points, or a sequence of the same length as the number of points. figsize : tuple of integers (default None) Size of the resulting matplotlib.figure.Figure. If the argument axes is given explicitly, figsize is ignored. legend_kwds : dict (default None) Keyword arguments to pass to matplotlib.pyplot.legend() or matplotlib.pyplot.colorbar(). Additional accepted keywords when `scheme` is specified: fmt : string A formatting specification for the bin edges of the classes in the legend. For example, to have no decimals: ``{"fmt": "{:.0f}"}``. labels : list-like A list of legend labels to override the auto-generated labels. Needs to have the same number of elements as the number of classes (`k`). categories : list-like Ordered list-like object of categories to be used for categorical plot. 
classification_kwds : dict (default None) Keyword arguments to pass to mapclassify missing_kwds : dict (default None) Keyword arguments specifying color options (as style_kwds) to be passed on to geometries with missing values in addition to or overwriting other style kwds. If None, geometries with missing values are not plotted. aspect : 'auto', 'equal', None or float (default 'auto') Set aspect of axis. If 'auto', the default aspect for map plots is 'equal'; if however data are not projected (coordinates are long/lat), the aspect is by default set to 1/cos(df_y * pi/180) with df_y the y coordinate of the middle of the GeoDataFrame (the mean of the y range of bounding box) so that a long/lat square appears square in the middle of the plot. This implies an Equirectangular projection. If None, the aspect of ax won't be changed. It can also be set manually (float) as the ratio of y-unit to x-unit. **style_kwds : dict Style options to be passed on to the actual plot function, such as ``edgecolor``, ``facecolor``, ``linewidth``, ``markersize``, ``alpha``. Returns ------- ax : matplotlib axes instance """ if "colormap" in style_kwds: warnings.warn( "'colormap' is deprecated, please use 'cmap' instead " "(for consistency with matplotlib)", FutureWarning, ) cmap = style_kwds.pop("colormap") if "axes" in style_kwds: warnings.warn( "'axes' is deprecated, please use 'ax' instead " "(for consistency with pandas)", FutureWarning, ) ax = style_kwds.pop("axes") if column is not None and color is not None: warnings.warn( "Only specify one of 'column' or 'color'. Using 'color'.", UserWarning ) column = None try: import matplotlib.pyplot as plt except ImportError: raise ImportError( "The matplotlib package is required for plotting in geopandas. " "You can install it using 'conda install -c conda-forge matplotlib' or " "'pip install matplotlib'." ) if ax is None: if cax is not None: raise ValueError("'ax' can not be None if 'cax' is not.") fig, ax = plt.subplots(figsize=figsize) if aspect == "auto": if df.crs and df.crs.is_geographic: bounds = df.total_bounds y_coord = np.mean([bounds[1], bounds[3]]) ax.set_aspect(1 / np.cos(y_coord * np.pi / 180)) # formula ported from R package sp # https://github.com/edzer/sp/blob/master/R/mapasp.R else: ax.set_aspect("equal") elif aspect == "equal": ax.set_aspect("equal") elif aspect is not None: ax.set_aspect(aspect) if df.empty: warnings.warn( "The GeoDataFrame you are attempting to plot is " "empty. Nothing has been displayed.", UserWarning, ) return ax if isinstance(markersize, str): markersize = df[markersize].values if column is None: return plot_series( df.geometry, cmap=cmap, color=color, ax=ax, figsize=figsize, markersize=markersize, aspect=aspect, **style_kwds ) # To accept pd.Series and np.arrays as column if isinstance(column, (np.ndarray, pd.Series)): if column.shape[0] != df.shape[0]: raise ValueError( "The dataframe and given column have different number of rows." 
) else: values = column else: values = df[column] if pd.api.types.is_categorical_dtype(values.dtype): if categories is not None: raise ValueError( "Cannot specify 'categories' when column has categorical dtype" ) categorical = True elif values.dtype is np.dtype("O") or categories: categorical = True nan_idx = np.asarray(pd.isna(values), dtype="bool") # Define `values` as a Series if categorical: if cmap is None: cmap = "tab10" cat = pd.Categorical(values, categories=categories) categories = list(cat.categories) # values missing in the Categorical but not in original values missing = list(np.unique(values[~nan_idx & cat.isna()])) if missing: raise ValueError( "Column contains values not listed in categories. " "Missing categories: {}.".format(missing) ) values = cat.codes[~nan_idx] vmin = 0 if vmin is None else vmin vmax = len(categories) - 1 if vmax is None else vmax if scheme is not None: if classification_kwds is None: classification_kwds = {} if "k" not in classification_kwds: classification_kwds["k"] = k binning = _mapclassify_choro(values[~nan_idx], scheme, **classification_kwds) # set categorical to True for creating the legend categorical = True if legend_kwds is not None and "labels" in legend_kwds: if len(legend_kwds["labels"]) != binning.k: raise ValueError( "Number of labels must match number of bins, " "received {} labels for {} bins".format( len(legend_kwds["labels"]), binning.k ) ) else: categories = list(legend_kwds.pop("labels")) else: fmt = "{:.2f}" if legend_kwds is not None and "fmt" in legend_kwds: fmt = legend_kwds.pop("fmt") categories = binning.get_legend_classes(fmt) values = np.array(binning.yb) # fill values with placeholder where were NaNs originally to map them properly # (after removing them in categorical or scheme) if categorical: for n in np.where(nan_idx)[0]: values = np.insert(values, n, values[0]) mn = values[~np.isnan(values)].min() if vmin is None else vmin mx = values[~np.isnan(values)].max() if vmax is None else vmax # decompose GeometryCollections geoms, multiindex = _flatten_multi_geoms(df.geometry, prefix="Geom") values = np.take(values, multiindex, axis=0) nan_idx = np.take(nan_idx, multiindex, axis=0) expl_series = geopandas.GeoSeries(geoms) geom_types = expl_series.type poly_idx = np.asarray((geom_types == "Polygon") | (geom_types == "MultiPolygon")) line_idx = np.asarray( (geom_types == "LineString") | (geom_types == "MultiLineString") | (geom_types == "LinearRing") ) point_idx = np.asarray((geom_types == "Point") | (geom_types == "MultiPoint")) # plot all Polygons and all MultiPolygon components in the same collection polys = expl_series[poly_idx & np.invert(nan_idx)] subset = values[poly_idx & np.invert(nan_idx)] if not polys.empty: _plot_polygon_collection( ax, polys, subset, vmin=mn, vmax=mx, cmap=cmap, **style_kwds ) # plot all LineStrings and MultiLineString components in same collection lines = expl_series[line_idx & np.invert(nan_idx)] subset = values[line_idx & np.invert(nan_idx)] if not lines.empty: _plot_linestring_collection( ax, lines, subset, vmin=mn, vmax=mx, cmap=cmap, **style_kwds ) # plot all Points in the same collection points = expl_series[point_idx & np.invert(nan_idx)] subset = values[point_idx & np.invert(nan_idx)] if not points.empty: if isinstance(markersize, np.ndarray): markersize = np.take(markersize, multiindex, axis=0) markersize = markersize[point_idx & np.invert(nan_idx)] _plot_point_collection( ax, points, subset, vmin=mn, vmax=mx, markersize=markersize, cmap=cmap, **style_kwds ) if missing_kwds is not None: if 
color: if "color" not in missing_kwds: missing_kwds["color"] = color merged_kwds = style_kwds.copy() merged_kwds.update(missing_kwds) plot_series(expl_series[nan_idx], ax=ax, **merged_kwds) if legend and not color: if legend_kwds is None: legend_kwds = {} if "fmt" in legend_kwds: legend_kwds.pop("fmt") from matplotlib.lines import Line2D from matplotlib.colors import Normalize from matplotlib import cm norm = style_kwds.get("norm", None) if not norm: norm = Normalize(vmin=mn, vmax=mx) n_cmap = cm.ScalarMappable(norm=norm, cmap=cmap) if categorical: patches = [] for value, cat in enumerate(categories): patches.append( Line2D( [0], [0], linestyle="none", marker="o", alpha=style_kwds.get("alpha", 1), markersize=10, markerfacecolor=n_cmap.to_rgba(value), markeredgewidth=0, ) ) if missing_kwds is not None: if "color" in merged_kwds: merged_kwds["facecolor"] = merged_kwds["color"] patches.append( Line2D( [0], [0], linestyle="none", marker="o", alpha=merged_kwds.get("alpha", 1), markersize=10, markerfacecolor=merged_kwds.get("facecolor", None), markeredgecolor=merged_kwds.get("edgecolor", None), markeredgewidth=merged_kwds.get( "linewidth", 1 if merged_kwds.get("edgecolor", False) else 0 ), ) ) categories.append(merged_kwds.get("label", "NaN")) legend_kwds.setdefault("numpoints", 1) legend_kwds.setdefault("loc", "best") ax.legend(patches, categories, **legend_kwds) else: if cax is not None: legend_kwds.setdefault("cax", cax) else: legend_kwds.setdefault("ax", ax) n_cmap.set_array([]) ax.get_figure().colorbar(n_cmap, **legend_kwds) plt.draw() return ax
30,395
def find_groups_api_response_to_context(api_response):
    groups_list = list()
    for group in api_response['data'][0]['folders']:
        group_entry = {
            'Name': group['description'],
            'Source': group['source'],
            'ID': group['id'],
            'NumberOfUsers': group['userCount'],
            'ParentID': group['parentId'],
            'NumberOfChildGroups': group['folderCount']
        }
        groups_list.append(group_entry)

    return {'Mimecast.Groups(val.ID && val.ID == obj.ID)': groups_list}
def find_groups_api_response_to_context(api_response):
    groups_list = list()
    for group in api_response['data'][0]['folders']:
        group_entry = {
            'Name': group['description'],
            'Source': group['source'],
            'ID': group['id'],
            'NumberOfUsers': group['userCount'],
            'ParentID': group['parentId'],
            'NumberOfChildGroups': group['folderCount']
        }
        groups_list.append(group_entry)

    return {'Mimecast.Group(val.ID && val.ID == obj.ID)': groups_list}
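A quick illustration of what the helper above produces, using a hypothetical response shaped like the fields it reads (all values are made up); note that the modified version writes the singular Mimecast.Group context key:

sample_response = {
    'data': [{
        'folders': [{
            'description': 'Engineering', 'source': 'cloud', 'id': 42,
            'userCount': 7, 'parentId': 1, 'folderCount': 0,
        }]
    }]
}
context = find_groups_api_response_to_context(sample_response)
# -> {'Mimecast.Group(val.ID && val.ID == obj.ID)': [{'Name': 'Engineering', 'Source': 'cloud',
#     'ID': 42, 'NumberOfUsers': 7, 'ParentID': 1, 'NumberOfChildGroups': 0}]}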
22,877
def diropenbox(msg=None, title=None, default=None):
    """
    A dialog to get a directory name.
    Returns the name of a directory, or None if user chose to cancel.

    If the "default" argument specifies a directory name, and that
    directory exists, then the dialog box will start with that directory.

    :param str msg: used in the window title
    :param str title: the window title
    :param str default: starting directory when dialog opens
    :return: Normalized path selected by user
    """
    title = ut.getFileDialogTitle(msg, title)
    localRoot = tk.Tk()
    localRoot.withdraw()
    localRoot.lift()
    localRoot.attributes('-topmost', 1)
    localRoot.attributes('-topmost', 0)
    if not default:
        default = None
    localRoot.update()  # fix ghost window issue #119 on mac.
    f = ut.tk_FileDialog.askdirectory(
        parent=localRoot, title=title, initialdir=default, initialfile=None
    )
    localRoot.destroy()
    if not f:
        return None
    return os.path.normpath(f)
def diropenbox(msg=None, title=None, default=None):
    """
    A dialog to get a directory name.
    Returns the name of a directory, or None if user chose to cancel.

    If the "default" argument specifies a directory name, and that
    directory exists, then the dialog box will start with that directory.

    :param str msg: used in the window title on some platforms
    :param str title: the window title
    :param str default: starting directory when dialog opens
    :return: Normalized path selected by user
    """
    title = ut.getFileDialogTitle(msg, title)
    localRoot = tk.Tk()
    localRoot.withdraw()
    localRoot.lift()
    localRoot.attributes('-topmost', 1)
    localRoot.attributes('-topmost', 0)
    if not default:
        default = None
    localRoot.update()  # fix ghost window issue #119 on mac.
    f = ut.tk_FileDialog.askdirectory(
        parent=localRoot, title=title, initialdir=default, initialfile=None
    )
    localRoot.destroy()
    if not f:
        return None
    return os.path.normpath(f)
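A brief usage sketch of the dialog above, assuming easygui is importable and a display is available (the message and title strings are illustrative):

chosen = diropenbox(msg="Select a data directory", title="Data folder")
if chosen is None:
    print("No directory selected")   # user cancelled the dialog
else:
    print("Selected:", chosen)       # normalized path returned by the dialog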
39,988
def _use_external_script_type(client) -> bool:
    if client.features.model == "1" and client.version > (1, 10, 5):
        return True
    if client.features.model == "T" and client.version > (2, 4, 3):
        return True
    return False
def _use_external_script_type(client) -> bool:
    if client.features.model == "1" and client.version > (1, 10, 5):
        return True
    if client.features.model == "T" and client.version > (2, 5, 1):
        return True
    return False
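The gate above relies on Python's lexicographic tuple comparison; a small check of the boundary behaviour introduced by raising the model T threshold (the version numbers are illustrative):

assert (2, 5, 2) > (2, 5, 1)            # first firmware above the new threshold
assert not ((2, 5, 1) > (2, 5, 1))      # the threshold itself is excluded
assert (2, 4, 4) > (2, 4, 3)            # passed the old gate ...
assert not ((2, 4, 4) > (2, 5, 1))      # ... but not the new one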
7,525
def test_sigma_clip_masked_data_values():
    """
    Test that the data values & type returned by sigma_clip are the same as
    its input when using masked=True (rather than being upcast to float64 &
    containing NaNs as in issue #10605) and also that the input data get
    copied or referenced as appropriate.
    """
    data = np.array([-2, 5, -5, -6, 20, 14, 1])

    result = sigma_clip(data, sigma=1.5, maxiters=3, axis=None, masked=True, copy=True)
    assert result.dtype == data.dtype
    assert np.all(result.data == data)
    assert not np.shares_memory(result.data, data)

    result = sigma_clip(data, sigma=1.5, maxiters=3, axis=None, masked=True, copy=False)
    assert result.dtype == data.dtype
    assert np.all(result.data == data)
    assert np.shares_memory(result.data, data)
    # (The fact that the arrays share memory probably also means they're the
    # same, but doesn't strictly prove it, eg. one could be reversed.)

    result = sigma_clip(data, sigma=1.5, maxiters=3, axis=0, masked=True, copy=True)
    assert result.dtype == data.dtype
    assert np.all(result.data == data)
    assert not np.shares_memory(result.data, data)

    result = sigma_clip(data, sigma=1.5, maxiters=3, axis=0, masked=True, copy=False)
    assert result.dtype == data.dtype
    assert np.all(result.data == data)
    assert np.shares_memory(result.data, data)
def test_sigma_clip_masked_data_values():
    """
    Test that the data values & type returned by sigma_clip are the same as
    its input when using masked=True (rather than being upcast to float64 &
    containing NaNs as in issue #10605) and also that the input data get
    copied or referenced as appropriate.
    """
    data = np.array([-2, 5, -5, -6, 20, 14, 1])

    result = sigma_clip(data, sigma=1.5, maxiters=3, axis=None, masked=True, copy=True)
    assert result.dtype == data.dtype
    assert np.all(result.data == data)
    assert not np.shares_memory(result.data, data)

    result = sigma_clip(data, sigma=1.5, maxiters=3, axis=None, masked=True, copy=False)
    assert result.dtype == data.dtype
    assert np.all(result.data == data)
    assert np.shares_memory(result.data, data)
    # (The fact that the arrays share memory probably also means they're the
    # same, but doesn't strictly prove it, eg. one could be reversed.)

    result = sigma_clip(data, sigma=1.5, maxiters=3, axis=0, masked=True, copy=True)
    assert result.dtype == data.dtype
    assert_equal(result.data, data)
    assert not np.shares_memory(result.data, data)

    result = sigma_clip(data, sigma=1.5, maxiters=3, axis=0, masked=True, copy=False)
    assert result.dtype == data.dtype
    assert np.all(result.data == data)
    assert np.shares_memory(result.data, data)
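The copy/reference assertions above hinge on numpy memory sharing, and the modified version swaps one np.all check for numpy.testing.assert_equal. A minimal illustration with plain arrays, independent of astropy's sigma_clip:

import numpy as np
from numpy.testing import assert_equal

a = np.array([-2, 5, -5, -6, 20, 14, 1])
view = a[:]                      # a slice is a view: it shares the underlying buffer
copy = a.copy()                  # an explicit copy does not
assert np.shares_memory(a, view)
assert not np.shares_memory(a, copy)
assert_equal(copy, a)            # element-wise equality with a clearer failure message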
47,496
def evaluate(args):
    model.eval()
    losses = []
    for step, batch in enumerate(eval_dataloader):
        with torch.no_grad():
            outputs = model(batch, labels=batch)
        loss = outputs.loss.repeat(args.valid_batch_size)
        losses.append(accelerator.gather(loss))

        if args.max_eval_steps > 0 and step >= args.max_eval_steps:
            break
    losses = torch.cat(losses)
    eval_size = eval_dataloader.dataset.current_size
    losses = losses[:eval_size]
    loss = torch.mean(losses)
    try:
        perplexity = torch.exp(loss)
    except OverflowError:
        perplexity = float("inf")
    return loss.item(), perplexity.item()
def evaluate(args):
    model.eval()
    losses = []
    for step, batch in enumerate(eval_dataloader):
        with torch.no_grad():
            outputs = model(batch, labels=batch)
        loss = outputs.loss.repeat(args.valid_batch_size)
        losses.append(accelerator.gather(loss))

        if args.max_eval_steps > 0 and step >= args.max_eval_steps:
            break
    losses = torch.cat(losses)
    loss = losses[:eval_dataloader.dataset.current_size].mean()
    try:
        perplexity = torch.exp(loss)
    except OverflowError:
        perplexity = float("inf")
    return loss.item(), perplexity.item()
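Both variants compute perplexity as exp(mean loss) behind an overflow guard; a tiny standalone sketch of that pattern, with math.exp standing in for torch.exp (the loss values are illustrative):

import math

def perplexity_from_loss(mean_loss: float) -> float:
    try:
        return math.exp(mean_loss)
    except OverflowError:        # math.exp raises for very large losses
        return float("inf")

assert round(perplexity_from_loss(2.0), 2) == 7.39
assert perplexity_from_loss(10_000.0) == float("inf")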
7,172
def test_cell():
    """ Test that "page" image can be loaded. """
    data.page()
def test_cell():
    """ Test that "cell" image can be loaded. """
    data.page()
31,406
def update_violation_command(client: Client, args: Dict[str, Any]) -> CommandResults:
    """
    :type client: Client
    :param client: Gamma client
    :param args: all command arguments, usually passed from demisto.args()
        args['name'] is used as input name
    :return: A CommandResults object that is then passed to return_results
    :rtype: ``CommandResults``
    """
    violation = args["violation"]
    status = args["status"].upper()
    notes = args["notes"]

    if not int(violation) >= 1:
        raise ValueError("violation must be greater than 0")
    if status not in VALID_VIOLATION_STATUSES:
        raise ValueError("status must be one of the following: OPEN, RESOLVED, IGNORED")

    client.update_violation(violation, status, notes)
    updated_violation = client.get_violation(violation)

    human_readable = ''
    for i in updated_violation['response']:
        violation_id = i['violation_id']
        human_readable += f'### Updated Violation {i["violation_id"]} \r' \
                          f'|Violation ID|Status|Timestamp|Dashboard URL|User|App Name| \r' \
                          f'|---|---|---|---|---|---| \r' \
                          f'| {violation_id} | {i["violation_status"]} | {timestamp_to_datestring(i["violation_event_timestamp"] * 1000)} | {i["dashboard_url"]} | {i["user"]} | {i["app_name"]} | \r'

    return CommandResults(
        readable_output=human_readable,
        outputs_prefix="GammaViolation",
        outputs_key_field="violation_id",
        outputs=updated_violation,
        raw_response=updated_violation
    )
def update_violation_command(client: Client, args: Dict[str, Any]) -> CommandResults:
    """
    :type client: Client
    :param client: Gamma client
    :param args: all command arguments, usually passed from demisto.args()
        args['name'] is used as input name
    :return: A CommandResults object that is then passed to return_results
    :rtype: ``CommandResults``
    """
    violation = args["violation"]
    status = args["status"].upper()
    notes = args["notes"]

    if int(violation) < 1:
        raise ValueError("violation must be greater than 0")
    if status not in VALID_VIOLATION_STATUSES:
        raise ValueError("status must be one of the following: OPEN, RESOLVED, IGNORED")

    client.update_violation(violation, status, notes)
    updated_violation = client.get_violation(violation)

    human_readable = ''
    for i in updated_violation['response']:
        violation_id = i['violation_id']
        human_readable += f'### Updated Violation {i["violation_id"]} \r' \
                          f'|Violation ID|Status|Timestamp|Dashboard URL|User|App Name| \r' \
                          f'|---|---|---|---|---|---| \r' \
                          f'| {violation_id} | {i["violation_status"]} | {timestamp_to_datestring(i["violation_event_timestamp"] * 1000)} | {i["dashboard_url"]} | {i["user"]} | {i["app_name"]} | \r'

    return CommandResults(
        readable_output=human_readable,
        outputs_prefix="GammaViolation",
        outputs_key_field="violation_id",
        outputs=updated_violation,
        raw_response=updated_violation
    )
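The only behavioural content of the change above is the inverted guard; a quick check that both forms reject exactly the same inputs (the sample values are arbitrary):

for v in ("-3", "0", "1", "7"):
    assert (not int(v) >= 1) == (int(v) < 1)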
19,627
def get_cran_index(cran_url, session, verbose=True):
    if verbose:
        print("Fetching main index from %s" % cran_url)
    r = session.get(cran_url + "/src/contrib/PACKAGES.gz")
    r.raise_for_status()
    records = {}
    package = None
    for line in gzip.decompress(r.content).decode('utf-8', errors='replace').splitlines():
        if package is None:
            if line.startswith('Package: '):
                package = line.rstrip().split(' ', 1)[1]
        elif line.startswith('Version: '):
            records[package.lower()] = (package, line.rstrip().split(' ', 1)[1])
            package = None
    r = session.get(cran_url + "/src/contrib/Archive/")
    if r.status_code in (403, 404):
        if verbose:
            print("Cannot fetch an archive index from %s" % cran_url)
        return records
    r.raise_for_status()
    for p in re.findall(r'<td><a href="([^"]+)/">\1/</a></td>', r.text):
        if re.match(r'^[A-Za-z]', p):
            records.setdefault(p.lower(), (p, None))
    return records
def get_cran_index(cran_url, session, verbose=True):
    if verbose:
        print("Fetching main index from %s" % cran_url)
    r = session.get(cran_url + "/src/contrib/PACKAGES.gz")
    r.raise_for_status()
    records = {}
    package = None
    for line in gzip.decompress(r.content).decode('utf-8', errors='replace').splitlines():
        if package is None:
            if line.startswith('Package: '):
                package = line.rstrip().split(' ', 1)[1]
        elif line.startswith('Version: '):
            records[package.lower()] = (package, line.rstrip().split(' ', 1)[1])
            package = None
    r = session.get(cran_url + "/src/contrib/Archive/")
    if r.status_code in (403, 404):
        if verbose:
            warnings.warn("Cannot fetch an archive index from %s" % cran_url, UserWarning)
        return records
    r.raise_for_status()
    for p in re.findall(r'<td><a href="([^"]+)/">\1/</a></td>', r.text):
        if re.match(r'^[A-Za-z]', p):
            records.setdefault(p.lower(), (p, None))
    return records
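The modified version routes the archive-index failure through the warnings machinery instead of print, so callers can filter or capture it; a minimal sketch of that difference (the URL is a placeholder):

import warnings

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    warnings.warn("Cannot fetch an archive index from https://cran.example", UserWarning)

assert len(caught) == 1
assert issubclass(caught[0].category, UserWarning)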
35,395
def generate_plots(ulog, px4_ulog, db_data, vehicle_data): """ create a list of bokeh plots (and widgets) to show """ plots = [] data = ulog.data_list # initialize flight mode changes try: cur_dataset = ulog.get_dataset('commander_state') flight_mode_changes = cur_dataset.list_value_changes('main_state') flight_mode_changes.append((ulog.last_timestamp, -1)) except (KeyError, IndexError) as error: flight_mode_changes = [] # VTOL state changes vtol_states = None try: cur_dataset = ulog.get_dataset('vehicle_status') if np.amax(cur_dataset.data['is_vtol']) == 1: vtol_states = cur_dataset.list_value_changes('in_transition_mode') # find mode after transitions (states: 1=transition, 2=FW, 3=MC) for i in range(len(vtol_states)): if vtol_states[i][1] == 0: t = vtol_states[i][0] idx = np.argmax(cur_dataset.data['timestamp'] >= t) + 1 vtol_states[i] = (t, 2 + cur_dataset.data['is_rotary_wing'][idx]) vtol_states.append((ulog.last_timestamp, -1)) except (KeyError, IndexError) as error: vtol_states = None # Heading sys_name = '' if 'sys_name' in ulog.msg_info_dict: sys_name = cgi.escape(ulog.msg_info_dict['sys_name']) + ' ' div = Div(text="<h1>"+sys_name + px4_ulog.get_mav_type()+"</h1>", width=int(plot_width*0.9)) header_divs = [div] if db_data.description != '': div_descr = Div(text="<h4>"+db_data.description+"</h4>", width=int(plot_width*0.9)) header_divs.append(div_descr) # airframe table_text = [] if 'SYS_AUTOSTART' in ulog.initial_parameters: sys_autostart = ulog.initial_parameters['SYS_AUTOSTART'] airframe_data = get_airframe_data(sys_autostart) if airframe_data is None: table_text.append(('Airframe', str(sys_autostart))) else: airframe_type = '' if 'type' in airframe_data: airframe_type = ', '+airframe_data['type'] table_text.append(('Airframe', airframe_data.get('name')+ airframe_type+' <small>('+str(sys_autostart)+')</small>')) # HW & SW sys_hardware = '' if 'ver_hw' in ulog.msg_info_dict: sys_hardware = cgi.escape(ulog.msg_info_dict['ver_hw']) table_text.append(('Hardware', sys_hardware)) release_str = ulog.get_version_info_str() if release_str is None: release_str = '' release_str_suffix = '' else: release_str += ' <small>(' release_str_suffix = ')</small>' if 'ver_sw' in ulog.msg_info_dict: ver_sw = cgi.escape(ulog.msg_info_dict['ver_sw']) ver_sw_link = 'https://github.com/PX4/Firmware/commit/'+ver_sw table_text.append(('Software Version', release_str + '<a href="'+ver_sw_link+'" target="_blank">'+ver_sw[:8]+'</a>'+ release_str_suffix)) if 'sys_os_name' in ulog.msg_info_dict and 'sys_os_ver_release' in ulog.msg_info_dict: os_name = cgi.escape(ulog.msg_info_dict['sys_os_name']) os_ver = ulog.get_version_info_str('sys_os_ver_release') if os_ver is not None: table_text.append(('OS Version', os_name + ', ' + os_ver)) table_text.append(('Estimator', px4_ulog.get_estimator())) # logging start time & date try: # get the first non-zero timestamp gps_data = ulog.get_dataset('vehicle_gps_position') indices = np.nonzero(gps_data.data['time_utc_usec']) if len(indices[0]) > 0: # we use the timestamp from the log and then convert it with JS to # display with local timezone logging_start_time = int(gps_data.data['time_utc_usec'][indices[0][0]] / 1000000) js_code = """ <script type="text/javascript"> var logging_span = $('#logging-start-element'); var d = new Date(0); d.setUTCSeconds(logging_span.text()); var date_str = ("0" + d.getDate()).slice(-2) + "-" + ("0"+(d.getMonth()+1)).slice(-2) + "-" + d.getFullYear() + " " + ("0" + d.getHours()).slice(-2) + ":" + ("0" + d.getMinutes()).slice(-2); 
logging_span.text(date_str); logging_span.show(); </script> """ table_text.append(('Logging Start', '<span style="display:none" id="logging-start-element">'+ str(logging_start_time)+'</span>'+js_code)) except: # Ignore. Eg. if topic not found pass # logging duration m, s = divmod(int((ulog.last_timestamp - ulog.start_timestamp)/1e6), 60) h, m = divmod(m, 60) table_text.append(('Logging Duration', '{:d}:{:02d}:{:02d}'.format(h, m, s))) # dropouts dropout_durations = [dropout.duration for dropout in ulog.dropouts] if len(dropout_durations) > 0: total_duration = sum(dropout_durations) / 1000 if total_duration > 5: total_duration_str = '{:.0f}'.format(total_duration) else: total_duration_str = '{:.2f}'.format(total_duration) table_text.append(('Dropouts', '{:} ({:} s)'.format( len(dropout_durations), total_duration_str))) # total vehicle flight time flight_time_s = get_total_flight_time(ulog) if flight_time_s is not None: m, s = divmod(int(flight_time_s), 60) h, m = divmod(m, 60) days, h = divmod(h, 24) flight_time_str = '' if days > 0: flight_time_str += '{:d} days '.format(days) if h > 0: flight_time_str += '{:d} hours '.format(h) if m > 0: flight_time_str += '{:d} minutes '.format(m) flight_time_str += '{:d} seconds '.format(s) table_text.append(('Vehicle Flight Time', flight_time_str)) # vehicle UUID (and name if provided). SITL does not have a UUID if 'sys_uuid' in ulog.msg_info_dict and sys_hardware != 'SITL': sys_uuid = cgi.escape(ulog.msg_info_dict['sys_uuid']) if vehicle_data is not None and vehicle_data.name != '': sys_uuid = sys_uuid + ' (' + vehicle_data.name + ')' if len(sys_uuid) > 0: table_text.append(('Vehicle UUID', sys_uuid)) # Wind speed, rating, feedback if db_data.wind_speed >= 0: table_text.append(('Wind Speed', db_data.wind_speed_str())) if len(db_data.rating) > 0: table_text.append(('Flight Rating', db_data.rating_str())) if len(db_data.feedback) > 0: table_text.append(('Feedback', db_data.feedback.replace('\n', '<br/>'))) if len(db_data.video_url) > 0: table_text.append(('Video', '<a href="'+db_data.video_url+ '" target="_blank">'+db_data.video_url+'</a>')) # generate the table divs_text = '<table>' + ''.join( ['<tr><td class="left">'+a+ ':</td><td>'+b+'</td></tr>' for a, b in table_text]) + '</table>' header_divs.append(Div(text=divs_text, width=int(plot_width*0.9))) plots.append(widgetbox(header_divs, width=int(plot_width*0.9))) # FIXME: for now, we use Google maps directly without bokeh, because it's not working reliably # GPS map # gps_plots = [] # gps_titles = [] # plot = plot_map(ulog, plot_config, map_type='google', api_key = # get_google_maps_api_key(), setpoints=False) # plot = None # if plot is not None: # gps_plots.append(plot) # gps_titles.append('GPS Map: Satellite') # # plot = plot_map(ulog, plot_config, map_type='plain', setpoints=True) # if plot is not None: # gps_plots.append(plot) # gps_titles.append('GPS Map: Plain') # # data_plot = DataPlot2D(data, plot_config, 'vehicle_local_position', # x_axis_label = '[m]', y_axis_label='[m]', plot_height='large') # data_plot.add_graph('y', 'x', colors2[0], 'Estimated') # data_plot.change_dataset('vehicle_local_position_setpoint') # data_plot.add_graph('y', 'x', colors2[1], 'Setpoint') # if data_plot.finalize() is not None: # gps_plots.append(data_plot.bokeh_plot) # gps_titles.append('Local Position') # # # if len(gps_plots) >= 2: # tabs = [] # for i in range(len(gps_plots)): # tabs.append(Panel(child=gps_plots[i], title=gps_titles[i])) # gps_plot_height=plot_config['plot_height']['large'] + 30 # 
plots.append(Tabs(tabs=tabs, width=plot_width, height=gps_plot_height)) # elif len(gps_plots) == 1: # plots.extend(gps_plots) # Position plot data_plot = DataPlot2D(data, plot_config, 'vehicle_local_position', x_axis_label='[m]', y_axis_label='[m]', plot_height='large') data_plot.add_graph('y', 'x', colors2[0], 'Estimated', check_if_all_zero=True) if not data_plot.had_error: # vehicle_local_position is required data_plot.change_dataset('vehicle_local_position_setpoint') data_plot.add_graph('y', 'x', colors2[1], 'Setpoint') # groundtruth (SITL only) data_plot.change_dataset('vehicle_local_position_groundtruth') data_plot.add_graph('y', 'x', color_gray, 'Groundtruth') # GPS + position setpoints plot_map(ulog, plot_config, map_type='plain', setpoints=True, bokeh_plot=data_plot.bokeh_plot) if data_plot.finalize() is not None: plots.append(data_plot.bokeh_plot) curdoc().template_variables['has_position_data'] = True # initialize parameter changes changed_params = None if not 'replay' in ulog.msg_info_dict: # replay can have many param changes if len(ulog.changed_parameters) > 0: changed_params = ulog.changed_parameters plots.append(None) # save space for the param change button ### Add all data plots ### x_range_offset = (ulog.last_timestamp - ulog.start_timestamp) * 0.05 x_range = Range1d(ulog.start_timestamp - x_range_offset, ulog.last_timestamp + x_range_offset) # Altitude estimate data_plot = DataPlot(data, plot_config, 'vehicle_gps_position', y_axis_label='[m]', title='Altitude Estimate', changed_params=changed_params, x_range=x_range) data_plot.add_graph([lambda data: ('alt', data['alt']*0.001)], colors8[0:1], ['GPS Altitude']) data_plot.change_dataset('sensor_combined') data_plot.add_graph(['baro_alt_meter'], colors8[1:2], ['Barometer Altitude']) data_plot.change_dataset('vehicle_global_position') data_plot.add_graph(['alt'], colors8[2:3], ['Fused Altitude Estimation']) data_plot.change_dataset('position_setpoint_triplet') data_plot.add_circle(['current.alt'], [plot_config['mission_setpoint_color']], ['Altitude Setpoint']) data_plot.change_dataset('tecs_status') data_plot.add_graph(['altitude_sp'], colors8[3:4], ['Tecs Altitude Setpoint']) plot_flight_modes_background(data_plot.bokeh_plot, flight_mode_changes, vtol_states) if data_plot.finalize() is not None: plots.append(data_plot) # Roll/Pitch/Yaw angle & angular rate for axis in ['roll', 'pitch', 'yaw']: # angle axis_name = axis.capitalize() data_plot = DataPlot(data, plot_config, 'vehicle_attitude', y_axis_label='[deg]', title=axis_name+' Angle', plot_height='small', changed_params=changed_params, x_range=x_range) data_plot.add_graph([lambda data: (axis, np.rad2deg(data[axis]))], colors2[0:1], [axis_name+' Estimated']) data_plot.change_dataset('vehicle_attitude_setpoint') data_plot.add_graph([lambda data: (axis+'_d', np.rad2deg(data[axis+'_d']))], colors2[1:2], [axis_name+' Setpoint']) data_plot.change_dataset('vehicle_attitude_groundtruth') data_plot.add_graph([lambda data: (axis, np.rad2deg(data[axis]))], [color_gray], [axis_name+' Groundtruth']) plot_flight_modes_background(data_plot.bokeh_plot, flight_mode_changes, vtol_states) if data_plot.finalize() is not None: plots.append(data_plot) # rate data_plot = DataPlot(data, plot_config, 'vehicle_attitude', y_axis_label='[deg/s]', title=axis_name+' Angular Rate', plot_height='small', changed_params=changed_params, x_range=x_range) data_plot.add_graph([lambda data: (axis+'speed', np.rad2deg(data[axis+'speed']))], colors2[0:1], [axis_name+' Rate Estimated']) 
data_plot.change_dataset('vehicle_rates_setpoint') data_plot.add_graph([lambda data: (axis, np.rad2deg(data[axis]))], colors2[1:2], [axis_name+' Rate Setpoint']) data_plot.change_dataset('vehicle_attitude_groundtruth') data_plot.add_graph([lambda data: (axis+'speed', np.rad2deg(data[axis+'speed']))], [color_gray], [axis_name+' Rate Groundtruth']) plot_flight_modes_background(data_plot.bokeh_plot, flight_mode_changes, vtol_states) if data_plot.finalize() is not None: plots.append(data_plot) # Local position for axis in ['x', 'y', 'z']: data_plot = DataPlot(data, plot_config, 'vehicle_local_position', y_axis_label='[m]', title='Local Position '+axis.upper(), plot_height='small', changed_params=changed_params, x_range=x_range) data_plot.add_graph([axis], colors2[0:1], [axis.upper()+' Estimated']) data_plot.change_dataset('vehicle_local_position_setpoint') data_plot.add_graph([axis], colors2[1:2], [axis.upper()+' Setpoint']) plot_flight_modes_background(data_plot.bokeh_plot, flight_mode_changes, vtol_states) if data_plot.finalize() is not None: plots.append(data_plot) # Velocity data_plot = DataPlot(data, plot_config, 'vehicle_local_position', y_axis_label='[m/s]', title='Velocity', plot_height='small', changed_params=changed_params, x_range=x_range) data_plot.add_graph(['vx', 'vy', 'vz'], colors3, ['X', 'Y', 'Z']) plot_flight_modes_background(data_plot.bokeh_plot, flight_mode_changes, vtol_states) if data_plot.finalize() is not None: plots.append(data_plot) # Vision position (only if topic found) if any(elem.name == 'vehicle_vision_position' for elem in data): data_plot = DataPlot(data, plot_config, 'vehicle_vision_position', y_axis_label='[m]', title='Vision Position', plot_height='small', changed_params=changed_params, x_range=x_range) data_plot.add_graph(['x', 'y', 'z'], colors3, ['X', 'Y', 'Z']) plot_flight_modes_background(data_plot.bokeh_plot, flight_mode_changes, vtol_states) data_plot.change_dataset('vehicle_local_position_groundtruth') data_plot.add_graph(['x', 'y', 'z'], colors8[2:5], ['Groundtruth X', 'Groundtruth Y', 'Groundtruth Z']) if data_plot.finalize() is not None: plots.append(data_plot) # Vision velocity data_plot = DataPlot(data, plot_config, 'vehicle_vision_position', y_axis_label='[m]', title='Vision Velocity', plot_height='small', changed_params=changed_params, x_range=x_range) data_plot.add_graph(['vx', 'vy', 'vz'], colors3, ['X', 'Y', 'Z']) plot_flight_modes_background(data_plot.bokeh_plot, flight_mode_changes, vtol_states) data_plot.change_dataset('vehicle_local_position_groundtruth') data_plot.add_graph(['vx', 'vy', 'vz'], colors8[2:5], ['Groundtruth X', 'Groundtruth Y', 'Groundtruth Z']) if data_plot.finalize() is not None: plots.append(data_plot) # Vision attitude if any(elem.name == 'vehicle_vision_attitude' for elem in data): data_plot = DataPlot(data, plot_config, 'vehicle_vision_attitude', y_axis_label='[deg]', title='Vision Attitude', plot_height='small', changed_params=changed_params, x_range=x_range) data_plot.add_graph([lambda data: ('roll', np.rad2deg(data['roll'])), lambda data: ('pitch', np.rad2deg(data['pitch'])), lambda data: ('yaw', np.rad2deg(data['yaw']))], colors3, ['Roll', 'Pitch', 'Yaw']) plot_flight_modes_background(data_plot.bokeh_plot, flight_mode_changes, vtol_states) data_plot.change_dataset('vehicle_attitude_groundtruth') data_plot.add_graph([lambda data: ('roll', np.rad2deg(data['roll'])), lambda data: ('pitch', np.rad2deg(data['pitch'])), lambda data: ('yaw', np.rad2deg(data['yaw']))], colors8[2:5], ['Roll Groundtruth', 'Pitch 
Groundtruth', 'Yaw Groundtruth']) if data_plot.finalize() is not None: plots.append(data_plot) # Airspeed vs Ground speed try: control_state = ulog.get_dataset('control_state').data # only plot if valid airspeed if np.amax(control_state['airspeed_valid']) == 1: data_plot = DataPlot(data, plot_config, 'vehicle_global_position', y_axis_label='[m/s]', title='Airspeed', plot_height='small', changed_params=changed_params, x_range=x_range) data_plot.add_graph([lambda data: ('groundspeed_estimated', np.sqrt(data['vel_n']**2 + data['vel_e']**2))], colors3[2:3], ['Ground Speed Estimated']) data_plot.change_dataset('control_state') data_plot.add_graph(['airspeed'], colors2[0:1], ['Airspeed Estimated']) plot_flight_modes_background(data_plot.bokeh_plot, flight_mode_changes, vtol_states) if data_plot.finalize() is not None: plots.append(data_plot) except: pass # raw radio control inputs data_plot = DataPlot(data, plot_config, 'rc_channels', title='Raw Radio Control Inputs', plot_height='small', y_range=Range1d(-1.1, 1.1), changed_params=changed_params, x_range=x_range) num_rc_channels = 8 if data_plot.dataset: max_channels = np.amax(data_plot.dataset.data['channel_count']) if max_channels < num_rc_channels: num_rc_channels = max_channels legends = [] for i in range(num_rc_channels): channel_names = px4_ulog.get_configured_rc_input_names(i) if channel_names is None: legends.append('Channel '+str(i)) else: legends.append('Channel '+str(i)+' ('+', '.join(channel_names)+')') data_plot.add_graph(['channels['+str(i)+']' for i in range(num_rc_channels)], colors8[0:num_rc_channels], legends) plot_flight_modes_background(data_plot.bokeh_plot, flight_mode_changes, vtol_states) if data_plot.finalize() is not None: plots.append(data_plot) # actuator controls 0 data_plot = DataPlot(data, plot_config, 'actuator_controls_0', y_start=0, title='Actuator Controls 0', plot_height='small', changed_params=changed_params, x_range=x_range) data_plot.add_graph(['control[0]', 'control[1]', 'control[2]', 'control[3]'], colors8[0:4], ['Roll', 'Pitch', 'Yaw', 'Thrust']) plot_flight_modes_background(data_plot.bokeh_plot, flight_mode_changes, vtol_states) if data_plot.finalize() is not None: plots.append(data_plot) # actuator controls 1 # (only present on VTOL, Fixed-wing config) data_plot = DataPlot(data, plot_config, 'actuator_controls_1', y_start=0, title='Actuator Controls 1 (VTOL in Fixed-Wing mode)', plot_height='small', changed_params=changed_params, x_range=x_range) data_plot.add_graph(['control[0]', 'control[1]', 'control[2]', 'control[3]'], colors8[0:4], ['Roll', 'Pitch', 'Yaw', 'Thrust']) plot_flight_modes_background(data_plot.bokeh_plot, flight_mode_changes, vtol_states) if data_plot.finalize() is not None: plots.append(data_plot) # actuator outputs 0: Main data_plot = DataPlot(data, plot_config, 'actuator_outputs', y_start=0, title='Actuator Outputs (Main)', plot_height='small', changed_params=changed_params, x_range=x_range) num_actuator_outputs = 8 if data_plot.dataset: max_outputs = np.amax(data_plot.dataset.data['noutputs']) if max_outputs < num_actuator_outputs: num_actuator_outputs = max_outputs data_plot.add_graph(['output['+str(i)+']' for i in range(num_actuator_outputs)], colors8[0:num_actuator_outputs], ['Output '+str(i) for i in range(num_actuator_outputs)]) plot_flight_modes_background(data_plot.bokeh_plot, flight_mode_changes, vtol_states) if data_plot.finalize() is not None: plots.append(data_plot) # actuator outputs 1: AUX data_plot = DataPlot(data, plot_config, 'actuator_outputs', y_start=0, 
title='Actuator Outputs (AUX)', plot_height='small', changed_params=changed_params, topic_instance=1, x_range=x_range) num_actuator_outputs = 8 if data_plot.dataset: max_outputs = np.amax(data_plot.dataset.data['noutputs']) if max_outputs < num_actuator_outputs: num_actuator_outputs = max_outputs data_plot.add_graph(['output['+str(i)+']' for i in range(num_actuator_outputs)], colors8[0:num_actuator_outputs], ['Output '+str(i) for i in range(num_actuator_outputs)]) plot_flight_modes_background(data_plot.bokeh_plot, flight_mode_changes, vtol_states) if data_plot.finalize() is not None: plots.append(data_plot) # raw acceleration data_plot = DataPlot(data, plot_config, 'sensor_combined', y_axis_label='[m/s^2]', title='Raw Acceleration', plot_height='small', changed_params=changed_params, x_range=x_range) data_plot.add_graph(['accelerometer_m_s2[0]', 'accelerometer_m_s2[1]', 'accelerometer_m_s2[2]'], colors3, ['X', 'Y', 'Z']) if data_plot.finalize() is not None: plots.append(data_plot) # raw angular speed data_plot = DataPlot(data, plot_config, 'sensor_combined', y_axis_label='[deg/s]', title='Raw Angular Speed (Gyroscope)', plot_height='small', changed_params=changed_params, x_range=x_range) data_plot.add_graph([ lambda data: ('gyro_rad[0]', np.rad2deg(data['gyro_rad[0]'])), lambda data: ('gyro_rad[1]', np.rad2deg(data['gyro_rad[1]'])), lambda data: ('gyro_rad[2]', np.rad2deg(data['gyro_rad[2]']))], colors3, ['X', 'Y', 'Z']) if data_plot.finalize() is not None: plots.append(data_plot) # magnetic field strength data_plot = DataPlot(data, plot_config, 'sensor_combined', y_axis_label='[gauss]', title='Raw Magnetic Field Strength', plot_height='small', changed_params=changed_params, x_range=x_range) data_plot.add_graph(['magnetometer_ga[0]', 'magnetometer_ga[1]', 'magnetometer_ga[2]'], colors3, ['X', 'Y', 'Z']) if data_plot.finalize() is not None: plots.append(data_plot) # distance sensor data_plot = DataPlot(data, plot_config, 'distance_sensor', y_start=0, y_axis_label='[m]', title='Distance Sensor', plot_height='small', changed_params=changed_params, x_range=x_range) data_plot.add_graph(['current_distance', 'covariance'], colors3[0:2], ['Distance', 'Covariance']) if data_plot.finalize() is not None: plots.append(data_plot) # gps uncertainty # the accuracy values can be really large if there is no fix, so we limit the # y axis range to some sane values data_plot = DataPlot(data, plot_config, 'vehicle_gps_position', title='GPS Uncertainty', y_range=Range1d(0, 40), plot_height='small', changed_params=changed_params, x_range=x_range) data_plot.add_graph(['eph', 'epv', 'satellites_used', 'fix_type'], colors8[::2], ['Horizontal position accuracy [m]', 'Vertical position accuracy [m]', 'Num Satellites used', 'GPS Fix']) if data_plot.finalize() is not None: plots.append(data_plot) # gps noise & jamming data_plot = DataPlot(data, plot_config, 'vehicle_gps_position', y_start=0, title='GPS Noise & Jamming', plot_height='small', changed_params=changed_params, x_range=x_range) data_plot.add_graph(['noise_per_ms', 'jamming_indicator'], colors3[0:2], ['Noise per ms', 'Jamming Indicator']) if data_plot.finalize() is not None: plots.append(data_plot) # thrust and magnetic field data_plot = DataPlot(data, plot_config, 'sensor_combined', y_start=0, title='Thrust and Magnetic Field', plot_height='small', changed_params=changed_params, x_range=x_range) data_plot.add_graph( [lambda data: ('len_mag', np.sqrt(data['magnetometer_ga[0]']**2 + data['magnetometer_ga[1]']**2 + data['magnetometer_ga[2]']**2))], colors2[0:1], 
['Norm of Magnetic Field']) data_plot.change_dataset('actuator_controls_0') data_plot.add_graph([lambda data: ('thrust', data['control[3]'])], colors2[1:2], ['Thrust']) if data_plot.finalize() is not None: plots.append(data_plot) # power data_plot = DataPlot(data, plot_config, 'battery_status', y_start=0, title='Power', plot_height='small', changed_params=changed_params, x_range=x_range) data_plot.add_graph(['voltage_v', 'voltage_filtered_v', 'current_a', lambda data: ('discharged_mah', data['discharged_mah']/100)], colors8[::2], ['Voltage [V]', 'Voltage filtered [V]', 'Current [A]', 'Discharged Amount [mAh / 100]']) if data_plot.finalize() is not None: plots.append(data_plot) # estimator watchdog data_plot = DataPlot(data, plot_config, 'estimator_status', y_start=0, title='Estimator Watchdog', plot_height='small', changed_params=changed_params, x_range=x_range) data_plot.add_graph( ['nan_flags', 'health_flags', 'timeout_flags', lambda data: ('innovation_check_flags_vel_pos', data['innovation_check_flags']&0x7), lambda data: ('innovation_check_flags_mag', (data['innovation_check_flags']>>3)&0x7), lambda data: ('innovation_check_flags_yaw', (data['innovation_check_flags']>>7)&0x1), lambda data: ('innovation_check_flags_airspeed', (data['innovation_check_flags']>>7)&0x3), lambda data: ('innovation_check_flags_flow', (data['innovation_check_flags']>>9)&0x3)], colors8, ['NaN Flags', 'Health Flags (vel, pos, hgt)', 'Timeout Flags (vel, pos, hgt)', 'Innovation Check Bits (vel, hor pos, vert pos)', 'Innovation Check Bits (mag X, Y, Z)', 'Innovation Check Bits (yaw)', 'Innovation Check Bits (airspeed, height to ground)', 'Innovation Check Bits (optical flow X, Y)']) if data_plot.finalize() is not None: plots.append(data_plot) # RC Quality data_plot = DataPlot(data, plot_config, 'input_rc', title='RC Quality', plot_height='small', y_range=Range1d(0, 1), changed_params=changed_params, x_range=x_range) data_plot.add_graph([lambda data: ('rssi', data['rssi']/100), 'rc_lost'], colors3[0:2], ['RSSI [0, 1]', 'RC Lost (Indicator)']) data_plot.change_dataset('vehicle_status') data_plot.add_graph(['rc_signal_lost'], colors3[2:3], ['RC Lost (Detected)']) if data_plot.finalize() is not None: plots.append(data_plot) # cpu load data_plot = DataPlot(data, plot_config, 'cpuload', title='CPU & RAM', plot_height='small', y_range=Range1d(0, 1), changed_params=changed_params, x_range=x_range) data_plot.add_graph(['ram_usage', 'load'], [colors3[1], colors3[2]], ['RAM Usage', 'CPU Load']) data_plot.add_span('load', line_color=colors3[2]) data_plot.add_span('ram_usage', line_color=colors3[1]) plot_flight_modes_background(data_plot.bokeh_plot, flight_mode_changes, vtol_states) if data_plot.finalize() is not None: plots.append(data_plot) # sampling: time difference try: data_plot = DataPlot(data, plot_config, 'sensor_combined', y_start=0, y_axis_label='[us]', title='Sampling Regularity of Sensor Data', plot_height='small', changed_params=changed_params, x_range=x_range) sensor_combined = ulog.get_dataset('sensor_combined').data sampling_diff = np.diff(sensor_combined['timestamp']) min_sampling_diff = np.amin(sampling_diff) plot_dropouts(data_plot.bokeh_plot, ulog.dropouts, min_sampling_diff) data_plot.add_graph([lambda data: ('timediff', np.append(sampling_diff, 0))], [colors3[2]], ['delta t (between 2 samples)']) data_plot.change_dataset('estimator_status') data_plot.add_graph([lambda data: ('time_slip', data['time_slip']*1e6)], [colors3[1]], ['Estimator time slip (cumulative)']) if data_plot.finalize() is not None: 
plots.append(data_plot) except: pass # exchange all DataPlot's with the bokeh_plot and handle parameter changes param_changes_button = Button(label="Hide Parameter Changes", width=170) param_change_labels = [] # FIXME: this should be a CustomJS callback, not on the server. However this # did not work for me. def param_changes_button_clicked(): """ callback to show/hide parameter changes """ for label in param_change_labels: if label.visible: param_changes_button.label = 'Show Parameter Changes' label.visible = False label.text_alpha = 0 # label.visible does not work, so we use this instead else: param_changes_button.label = 'Hide Parameter Changes' label.visible = True label.text_alpha = 1 param_changes_button.on_click(param_changes_button_clicked) jinja_plot_data = [] for i in range(len(plots)): if plots[i] is None: plots[i] = widgetbox(param_changes_button, width=int(plot_width*0.99)) if isinstance(plots[i], DataPlot): if plots[i].param_change_label is not None: param_change_labels.append(plots[i].param_change_label) plots[i] = plots[i].bokeh_plot plot_title = plots[i].title.text fragment = 'Nav-'+plot_title.replace(' ', '-') \ .replace('&', '_').replace('(', '').replace(')', '') jinja_plot_data.append({ 'model_id': plots[i].ref['id'], 'fragment': fragment, 'title': plot_title }) # changed parameters param_names = [] param_values = [] param_defaults = [] param_mins = [] param_maxs = [] param_descriptions = [] default_params = get_default_parameters() for param_name in sorted(ulog.initial_parameters): param_value = ulog.initial_parameters[param_name] if param_name.startswith('RC') or param_name.startswith('CAL_'): continue try: if param_name in default_params: default_param = default_params[param_name] if default_param['type'] == 'FLOAT': is_default = abs(float(default_param['default']) - float(param_value)) < 0.00001 if 'decimal' in default_param: param_value = round(param_value, int(default_param['decimal'])) else: is_default = int(default_param['default']) == int(param_value) if not is_default: param_names.append(param_name) param_values.append(param_value) param_defaults.append(default_param['default']) param_mins.append(default_param.get('min', '')) param_maxs.append(default_param.get('max', '')) param_descriptions.append(default_param.get('short_desc', '')) else: # not found: add it as if it were changed param_names.append(param_name) param_values.append(param_value) param_defaults.append('') param_mins.append('') param_maxs.append('') param_descriptions.append('(unknown)') except Exception as error: print(type(error), error) param_data = dict( names=param_names, values=param_values, defaults=param_defaults, mins=param_mins, maxs=param_maxs, descriptions=param_descriptions) source = ColumnDataSource(param_data) columns = [ TableColumn(field="names", title="Name", width=int(plot_width*0.2), sortable=False), TableColumn(field="values", title="Value", width=int(plot_width*0.15), sortable=False), TableColumn(field="defaults", title="Default", width=int(plot_width*0.1), sortable=False), TableColumn(field="mins", title="Min", width=int(plot_width*0.075), sortable=False), TableColumn(field="maxs", title="Max", width=int(plot_width*0.075), sortable=False), TableColumn(field="descriptions", title="Description", width=int(plot_width*0.40), sortable=False), ] data_table = DataTable(source=source, columns=columns, width=plot_width, height=300, sortable=False, selectable=False) div = Div(text="""<b>Non-default Parameters</b> (except RC and sensor calibration)""", width=int(plot_width/2)) 
plots.append(widgetbox(div, data_table, width=plot_width)) # log messages log_times = [] log_levels = [] log_messages = [] for m in ulog.logged_messages: m1, s1 = divmod(int(m.timestamp/1e6), 60) h1, m1 = divmod(m1, 60) log_times.append("{:d}:{:02d}:{:02d}".format(h1, m1, s1)) log_levels.append(m.log_level_str()) log_messages.append(m.message) log_data = dict( times=log_times, levels=log_levels, messages=log_messages) source = ColumnDataSource(log_data) columns = [ TableColumn(field="times", title="Time", width=int(plot_width*0.15), sortable=False), TableColumn(field="levels", title="Level", width=int(plot_width*0.1), sortable=False), TableColumn(field="messages", title="Message", width=int(plot_width*0.75), sortable=False), ] data_table = DataTable(source=source, columns=columns, width=plot_width, height=300, sortable=False, selectable=False) div = Div(text="""<b>Logged Messages</b>""", width=int(plot_width/2)) plots.append(widgetbox(div, data_table, width=plot_width)) curdoc().template_variables['plots'] = jinja_plot_data return plots
def generate_plots(ulog, px4_ulog, db_data, vehicle_data): """ create a list of bokeh plots (and widgets) to show """ plots = [] data = ulog.data_list # initialize flight mode changes try: cur_dataset = ulog.get_dataset('commander_state') flight_mode_changes = cur_dataset.list_value_changes('main_state') flight_mode_changes.append((ulog.last_timestamp, -1)) except (KeyError, IndexError) as error: flight_mode_changes = [] # VTOL state changes vtol_states = None try: cur_dataset = ulog.get_dataset('vehicle_status') if np.amax(cur_dataset.data['is_vtol']) == 1: vtol_states = cur_dataset.list_value_changes('in_transition_mode') # find mode after transitions (states: 1=transition, 2=FW, 3=MC) for i in range(len(vtol_states)): if vtol_states[i][1] == 0: t = vtol_states[i][0] idx = np.argmax(cur_dataset.data['timestamp'] >= t) + 1 vtol_states[i] = (t, 2 + cur_dataset.data['is_rotary_wing'][idx]) vtol_states.append((ulog.last_timestamp, -1)) except (KeyError, IndexError) as error: vtol_states = None # Heading sys_name = '' if 'sys_name' in ulog.msg_info_dict: sys_name = cgi.escape(ulog.msg_info_dict['sys_name']) + ' ' div = Div(text="<h1>"+sys_name + px4_ulog.get_mav_type()+"</h1>", width=int(plot_width*0.9)) header_divs = [div] if db_data.description != '': div_descr = Div(text="<h4>"+db_data.description+"</h4>", width=int(plot_width*0.9)) header_divs.append(div_descr) # airframe table_text = [] if 'SYS_AUTOSTART' in ulog.initial_parameters: sys_autostart = ulog.initial_parameters['SYS_AUTOSTART'] airframe_data = get_airframe_data(sys_autostart) if airframe_data is None: table_text.append(('Airframe', str(sys_autostart))) else: airframe_type = '' if 'type' in airframe_data: airframe_type = ', '+airframe_data['type'] table_text.append(('Airframe', airframe_data.get('name')+ airframe_type+' <small>('+str(sys_autostart)+')</small>')) # HW & SW sys_hardware = '' if 'ver_hw' in ulog.msg_info_dict: sys_hardware = cgi.escape(ulog.msg_info_dict['ver_hw']) table_text.append(('Hardware', sys_hardware)) release_str = ulog.get_version_info_str() if release_str is None: release_str = '' release_str_suffix = '' else: release_str += ' <small>(' release_str_suffix = ')</small>' if 'ver_sw' in ulog.msg_info_dict: ver_sw = cgi.escape(ulog.msg_info_dict['ver_sw']) ver_sw_link = 'https://github.com/PX4/Firmware/commit/'+ver_sw table_text.append(('Software Version', release_str + '<a href="'+ver_sw_link+'" target="_blank">'+ver_sw[:8]+'</a>'+ release_str_suffix)) if 'sys_os_name' in ulog.msg_info_dict and 'sys_os_ver_release' in ulog.msg_info_dict: os_name = cgi.escape(ulog.msg_info_dict['sys_os_name']) os_ver = ulog.get_version_info_str('sys_os_ver_release') if os_ver is not None: table_text.append(('OS Version', os_name + ', ' + os_ver)) table_text.append(('Estimator', px4_ulog.get_estimator())) # logging start time & date try: # get the first non-zero timestamp gps_data = ulog.get_dataset('vehicle_gps_position') indices = np.nonzero(gps_data.data['time_utc_usec']) if len(indices[0]) > 0: # we use the timestamp from the log and then convert it with JS to # display with local timezone logging_start_time = int(gps_data.data['time_utc_usec'][indices[0][0]] / 1000000) js_code = """ <script type="text/javascript"> var logging_span = $('#logging-start-element'); var d = new Date(0); d.setUTCSeconds(logging_span.text()); var date_str = ("0" + d.getDate()).slice(-2) + "-" + ("0"+(d.getMonth()+1)).slice(-2) + "-" + d.getFullYear() + " " + ("0" + d.getHours()).slice(-2) + ":" + ("0" + d.getMinutes()).slice(-2); 
logging_span.text(date_str); logging_span.show(); </script> """ table_text.append(('Logging Start', '<span style="display:none" id="logging-start-element">'+ str(logging_start_time)+'</span>'+js_code)) except: # Ignore. Eg. if topic not found pass # logging duration m, s = divmod(int((ulog.last_timestamp - ulog.start_timestamp)/1e6), 60) h, m = divmod(m, 60) table_text.append(('Logging Duration', '{:d}:{:02d}:{:02d}'.format(h, m, s))) # dropouts dropout_durations = [dropout.duration for dropout in ulog.dropouts] if len(dropout_durations) > 0: total_duration = sum(dropout_durations) / 1000 if total_duration > 5: total_duration_str = '{:.0f}'.format(total_duration) else: total_duration_str = '{:.2f}'.format(total_duration) table_text.append(('Dropouts', '{:} ({:} s)'.format( len(dropout_durations), total_duration_str))) # total vehicle flight time flight_time_s = get_total_flight_time(ulog) if flight_time_s is not None: m, s = divmod(int(flight_time_s), 60) h, m = divmod(m, 60) days, h = divmod(h, 24) flight_time_str = '' if days > 0: flight_time_str += '{:d} days '.format(days) if h > 0: flight_time_str += '{:d} hours '.format(h) if m > 0: flight_time_str += '{:d} minutes '.format(m) flight_time_str += '{:d} seconds '.format(s) table_text.append(('Vehicle Flight Time', flight_time_str)) # vehicle UUID (and name if provided). SITL does not have a UUID if 'sys_uuid' in ulog.msg_info_dict and sys_hardware != 'SITL': sys_uuid = cgi.escape(ulog.msg_info_dict['sys_uuid']) if vehicle_data is not None and vehicle_data.name != '': sys_uuid = sys_uuid + ' (' + vehicle_data.name + ')' if len(sys_uuid) > 0: table_text.append(('Vehicle UUID', sys_uuid)) # Wind speed, rating, feedback if db_data.wind_speed >= 0: table_text.append(('Wind Speed', db_data.wind_speed_str())) if len(db_data.rating) > 0: table_text.append(('Flight Rating', db_data.rating_str())) if len(db_data.feedback) > 0: table_text.append(('Feedback', db_data.feedback.replace('\n', '<br/>'))) if len(db_data.video_url) > 0: table_text.append(('Video', '<a href="'+db_data.video_url+ '" target="_blank">'+db_data.video_url+'</a>')) # generate the table divs_text = '<table>' + ''.join( ['<tr><td class="left">'+a+ ':</td><td>'+b+'</td></tr>' for a, b in table_text]) + '</table>' header_divs.append(Div(text=divs_text, width=int(plot_width*0.9))) plots.append(widgetbox(header_divs, width=int(plot_width*0.9))) # FIXME: for now, we use Google maps directly without bokeh, because it's not working reliably # GPS map # gps_plots = [] # gps_titles = [] # plot = plot_map(ulog, plot_config, map_type='google', api_key = # get_google_maps_api_key(), setpoints=False) # plot = None # if plot is not None: # gps_plots.append(plot) # gps_titles.append('GPS Map: Satellite') # # plot = plot_map(ulog, plot_config, map_type='plain', setpoints=True) # if plot is not None: # gps_plots.append(plot) # gps_titles.append('GPS Map: Plain') # # data_plot = DataPlot2D(data, plot_config, 'vehicle_local_position', # x_axis_label = '[m]', y_axis_label='[m]', plot_height='large') # data_plot.add_graph('y', 'x', colors2[0], 'Estimated') # data_plot.change_dataset('vehicle_local_position_setpoint') # data_plot.add_graph('y', 'x', colors2[1], 'Setpoint') # if data_plot.finalize() is not None: # gps_plots.append(data_plot.bokeh_plot) # gps_titles.append('Local Position') # # # if len(gps_plots) >= 2: # tabs = [] # for i in range(len(gps_plots)): # tabs.append(Panel(child=gps_plots[i], title=gps_titles[i])) # gps_plot_height=plot_config['plot_height']['large'] + 30 # 
plots.append(Tabs(tabs=tabs, width=plot_width, height=gps_plot_height)) # elif len(gps_plots) == 1: # plots.extend(gps_plots) # Position plot data_plot = DataPlot2D(data, plot_config, 'vehicle_local_position', x_axis_label='[m]', y_axis_label='[m]', plot_height='large') data_plot.add_graph('y', 'x', colors2[0], 'Estimated', check_if_all_zero=True) if not data_plot.had_error: # vehicle_local_position is required data_plot.change_dataset('vehicle_local_position_setpoint') data_plot.add_graph('y', 'x', colors2[1], 'Setpoint') # groundtruth (SITL only) data_plot.change_dataset('vehicle_local_position_groundtruth') data_plot.add_graph('y', 'x', color_gray, 'Groundtruth') # GPS + position setpoints plot_map(ulog, plot_config, map_type='plain', setpoints=True, bokeh_plot=data_plot.bokeh_plot) if data_plot.finalize() is not None: plots.append(data_plot.bokeh_plot) curdoc().template_variables['has_position_data'] = True # initialize parameter changes changed_params = None if not 'replay' in ulog.msg_info_dict: # replay can have many param changes if len(ulog.changed_parameters) > 0: changed_params = ulog.changed_parameters plots.append(None) # save space for the param change button ### Add all data plots ### x_range_offset = (ulog.last_timestamp - ulog.start_timestamp) * 0.05 x_range = Range1d(ulog.start_timestamp - x_range_offset, ulog.last_timestamp + x_range_offset) # Altitude estimate data_plot = DataPlot(data, plot_config, 'vehicle_gps_position', y_axis_label='[m]', title='Altitude Estimate', changed_params=changed_params, x_range=x_range) data_plot.add_graph([lambda data: ('alt', data['alt']*0.001)], colors8[0:1], ['GPS Altitude']) data_plot.change_dataset('sensor_combined') data_plot.add_graph(['baro_alt_meter'], colors8[1:2], ['Barometer Altitude']) data_plot.change_dataset('vehicle_global_position') data_plot.add_graph(['alt'], colors8[2:3], ['Fused Altitude Estimation']) data_plot.change_dataset('position_setpoint_triplet') data_plot.add_circle(['current.alt'], [plot_config['mission_setpoint_color']], ['Altitude Setpoint']) data_plot.change_dataset('tecs_status') data_plot.add_graph(['altitude_sp'], colors8[3:4], ['TECS Altitude Setpoint']) plot_flight_modes_background(data_plot.bokeh_plot, flight_mode_changes, vtol_states) if data_plot.finalize() is not None: plots.append(data_plot) # Roll/Pitch/Yaw angle & angular rate for axis in ['roll', 'pitch', 'yaw']: # angle axis_name = axis.capitalize() data_plot = DataPlot(data, plot_config, 'vehicle_attitude', y_axis_label='[deg]', title=axis_name+' Angle', plot_height='small', changed_params=changed_params, x_range=x_range) data_plot.add_graph([lambda data: (axis, np.rad2deg(data[axis]))], colors2[0:1], [axis_name+' Estimated']) data_plot.change_dataset('vehicle_attitude_setpoint') data_plot.add_graph([lambda data: (axis+'_d', np.rad2deg(data[axis+'_d']))], colors2[1:2], [axis_name+' Setpoint']) data_plot.change_dataset('vehicle_attitude_groundtruth') data_plot.add_graph([lambda data: (axis, np.rad2deg(data[axis]))], [color_gray], [axis_name+' Groundtruth']) plot_flight_modes_background(data_plot.bokeh_plot, flight_mode_changes, vtol_states) if data_plot.finalize() is not None: plots.append(data_plot) # rate data_plot = DataPlot(data, plot_config, 'vehicle_attitude', y_axis_label='[deg/s]', title=axis_name+' Angular Rate', plot_height='small', changed_params=changed_params, x_range=x_range) data_plot.add_graph([lambda data: (axis+'speed', np.rad2deg(data[axis+'speed']))], colors2[0:1], [axis_name+' Rate Estimated']) 
data_plot.change_dataset('vehicle_rates_setpoint') data_plot.add_graph([lambda data: (axis, np.rad2deg(data[axis]))], colors2[1:2], [axis_name+' Rate Setpoint']) data_plot.change_dataset('vehicle_attitude_groundtruth') data_plot.add_graph([lambda data: (axis+'speed', np.rad2deg(data[axis+'speed']))], [color_gray], [axis_name+' Rate Groundtruth']) plot_flight_modes_background(data_plot.bokeh_plot, flight_mode_changes, vtol_states) if data_plot.finalize() is not None: plots.append(data_plot) # Local position for axis in ['x', 'y', 'z']: data_plot = DataPlot(data, plot_config, 'vehicle_local_position', y_axis_label='[m]', title='Local Position '+axis.upper(), plot_height='small', changed_params=changed_params, x_range=x_range) data_plot.add_graph([axis], colors2[0:1], [axis.upper()+' Estimated']) data_plot.change_dataset('vehicle_local_position_setpoint') data_plot.add_graph([axis], colors2[1:2], [axis.upper()+' Setpoint']) plot_flight_modes_background(data_plot.bokeh_plot, flight_mode_changes, vtol_states) if data_plot.finalize() is not None: plots.append(data_plot) # Velocity data_plot = DataPlot(data, plot_config, 'vehicle_local_position', y_axis_label='[m/s]', title='Velocity', plot_height='small', changed_params=changed_params, x_range=x_range) data_plot.add_graph(['vx', 'vy', 'vz'], colors3, ['X', 'Y', 'Z']) plot_flight_modes_background(data_plot.bokeh_plot, flight_mode_changes, vtol_states) if data_plot.finalize() is not None: plots.append(data_plot) # Vision position (only if topic found) if any(elem.name == 'vehicle_vision_position' for elem in data): data_plot = DataPlot(data, plot_config, 'vehicle_vision_position', y_axis_label='[m]', title='Vision Position', plot_height='small', changed_params=changed_params, x_range=x_range) data_plot.add_graph(['x', 'y', 'z'], colors3, ['X', 'Y', 'Z']) plot_flight_modes_background(data_plot.bokeh_plot, flight_mode_changes, vtol_states) data_plot.change_dataset('vehicle_local_position_groundtruth') data_plot.add_graph(['x', 'y', 'z'], colors8[2:5], ['Groundtruth X', 'Groundtruth Y', 'Groundtruth Z']) if data_plot.finalize() is not None: plots.append(data_plot) # Vision velocity data_plot = DataPlot(data, plot_config, 'vehicle_vision_position', y_axis_label='[m]', title='Vision Velocity', plot_height='small', changed_params=changed_params, x_range=x_range) data_plot.add_graph(['vx', 'vy', 'vz'], colors3, ['X', 'Y', 'Z']) plot_flight_modes_background(data_plot.bokeh_plot, flight_mode_changes, vtol_states) data_plot.change_dataset('vehicle_local_position_groundtruth') data_plot.add_graph(['vx', 'vy', 'vz'], colors8[2:5], ['Groundtruth X', 'Groundtruth Y', 'Groundtruth Z']) if data_plot.finalize() is not None: plots.append(data_plot) # Vision attitude if any(elem.name == 'vehicle_vision_attitude' for elem in data): data_plot = DataPlot(data, plot_config, 'vehicle_vision_attitude', y_axis_label='[deg]', title='Vision Attitude', plot_height='small', changed_params=changed_params, x_range=x_range) data_plot.add_graph([lambda data: ('roll', np.rad2deg(data['roll'])), lambda data: ('pitch', np.rad2deg(data['pitch'])), lambda data: ('yaw', np.rad2deg(data['yaw']))], colors3, ['Roll', 'Pitch', 'Yaw']) plot_flight_modes_background(data_plot.bokeh_plot, flight_mode_changes, vtol_states) data_plot.change_dataset('vehicle_attitude_groundtruth') data_plot.add_graph([lambda data: ('roll', np.rad2deg(data['roll'])), lambda data: ('pitch', np.rad2deg(data['pitch'])), lambda data: ('yaw', np.rad2deg(data['yaw']))], colors8[2:5], ['Roll Groundtruth', 'Pitch 
Groundtruth', 'Yaw Groundtruth']) if data_plot.finalize() is not None: plots.append(data_plot) # Airspeed vs Ground speed try: control_state = ulog.get_dataset('control_state').data # only plot if valid airspeed if np.amax(control_state['airspeed_valid']) == 1: data_plot = DataPlot(data, plot_config, 'vehicle_global_position', y_axis_label='[m/s]', title='Airspeed', plot_height='small', changed_params=changed_params, x_range=x_range) data_plot.add_graph([lambda data: ('groundspeed_estimated', np.sqrt(data['vel_n']**2 + data['vel_e']**2))], colors3[2:3], ['Ground Speed Estimated']) data_plot.change_dataset('control_state') data_plot.add_graph(['airspeed'], colors2[0:1], ['Airspeed Estimated']) plot_flight_modes_background(data_plot.bokeh_plot, flight_mode_changes, vtol_states) if data_plot.finalize() is not None: plots.append(data_plot) except: pass # raw radio control inputs data_plot = DataPlot(data, plot_config, 'rc_channels', title='Raw Radio Control Inputs', plot_height='small', y_range=Range1d(-1.1, 1.1), changed_params=changed_params, x_range=x_range) num_rc_channels = 8 if data_plot.dataset: max_channels = np.amax(data_plot.dataset.data['channel_count']) if max_channels < num_rc_channels: num_rc_channels = max_channels legends = [] for i in range(num_rc_channels): channel_names = px4_ulog.get_configured_rc_input_names(i) if channel_names is None: legends.append('Channel '+str(i)) else: legends.append('Channel '+str(i)+' ('+', '.join(channel_names)+')') data_plot.add_graph(['channels['+str(i)+']' for i in range(num_rc_channels)], colors8[0:num_rc_channels], legends) plot_flight_modes_background(data_plot.bokeh_plot, flight_mode_changes, vtol_states) if data_plot.finalize() is not None: plots.append(data_plot) # actuator controls 0 data_plot = DataPlot(data, plot_config, 'actuator_controls_0', y_start=0, title='Actuator Controls 0', plot_height='small', changed_params=changed_params, x_range=x_range) data_plot.add_graph(['control[0]', 'control[1]', 'control[2]', 'control[3]'], colors8[0:4], ['Roll', 'Pitch', 'Yaw', 'Thrust']) plot_flight_modes_background(data_plot.bokeh_plot, flight_mode_changes, vtol_states) if data_plot.finalize() is not None: plots.append(data_plot) # actuator controls 1 # (only present on VTOL, Fixed-wing config) data_plot = DataPlot(data, plot_config, 'actuator_controls_1', y_start=0, title='Actuator Controls 1 (VTOL in Fixed-Wing mode)', plot_height='small', changed_params=changed_params, x_range=x_range) data_plot.add_graph(['control[0]', 'control[1]', 'control[2]', 'control[3]'], colors8[0:4], ['Roll', 'Pitch', 'Yaw', 'Thrust']) plot_flight_modes_background(data_plot.bokeh_plot, flight_mode_changes, vtol_states) if data_plot.finalize() is not None: plots.append(data_plot) # actuator outputs 0: Main data_plot = DataPlot(data, plot_config, 'actuator_outputs', y_start=0, title='Actuator Outputs (Main)', plot_height='small', changed_params=changed_params, x_range=x_range) num_actuator_outputs = 8 if data_plot.dataset: max_outputs = np.amax(data_plot.dataset.data['noutputs']) if max_outputs < num_actuator_outputs: num_actuator_outputs = max_outputs data_plot.add_graph(['output['+str(i)+']' for i in range(num_actuator_outputs)], colors8[0:num_actuator_outputs], ['Output '+str(i) for i in range(num_actuator_outputs)]) plot_flight_modes_background(data_plot.bokeh_plot, flight_mode_changes, vtol_states) if data_plot.finalize() is not None: plots.append(data_plot) # actuator outputs 1: AUX data_plot = DataPlot(data, plot_config, 'actuator_outputs', y_start=0, 
title='Actuator Outputs (AUX)', plot_height='small', changed_params=changed_params, topic_instance=1, x_range=x_range) num_actuator_outputs = 8 if data_plot.dataset: max_outputs = np.amax(data_plot.dataset.data['noutputs']) if max_outputs < num_actuator_outputs: num_actuator_outputs = max_outputs data_plot.add_graph(['output['+str(i)+']' for i in range(num_actuator_outputs)], colors8[0:num_actuator_outputs], ['Output '+str(i) for i in range(num_actuator_outputs)]) plot_flight_modes_background(data_plot.bokeh_plot, flight_mode_changes, vtol_states) if data_plot.finalize() is not None: plots.append(data_plot) # raw acceleration data_plot = DataPlot(data, plot_config, 'sensor_combined', y_axis_label='[m/s^2]', title='Raw Acceleration', plot_height='small', changed_params=changed_params, x_range=x_range) data_plot.add_graph(['accelerometer_m_s2[0]', 'accelerometer_m_s2[1]', 'accelerometer_m_s2[2]'], colors3, ['X', 'Y', 'Z']) if data_plot.finalize() is not None: plots.append(data_plot) # raw angular speed data_plot = DataPlot(data, plot_config, 'sensor_combined', y_axis_label='[deg/s]', title='Raw Angular Speed (Gyroscope)', plot_height='small', changed_params=changed_params, x_range=x_range) data_plot.add_graph([ lambda data: ('gyro_rad[0]', np.rad2deg(data['gyro_rad[0]'])), lambda data: ('gyro_rad[1]', np.rad2deg(data['gyro_rad[1]'])), lambda data: ('gyro_rad[2]', np.rad2deg(data['gyro_rad[2]']))], colors3, ['X', 'Y', 'Z']) if data_plot.finalize() is not None: plots.append(data_plot) # magnetic field strength data_plot = DataPlot(data, plot_config, 'sensor_combined', y_axis_label='[gauss]', title='Raw Magnetic Field Strength', plot_height='small', changed_params=changed_params, x_range=x_range) data_plot.add_graph(['magnetometer_ga[0]', 'magnetometer_ga[1]', 'magnetometer_ga[2]'], colors3, ['X', 'Y', 'Z']) if data_plot.finalize() is not None: plots.append(data_plot) # distance sensor data_plot = DataPlot(data, plot_config, 'distance_sensor', y_start=0, y_axis_label='[m]', title='Distance Sensor', plot_height='small', changed_params=changed_params, x_range=x_range) data_plot.add_graph(['current_distance', 'covariance'], colors3[0:2], ['Distance', 'Covariance']) if data_plot.finalize() is not None: plots.append(data_plot) # gps uncertainty # the accuracy values can be really large if there is no fix, so we limit the # y axis range to some sane values data_plot = DataPlot(data, plot_config, 'vehicle_gps_position', title='GPS Uncertainty', y_range=Range1d(0, 40), plot_height='small', changed_params=changed_params, x_range=x_range) data_plot.add_graph(['eph', 'epv', 'satellites_used', 'fix_type'], colors8[::2], ['Horizontal position accuracy [m]', 'Vertical position accuracy [m]', 'Num Satellites used', 'GPS Fix']) if data_plot.finalize() is not None: plots.append(data_plot) # gps noise & jamming data_plot = DataPlot(data, plot_config, 'vehicle_gps_position', y_start=0, title='GPS Noise & Jamming', plot_height='small', changed_params=changed_params, x_range=x_range) data_plot.add_graph(['noise_per_ms', 'jamming_indicator'], colors3[0:2], ['Noise per ms', 'Jamming Indicator']) if data_plot.finalize() is not None: plots.append(data_plot) # thrust and magnetic field data_plot = DataPlot(data, plot_config, 'sensor_combined', y_start=0, title='Thrust and Magnetic Field', plot_height='small', changed_params=changed_params, x_range=x_range) data_plot.add_graph( [lambda data: ('len_mag', np.sqrt(data['magnetometer_ga[0]']**2 + data['magnetometer_ga[1]']**2 + data['magnetometer_ga[2]']**2))], colors2[0:1], 
['Norm of Magnetic Field']) data_plot.change_dataset('actuator_controls_0') data_plot.add_graph([lambda data: ('thrust', data['control[3]'])], colors2[1:2], ['Thrust']) if data_plot.finalize() is not None: plots.append(data_plot) # power data_plot = DataPlot(data, plot_config, 'battery_status', y_start=0, title='Power', plot_height='small', changed_params=changed_params, x_range=x_range) data_plot.add_graph(['voltage_v', 'voltage_filtered_v', 'current_a', lambda data: ('discharged_mah', data['discharged_mah']/100)], colors8[::2], ['Voltage [V]', 'Voltage filtered [V]', 'Current [A]', 'Discharged Amount [mAh / 100]']) if data_plot.finalize() is not None: plots.append(data_plot) # estimator watchdog data_plot = DataPlot(data, plot_config, 'estimator_status', y_start=0, title='Estimator Watchdog', plot_height='small', changed_params=changed_params, x_range=x_range) data_plot.add_graph( ['nan_flags', 'health_flags', 'timeout_flags', lambda data: ('innovation_check_flags_vel_pos', data['innovation_check_flags']&0x7), lambda data: ('innovation_check_flags_mag', (data['innovation_check_flags']>>3)&0x7), lambda data: ('innovation_check_flags_yaw', (data['innovation_check_flags']>>7)&0x1), lambda data: ('innovation_check_flags_airspeed', (data['innovation_check_flags']>>7)&0x3), lambda data: ('innovation_check_flags_flow', (data['innovation_check_flags']>>9)&0x3)], colors8, ['NaN Flags', 'Health Flags (vel, pos, hgt)', 'Timeout Flags (vel, pos, hgt)', 'Innovation Check Bits (vel, hor pos, vert pos)', 'Innovation Check Bits (mag X, Y, Z)', 'Innovation Check Bits (yaw)', 'Innovation Check Bits (airspeed, height to ground)', 'Innovation Check Bits (optical flow X, Y)']) if data_plot.finalize() is not None: plots.append(data_plot) # RC Quality data_plot = DataPlot(data, plot_config, 'input_rc', title='RC Quality', plot_height='small', y_range=Range1d(0, 1), changed_params=changed_params, x_range=x_range) data_plot.add_graph([lambda data: ('rssi', data['rssi']/100), 'rc_lost'], colors3[0:2], ['RSSI [0, 1]', 'RC Lost (Indicator)']) data_plot.change_dataset('vehicle_status') data_plot.add_graph(['rc_signal_lost'], colors3[2:3], ['RC Lost (Detected)']) if data_plot.finalize() is not None: plots.append(data_plot) # cpu load data_plot = DataPlot(data, plot_config, 'cpuload', title='CPU & RAM', plot_height='small', y_range=Range1d(0, 1), changed_params=changed_params, x_range=x_range) data_plot.add_graph(['ram_usage', 'load'], [colors3[1], colors3[2]], ['RAM Usage', 'CPU Load']) data_plot.add_span('load', line_color=colors3[2]) data_plot.add_span('ram_usage', line_color=colors3[1]) plot_flight_modes_background(data_plot.bokeh_plot, flight_mode_changes, vtol_states) if data_plot.finalize() is not None: plots.append(data_plot) # sampling: time difference try: data_plot = DataPlot(data, plot_config, 'sensor_combined', y_start=0, y_axis_label='[us]', title='Sampling Regularity of Sensor Data', plot_height='small', changed_params=changed_params, x_range=x_range) sensor_combined = ulog.get_dataset('sensor_combined').data sampling_diff = np.diff(sensor_combined['timestamp']) min_sampling_diff = np.amin(sampling_diff) plot_dropouts(data_plot.bokeh_plot, ulog.dropouts, min_sampling_diff) data_plot.add_graph([lambda data: ('timediff', np.append(sampling_diff, 0))], [colors3[2]], ['delta t (between 2 samples)']) data_plot.change_dataset('estimator_status') data_plot.add_graph([lambda data: ('time_slip', data['time_slip']*1e6)], [colors3[1]], ['Estimator time slip (cumulative)']) if data_plot.finalize() is not None: 
plots.append(data_plot) except: pass # exchange all DataPlot's with the bokeh_plot and handle parameter changes param_changes_button = Button(label="Hide Parameter Changes", width=170) param_change_labels = [] # FIXME: this should be a CustomJS callback, not on the server. However this # did not work for me. def param_changes_button_clicked(): """ callback to show/hide parameter changes """ for label in param_change_labels: if label.visible: param_changes_button.label = 'Show Parameter Changes' label.visible = False label.text_alpha = 0 # label.visible does not work, so we use this instead else: param_changes_button.label = 'Hide Parameter Changes' label.visible = True label.text_alpha = 1 param_changes_button.on_click(param_changes_button_clicked) jinja_plot_data = [] for i in range(len(plots)): if plots[i] is None: plots[i] = widgetbox(param_changes_button, width=int(plot_width*0.99)) if isinstance(plots[i], DataPlot): if plots[i].param_change_label is not None: param_change_labels.append(plots[i].param_change_label) plots[i] = plots[i].bokeh_plot plot_title = plots[i].title.text fragment = 'Nav-'+plot_title.replace(' ', '-') \ .replace('&', '_').replace('(', '').replace(')', '') jinja_plot_data.append({ 'model_id': plots[i].ref['id'], 'fragment': fragment, 'title': plot_title }) # changed parameters param_names = [] param_values = [] param_defaults = [] param_mins = [] param_maxs = [] param_descriptions = [] default_params = get_default_parameters() for param_name in sorted(ulog.initial_parameters): param_value = ulog.initial_parameters[param_name] if param_name.startswith('RC') or param_name.startswith('CAL_'): continue try: if param_name in default_params: default_param = default_params[param_name] if default_param['type'] == 'FLOAT': is_default = abs(float(default_param['default']) - float(param_value)) < 0.00001 if 'decimal' in default_param: param_value = round(param_value, int(default_param['decimal'])) else: is_default = int(default_param['default']) == int(param_value) if not is_default: param_names.append(param_name) param_values.append(param_value) param_defaults.append(default_param['default']) param_mins.append(default_param.get('min', '')) param_maxs.append(default_param.get('max', '')) param_descriptions.append(default_param.get('short_desc', '')) else: # not found: add it as if it were changed param_names.append(param_name) param_values.append(param_value) param_defaults.append('') param_mins.append('') param_maxs.append('') param_descriptions.append('(unknown)') except Exception as error: print(type(error), error) param_data = dict( names=param_names, values=param_values, defaults=param_defaults, mins=param_mins, maxs=param_maxs, descriptions=param_descriptions) source = ColumnDataSource(param_data) columns = [ TableColumn(field="names", title="Name", width=int(plot_width*0.2), sortable=False), TableColumn(field="values", title="Value", width=int(plot_width*0.15), sortable=False), TableColumn(field="defaults", title="Default", width=int(plot_width*0.1), sortable=False), TableColumn(field="mins", title="Min", width=int(plot_width*0.075), sortable=False), TableColumn(field="maxs", title="Max", width=int(plot_width*0.075), sortable=False), TableColumn(field="descriptions", title="Description", width=int(plot_width*0.40), sortable=False), ] data_table = DataTable(source=source, columns=columns, width=plot_width, height=300, sortable=False, selectable=False) div = Div(text="""<b>Non-default Parameters</b> (except RC and sensor calibration)""", width=int(plot_width/2)) 
plots.append(widgetbox(div, data_table, width=plot_width)) # log messages log_times = [] log_levels = [] log_messages = [] for m in ulog.logged_messages: m1, s1 = divmod(int(m.timestamp/1e6), 60) h1, m1 = divmod(m1, 60) log_times.append("{:d}:{:02d}:{:02d}".format(h1, m1, s1)) log_levels.append(m.log_level_str()) log_messages.append(m.message) log_data = dict( times=log_times, levels=log_levels, messages=log_messages) source = ColumnDataSource(log_data) columns = [ TableColumn(field="times", title="Time", width=int(plot_width*0.15), sortable=False), TableColumn(field="levels", title="Level", width=int(plot_width*0.1), sortable=False), TableColumn(field="messages", title="Message", width=int(plot_width*0.75), sortable=False), ] data_table = DataTable(source=source, columns=columns, width=plot_width, height=300, sortable=False, selectable=False) div = Div(text="""<b>Logged Messages</b>""", width=int(plot_width/2)) plots.append(widgetbox(div, data_table, width=plot_width)) curdoc().template_variables['plots'] = jinja_plot_data return plots
24,632
def parse_and_check_molecule_input(argument: str, Z: Integral = None): """ Separate the constitutive elements and charge of a molecule symbol. Parameters ---------- argument : `str` The molecule symbol to be parsed. Z : integer, optional The provided charge number. Returns ------- elements_dict : `dict` A dictionary with identified element symbols as keys and amount of each as values. molecule_info : `str` The molecule symbol stripped of its charge. Z : `int` The charge number of the molecule. Raises ------ `InvalidParticleError` If the symbol couldn't be parsed. Warns ----- : `ParticleWarning` If the charge is given both as an argument and in the symbol. """ molecule_info, z_from_arg = extract_charge(argument) if not re.fullmatch(r"(?:[A-Z][a-z]?\d*)+", molecule_info): raise InvalidParticleError( f"{molecule_info} is not recognized as a molecule symbol." ) elements_dict = {} for match in re.finditer(r"([A-Z][a-z]?)(\d+)?", molecule_info): element, amount = match.groups(default="1") if element in elements_dict: elements_dict[element] += int(amount) else: elements_dict[element] = int(amount) if Z is not None and z_from_arg is not None: if Z != z_from_arg: raise InvalidParticleError( "The charge number extracted from the particle string " f"{argument!r} is inconsistent with the keyword Z = {Z}." ) else: warnings.warn( "Redundant charge information for particle " f"'{argument}' with Z = {Z}.", ParticleWarning, ) if z_from_arg is not None: Z = z_from_arg return elements_dict, molecule_info, Z
def parse_and_check_molecule_input(argument: str, Z: Integral = None): """ Separate the constitutive elements and charge of a molecule symbol. Parameters ---------- argument : `str` The molecule symbol to be parsed. Z : integer, optional The provided charge number. Returns ------- elements_dict : `dict` A dictionary with identified element symbols as keys and amount of each as values. molecule_info : `str` The molecule symbol stripped of its charge. Z : `int` The charge number of the molecule. Raises ------ `InvalidParticleError` If ``argument`` could not be parsed as a molecule. Warns ----- : `ParticleWarning` If the charge is given both as an argument and in the symbol. """ molecule_info, z_from_arg = extract_charge(argument) if not re.fullmatch(r"(?:[A-Z][a-z]?\d*)+", molecule_info): raise InvalidParticleError( f"{molecule_info} is not recognized as a molecule symbol." ) elements_dict = {} for match in re.finditer(r"([A-Z][a-z]?)(\d+)?", molecule_info): element, amount = match.groups(default="1") if element in elements_dict: elements_dict[element] += int(amount) else: elements_dict[element] = int(amount) if Z is not None and z_from_arg is not None: if Z != z_from_arg: raise InvalidParticleError( "The charge number extracted from the particle string " f"{argument!r} is inconsistent with the keyword Z = {Z}." ) else: warnings.warn( "Redundant charge information for particle " f"'{argument}' with Z = {Z}.", ParticleWarning, ) if z_from_arg is not None: Z = z_from_arg return elements_dict, molecule_info, Z
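A minimal, self-contained sketch of how the element-counting regex in the pair above behaves; "C6H12O6" is a hypothetical example input chosen for illustration, and charge handling is omitted:

import re

# Mirror of the element-counting loop above, run on an invented formula.
elements_dict = {}
for match in re.finditer(r"([A-Z][a-z]?)(\d+)?", "C6H12O6"):
    element, amount = match.groups(default="1")
    elements_dict[element] = elements_dict.get(element, 0) + int(amount)
print(elements_dict)  # {'C': 6, 'H': 12, 'O': 6}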
41,957
def get_extras_require() -> Dict[str, List[str]]: requirements = { # TODO(HideakiImamura) Unpin mypy version after fixing "Duplicate modules" error in # examples and tutorials. "checking": ["black", "hacking", "isort", "mypy==0.790", "blackdoc"], "codecov": ["codecov", "pytest-cov"], "doctest": [ "cma", "matplotlib>=3.0.0", "pandas", "plotly>=4.0.0", "scikit-learn>=0.19.0,<0.23.0", "scikit-optimize", "mlflow", ], "document": [ "sphinx", "sphinx_rtd_theme", "sphinx-copybutton", "sphinx-gallery", "sphinx-plotly-directive", "pillow", "matplotlib", "scikit-learn", "plotly>=4.0.0", # optuna/visualization. "pandas", "lightgbm", "torch==1.8.0", "torchvision==0.9.0", "torchaudio==0.8.0", "thop", ], "example": [ "catboost", "chainer", "lightgbm", "mlflow", "mpi4py", "mxnet", "nbval", "scikit-image", "scikit-learn>=0.19.0,<0.23.0", # optuna/visualization/param_importances.py. "xgboost", "keras", "tensorflow>=2.0.0", "tensorflow-datasets", "pytorch-ignite", "pytorch-lightning>=1.0.2", "thop", "skorch", "stable-baselines3>=0.7.0", "catalyst", "torch==1.8.0 ; sys_platform=='darwin'", "torch==1.8.0+cpu ; sys_platform!='darwin'", "torchvision==0.9.0 ; sys_platform=='darwin'", "torchvision==0.9.0+cpu ; sys_platform!='darwin'", "torchaudio==0.8.0", # "allennlp>=2.0.0", # See https://github.com/optuna/optuna/pull/2442. "dask[dataframe]", "dask-ml", "botorch>=0.4.0 ; python_version>'3.6'", "fastai", "optax", "dm-haiku", "hydra-optuna-sweeper", ], "experimental": ["redis"], "testing": [ # TODO(toshihikoyanase): Remove the version constraint after resolving the issue # https://github.com/optuna/optuna/issues/1000. "bokeh<2.0.0", "chainer>=5.0.0", "cma", "fakeredis", "lightgbm", "matplotlib>=3.0.0", "mlflow", "mpi4py", "mxnet", "pandas", "plotly>=4.0.0", "pytest", "scikit-learn>=0.19.0,<0.23.0", "scikit-optimize", "xgboost", "keras", "tensorflow", "tensorflow-datasets", "pytorch-ignite", "pytorch-lightning>=1.0.2", "skorch", "catalyst", "torch==1.8.0 ; sys_platform=='darwin'", "torch==1.8.0+cpu ; sys_platform!='darwin'", "torchvision==0.9.0 ; sys_platform=='darwin'", "torchvision==0.9.0+cpu ; sys_platform!='darwin'", "torchaudio==0.8.0", # "allennlp>=2.0.0", # See https://github.com/optuna/optuna/pull/2442. "botorch>=0.4.0 ; python_version>'3.6'", "fastai", ], "tests": [ "fakeredis", "pytest", ], "optional": [ "bokeh<2.0.0", # optuna/cli.py, optuna/dashboard.py. "matplotlib>=3.0.0", # optuna/visualization/matplotlib "pandas", # optuna/study.py "plotly>=4.0.0", # optuna/visualization. "redis", # optuna/storages/redis.py. "scikit-learn>=0.19.0,<0.23.0", # optuna/visualization/param_importances.py. ], "integration": [ # TODO(toshihikoyanase): Remove the version constraint after resolving the issue # https://github.com/optuna/optuna/issues/1000. "chainer>=5.0.0", "cma", "lightgbm", "mlflow", "mpi4py", "mxnet", "pandas", "scikit-learn>=0.19.0,<0.23.0", "scikit-optimize", "xgboost", "keras", "tensorflow", "tensorflow-datasets", "pytorch-ignite", "pytorch-lightning>=1.0.2", "skorch", "catalyst", "torch==1.8.0 ; sys_platform=='darwin'", "torch==1.8.0+cpu ; sys_platform!='darwin'", "torchvision==0.9.0 ; sys_platform=='darwin'", "torchvision==0.9.0+cpu ; sys_platform!='darwin'", "torchaudio==0.8.0", # "allennlp>=2.0.0", # See https://github.com/optuna/optuna/pull/2442. "botorch>=0.4.0 ; python_version>'3.6'", "fastai", ], } return requirements
def get_extras_require() -> Dict[str, List[str]]: requirements = { # TODO(HideakiImamura) Unpin mypy version after fixing "Duplicate modules" error in # examples and tutorials. "checking": ["black", "hacking", "isort", "mypy==0.790", "blackdoc"], "codecov": ["codecov", "pytest-cov"], "doctest": [ "cma", "matplotlib>=3.0.0", "pandas", "plotly>=4.0.0", "scikit-learn>=0.19.0,<0.23.0", "scikit-optimize", "mlflow", ], "document": [ "sphinx", "sphinx_rtd_theme", "sphinx-copybutton", "sphinx-gallery", "sphinx-plotly-directive", "pillow", "matplotlib", "scikit-learn", "plotly>=4.0.0", # optuna/visualization. "pandas", "lightgbm", "torch==1.8.0", "torchvision==0.9.0", "torchaudio==0.8.0", "thop", ], "example": [ "catboost", "chainer", "lightgbm", "mlflow", "mpi4py", "mxnet", "nbval", "scikit-image", "scikit-learn>=0.19.0,<0.23.0", # optuna/visualization/param_importances.py. "xgboost", "keras", "tensorflow>=2.0.0", "tensorflow-datasets", "pytorch-ignite", "pytorch-lightning>=1.0.2", "thop", "skorch", "stable-baselines3>=0.7.0", "catalyst>=21.3", "torch==1.8.0 ; sys_platform=='darwin'", "torch==1.8.0+cpu ; sys_platform!='darwin'", "torchvision==0.9.0 ; sys_platform=='darwin'", "torchvision==0.9.0+cpu ; sys_platform!='darwin'", "torchaudio==0.8.0", # "allennlp>=2.0.0", # See https://github.com/optuna/optuna/pull/2442. "dask[dataframe]", "dask-ml", "botorch>=0.4.0 ; python_version>'3.6'", "fastai", "optax", "dm-haiku", "hydra-optuna-sweeper", ], "experimental": ["redis"], "testing": [ # TODO(toshihikoyanase): Remove the version constraint after resolving the issue # https://github.com/optuna/optuna/issues/1000. "bokeh<2.0.0", "chainer>=5.0.0", "cma", "fakeredis", "lightgbm", "matplotlib>=3.0.0", "mlflow", "mpi4py", "mxnet", "pandas", "plotly>=4.0.0", "pytest", "scikit-learn>=0.19.0,<0.23.0", "scikit-optimize", "xgboost", "keras", "tensorflow", "tensorflow-datasets", "pytorch-ignite", "pytorch-lightning>=1.0.2", "skorch", "catalyst", "torch==1.8.0 ; sys_platform=='darwin'", "torch==1.8.0+cpu ; sys_platform!='darwin'", "torchvision==0.9.0 ; sys_platform=='darwin'", "torchvision==0.9.0+cpu ; sys_platform!='darwin'", "torchaudio==0.8.0", # "allennlp>=2.0.0", # See https://github.com/optuna/optuna/pull/2442. "botorch>=0.4.0 ; python_version>'3.6'", "fastai", ], "tests": [ "fakeredis", "pytest", ], "optional": [ "bokeh<2.0.0", # optuna/cli.py, optuna/dashboard.py. "matplotlib>=3.0.0", # optuna/visualization/matplotlib "pandas", # optuna/study.py "plotly>=4.0.0", # optuna/visualization. "redis", # optuna/storages/redis.py. "scikit-learn>=0.19.0,<0.23.0", # optuna/visualization/param_importances.py. ], "integration": [ # TODO(toshihikoyanase): Remove the version constraint after resolving the issue # https://github.com/optuna/optuna/issues/1000. "chainer>=5.0.0", "cma", "lightgbm", "mlflow", "mpi4py", "mxnet", "pandas", "scikit-learn>=0.19.0,<0.23.0", "scikit-optimize", "xgboost", "keras", "tensorflow", "tensorflow-datasets", "pytorch-ignite", "pytorch-lightning>=1.0.2", "skorch", "catalyst", "torch==1.8.0 ; sys_platform=='darwin'", "torch==1.8.0+cpu ; sys_platform!='darwin'", "torchvision==0.9.0 ; sys_platform=='darwin'", "torchvision==0.9.0+cpu ; sys_platform!='darwin'", "torchaudio==0.8.0", # "allennlp>=2.0.0", # See https://github.com/optuna/optuna/pull/2442. "botorch>=0.4.0 ; python_version>'3.6'", "fastai", ], } return requirements
41,903
def _compute_2d(solution_set: np.ndarray, reference_point: np.ndarray) -> float:
    """Compute the hypervolume for the two-dimensional space.

    Args:
        solution_set:
            The solution set which we want to compute the hypervolume.
        reference_point:
            The reference point to compute the hypervolume.
    """
    rx, ry = reference_point
    _solution_set = solution_set[np.lexsort((-solution_set[:, 1], solution_set[:, 0]))]

    hypervolume = 0.0
    for (xi, yi) in _solution_set:
        if ry - yi < 0:
            continue
        hypervolume += (rx - xi) * (ry - yi)
        ry = yi

    return hypervolume
def _compute_2d(solution_set: np.ndarray, reference_point: np.ndarray) -> float:
    """Compute the hypervolume for the two-dimensional space.

    Args:
        solution_set:
            The solution set which we want to compute the hypervolume.
        reference_point:
            The reference point to compute the hypervolume.
    """
    rx, ry = reference_point
    _solution_set = solution_set[np.lexsort((-solution_set[:, 1], solution_set[:, 0]))]

    hypervolume = 0.0
    for (xi, yi) in _solution_set:
        if ry < yi:
            continue
        hypervolume += (rx - xi) * (ry - yi)
        ry = yi

    return hypervolume
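A hand-worked sanity check for the 2D hypervolume routine above (minimization convention; the points and reference value are invented, and the sketch assumes it is run next to the function):

import numpy as np

# Staircase of three non-dominated points against reference point (4, 4):
# the dominated area is 3*1 + 2*1 + 1*1 = 6.
solution_set = np.array([[1.0, 3.0], [2.0, 2.0], [3.0, 1.0]])
reference_point = np.array([4.0, 4.0])
assert _compute_2d(solution_set, reference_point) == 6.0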
15,401
def hb_info_from_type(dev_type="std"):
    """Return the proper info array for the device type."""
    if "std" in dev_type:
        return DEVICE_SENSORS
    if "HobbyBoard" in dev_type:
        return HOBBYBOARD_EF
    if "EDS00xx" in dev_type:
        return EDS_SENSORS
def hb_info_from_type(dev_type="std"):
    """Return the proper info array for the device type."""
    if "std" in dev_type:
        return DEVICE_SENSORS
    if "HobbyBoard" in dev_type:
        return HOBBYBOARD_EF
    if "EDS" in dev_type:
        return EDS_SENSORS
8,796
def test_bot_mixed_mode_types(mockbot):
    """Ensure mixed argument- and non-argument- modes are handled.

    Sopel 6.6.6 and older did not behave well.

    .. seealso::

        Github Issue #1575.
    """
    irc = IRCFactory(mockbot)
    irc.channel_joined('#test', [
        'Uvoice', 'Uop', 'Uadmin', 'Uvoice2', 'Uop2', 'Uadmin2'])

    irc.mode_set('#test', '+amov', ['Uadmin', 'Uop', 'Uvoice'])
    assert mockbot.channels["#test"].privileges[Identifier("Uadmin")] == ADMIN
    assert mockbot.channels["#test"].privileges[Identifier("Uop")] == OP
    assert mockbot.channels["#test"].privileges[Identifier("Uvoice")] == VOICE

    irc.mode_set('#test', '+abov', ['Uadmin2', 'x!y@z', 'Uop2', 'Uvoice2'])
    assert mockbot.channels["#test"].privileges[Identifier("Uadmin2")] == 0
    assert mockbot.channels["#test"].privileges[Identifier("Uop2")] == 0
    assert mockbot.channels["#test"].privileges[Identifier("Uvoice2")] == 0
    assert mockbot.backend.message_sent == rawlist('WHO #test'), (
        'Upon finding an unexpected nick, the bot must send a WHO request.')
def test_bot_mixed_mode_types(mockbot):
    """Ensure mixed argument- and non-argument- modes are handled.

    Sopel 6.6.6 and older did not behave well.

    .. seealso::

        GitHub issue #1575.
    """
    irc = IRCFactory(mockbot)
    irc.channel_joined('#test', [
        'Uvoice', 'Uop', 'Uadmin', 'Uvoice2', 'Uop2', 'Uadmin2'])

    irc.mode_set('#test', '+amov', ['Uadmin', 'Uop', 'Uvoice'])
    assert mockbot.channels["#test"].privileges[Identifier("Uadmin")] == ADMIN
    assert mockbot.channels["#test"].privileges[Identifier("Uop")] == OP
    assert mockbot.channels["#test"].privileges[Identifier("Uvoice")] == VOICE

    irc.mode_set('#test', '+abov', ['Uadmin2', 'x!y@z', 'Uop2', 'Uvoice2'])
    assert mockbot.channels["#test"].privileges[Identifier("Uadmin2")] == 0
    assert mockbot.channels["#test"].privileges[Identifier("Uop2")] == 0
    assert mockbot.channels["#test"].privileges[Identifier("Uvoice2")] == 0
    assert mockbot.backend.message_sent == rawlist('WHO #test'), (
        'Upon finding an unexpected nick, the bot must send a WHO request.')
57,738
def fetch_incidents(client: Client, max_results: int, last_fetch: str, fetch_filter: str = '' ) -> Tuple[Dict[str, str], List[dict]]: """ :type client: ``Client`` :param client: Tripwire client to use :type max_results: ``int`` :param max_results: Maximum numbers of incidents per fetch :type last_fetch: ``str`` :param last_fetch: A string contains the last fetched date and time. :type fetch_filter: ``Optional[str]`` :param fetch_filter: A string that contains the filters for the command. :return: A tuple containing two elements: next_run (``Dict[str, str]``): Contains the datetime str that will be used in ``last_fetch`` on the next fetch. incidents (``List[dict]``): List of incidents that will be created in XSOAR :rtype: ``Tuple[Dict[str, int], List[dict]]`` """ incidents: List[Dict[str, Any]] = [] last_fetch = datetime.strptime(last_fetch, DATE_FORMAT) last_fetched_ids = demisto.getLastRun().get('fetched_ids', []) alerts = client.get_versions(fetch_filter) alerts = alerts[:int(max_results)] fetched_ids = [] for alert in alerts: incident_created_time = datetime.strptime(alert.get('timeDetected'), '%Y-%m-%dT%H:%M:%S.000Z') if incident_created_time < last_fetch: continue incident_name = alert.get('id') if incident_name in last_fetched_ids: continue incident = { 'name': incident_name, 'occurred': incident_created_time.strftime(DATE_FORMAT), 'rawJSON': json.dumps(alert), } incidents.append(incident) last_fetch = incident_created_time fetched_ids.extend([alert.get('id')]) next_run = {'lastRun': last_fetch.strftime(DATE_FORMAT), 'fetched_ids': fetched_ids if fetched_ids else last_fetched_ids} return next_run, incidents
def fetch_incidents(client: Client, max_results: int, last_fetch: str, fetch_filter: str = '' ) -> Tuple[Dict[str, str], List[dict]]: """ :type client: ``Client`` :param client: Tripwire client to use :type max_results: ``int`` :param max_results: Maximum numbers of incidents per fetch :type last_fetch: ``str`` :param last_fetch: A string contains the last fetched date and time. :type fetch_filter: ``Optional[str]`` :param fetch_filter: A string that contains the filters for the command. :return: A tuple containing two elements: next_run (``Dict[str, str]``): Contains the datetime str that will be used in ``last_fetch`` on the next fetch. incidents (``List[dict]``): List of incidents that will be created in XSOAR :rtype: ``Tuple[Dict[str, int], List[dict]]`` """ incidents: List[Dict[str, Any]] = [] last_fetch = datetime.strptime(last_fetch, DATE_FORMAT) last_fetched_ids = demisto.getLastRun().get('fetched_ids', []) alerts = client.get_versions(fetch_filter) alerts = alerts[:int(max_results)] fetched_ids = [] for alert in alerts: incident_created_time = datetime.strptime(alert.get('timeDetected'), '%Y-%m-%dT%H:%M:%S.000Z') if incident_created_time < last_fetch: continue incident_name = alert.get('id') if incident_name in last_fetched_ids: continue last_fetch = incident_created_time incident = { 'name': incident_name, 'occurred': incident_created_time.strftime(DATE_FORMAT), 'rawJSON': json.dumps(alert), } incidents.append(incident) fetched_ids.extend([alert.get('id')]) next_run = {'lastRun': last_fetch.strftime(DATE_FORMAT), 'fetched_ids': fetched_ids if fetched_ids else last_fetched_ids} return next_run, incidents
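A small illustration of the timestamp handling used by the fetch loop above; the alert id and detection time are invented, while the format string is the one from the code:

from datetime import datetime

# How a single detected change maps onto a datetime for last_fetch comparison.
alert = {"id": "12345", "timeDetected": "2021-04-01T10:15:30.000Z"}
detected = datetime.strptime(alert["timeDetected"], "%Y-%m-%dT%H:%M:%S.000Z")
print(detected.isoformat())  # 2021-04-01T10:15:30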
6,017
def _handler_is_visible(wrapper, interaction):
    """ Return true if the target's control is visible.

    Parameters
    ----------
    wrapper : UIWrapper
        Wrapper on which the target's control should be a QWidget
    interaction : IsVisible
        Not currently used.
    """
    return wrapper._target.control.isVisible()
def _handle_is_visible(wrapper, interaction):
    """ Return true if the target's control is visible.

    Parameters
    ----------
    wrapper : UIWrapper
        Wrapper on which the target's control should be a QWidget
    interaction : IsVisible
        Not currently used.
    """
    return wrapper._target.control.isVisible()
13,547
def implicit_euler(A, F, M, U0, t0, t1, nt, mu=None, num_values=None, solver_options='operator'): assert isinstance(A, OperatorInterface) assert isinstance(F, (type(None), OperatorInterface, VectorArrayInterface)) assert isinstance(M, (type(None), OperatorInterface)) assert A.source == A.range num_values = num_values or nt + 1 dt = (t1 - t0) / nt DT = (t1 - t0) / (num_values - 1) if F is None: F_time_dep = False elif isinstance(F, OperatorInterface): assert F.source.dim == 1 assert F.range == A.range F_time_dep = F.parametric and '_t' in F.parameter_type if not F_time_dep: dt_F = F.as_vector(mu) * dt else: assert len(F) == 1 assert F in A.range F_time_dep = False dt_F = F * dt if M is None: from pymor.operators.constructions import IdentityOperator M = IdentityOperator(A.source) assert A.source == M.source == M.range assert not M.parametric assert U0 in A.source assert len(U0) == 1 R = A.source.empty(reserve=nt+1) R.append(U0) options = A.solver_options if solver_options == 'operator' else \ M.solver_options if solver_options == 'mass' else \ solver_options M_dt_A = (M + A * dt).with_(solver_options=options) if not M_dt_A.parametric or '_t' not in M_dt_A.solver_options: M_dt_A = M_dt_A.assemble(mu) t = t0 U = U0.copy() for n in range(nt): t += dt mu['_t'] = t rhs = M.apply(U) if F_time_dep: dt_F = F.as_vector(mu) * dt if F: rhs += dt_F U = M_dt_A.apply_inverse(rhs, mu=mu) while t - t0 + (min(dt, DT) * 0.5) >= len(R) * DT: R.append(U) return R
def implicit_euler(A, F, M, U0, t0, t1, nt, mu=None, num_values=None, solver_options='operator'): assert isinstance(A, OperatorInterface) assert isinstance(F, (type(None), OperatorInterface, VectorArrayInterface)) assert isinstance(M, (type(None), OperatorInterface)) assert A.source == A.range num_values = num_values or nt + 1 dt = (t1 - t0) / nt DT = (t1 - t0) / (num_values - 1) if F is None: F_time_dep = False elif isinstance(F, OperatorInterface): assert F.source.dim == 1 assert F.range == A.range F_time_dep = F.parametric and '_t' in F.parameter_type if not F_time_dep: dt_F = F.as_vector(mu) * dt else: assert len(F) == 1 assert F in A.range F_time_dep = False dt_F = F * dt if M is None: from pymor.operators.constructions import IdentityOperator M = IdentityOperator(A.source) assert A.source == M.source == M.range assert not M.parametric assert U0 in A.source assert len(U0) == 1 R = A.source.empty(reserve=nt+1) R.append(U0) options = A.solver_options if solver_options == 'operator' else \ M.solver_options if solver_options == 'mass' else \ solver_options M_dt_A = (M + A * dt).with_(solver_options=options) if not M_dt_A.parametric or '_t' not in M_dt_A.parameter_type: M_dt_A = M_dt_A.assemble(mu) t = t0 U = U0.copy() for n in range(nt): t += dt mu['_t'] = t rhs = M.apply(U) if F_time_dep: dt_F = F.as_vector(mu) * dt if F: rhs += dt_F U = M_dt_A.apply_inverse(rhs, mu=mu) while t - t0 + (min(dt, DT) * 0.5) >= len(R) * DT: R.append(U) return R
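For reference, a reading of the time-stepping loop above (not part of the dataset row itself): each iteration solves the standard implicit Euler update, with F re-evaluated at the new time when it is time-dependent. In LaTeX:

(M + \Delta t \, A)\, U_{n+1} = M\, U_n + \Delta t\, F(\mu, t_{n+1})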
2,494
def test_gradient_boosting_early_stopping(): X, y = make_classification(n_samples=1000, random_state=0) gbc = GradientBoostingClassifier( n_estimators=100, n_iter_no_change=10, learning_rate=0.1, max_depth=3, random_state=42, ) gbr = GradientBoostingRegressor( n_estimators=100, n_iter_no_change=10, learning_rate=0.1, max_depth=3, random_state=42, ) X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42) # Check if early_stopping works as expected for est, tol, early_stop_n_estimators in ( (gbc, 1e-1, 28), (gbr, 1e-1, 13), (gbc, 1e-3, 70), (gbr, 1e-3, 28), ): est.set_params(tol=tol) est.fit(X_train, y_train) assert est.n_estimators_ == early_stop_n_estimators assert est.score(X_test, y_test) > 0.7 # Without early stopping gbc = GradientBoostingClassifier( n_estimators=5, learning_rate=0.1, max_depth=3, random_state=42 ) gbc.fit(X, y) gbr = GradientBoostingRegressor( n_estimators=10, learning_rate=0.1, max_depth=3, random_state=42 ) gbr.fit(X, y) assert gbc.n_estimators_ == 5 assert gbr.n_estimators_ == 10
def test_gradient_boosting_early_stopping(): X, y = make_classification(n_samples=1000, random_state=0) gbc = GradientBoostingClassifier( n_estimators=100, n_iter_no_change=10, learning_rate=0.1, max_depth=3, random_state=42, ) gbr = GradientBoostingRegressor( n_estimators=100, n_iter_no_change=10, learning_rate=0.1, max_depth=3, random_state=42, ) X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42) # Check if early_stopping works as expected for est, tol, early_stop_n_estimators in ( (gbc, 1e-1, 28), (gbr, 1e-1, 13), (gbc, 1e-3, 70), (gbr, 1e-3, 28), ): est.set_params(tol=tol) est.fit(X_train, y_train) assert est.n_estimators_ == early_stop_n_estimators assert est.score(X_test, y_test) > 0.7 # Without early stopping gbc = GradientBoostingClassifier( n_estimators=5, learning_rate=0.1, max_depth=3, random_state=42 ) gbc.fit(X, y) gbr = GradientBoostingRegressor( n_estimators=30, learning_rate=0.1, max_depth=3, random_state=42 ) gbr.fit(X, y) assert gbc.n_estimators_ == 5 assert gbr.n_estimators_ == 10
5,924
def assert_contains_expected_lines(string, expected_lines):
    for expected_line in expected_lines:
        assert expected_line in string
def assert_contains_expected_lines(string, expected_lines):
    for expected_line in expected_lines:
        assert (expected_line + "\n") in string
30,033
def check_reset_seed(env: gym.Env): """Check that the environment can be reset with a seed. Args: env: The environment to check Raises: AssertionError: The environment cannot be reset with a random seed, even though `seed` or `kwargs` appear in the signature. """ signature = inspect.signature(env.reset) if "seed" in signature.parameters or "kwargs" in signature.parameters: try: obs_1 = env.reset(seed=123) assert ( obs_1 in env.observation_space ), "The observation returns by `env.reset(seed=123)` is not within the observation space" seed_123_rng = deepcopy(env.unwrapped.np_random) obs_2 = env.reset(seed=123) assert ( obs_2 in env.observation_space ), "The observation returns by `env.reset(seed=123)` is not within the observation space" if env.spec is not None and env.spec.nondeterministic is False: assert data_equivalence( obs_1, obs_2 ), "`env.reset(seed=123)` is not deterministic as the observations are not equivalent" assert ( env.unwrapped.np_random.bit_generator.state == seed_123_rng.bit_generator.state ), ( "Mostly likely the environment reset function does not call `super().reset(seed=seed)` " "as the random generates are not same when the same seeds are passed to `env.reset`." ) obs_3 = env.reset(seed=456) assert ( obs_3 in env.observation_space ), "The observation returns by `env.reset(seed=456)` is not within the observation space" assert ( env.unwrapped.np_random.bit_generator.state != seed_123_rng.bit_generator.state ), ( "Mostly likely the environment reset function does not call `super().reset(seed=seed)` " "as the random generates are not different when different seeds are passed to `env.reset`." ) except TypeError as e: raise AssertionError( "The environment cannot be reset with a random seed, even though `seed` or `kwargs` appear in the signature. " "This should never happen, please report this issue. " f"The error was: {e}" ) if env.unwrapped._np_random is None: logger.warn( "Resetting the environment did not result in seeding its random number generator. " "This is likely due to not calling `super().reset(seed=seed)` in the `reset` method. " "If you do not use the python-level random number generator, this is not a problem." ) seed_param = signature.parameters.get("seed") # Check the default value is None if seed_param is not None and seed_param.default is not None: logger.warn( "The default seed argument in reset should be `None`, " "otherwise the environment will by default always be deterministic. " f"Actual default: {seed_param.default}" ) else: raise gym.error.Error( "The `reset` method does not provide the `seed` keyword argument" )
def check_reset_seed(env: gym.Env): """Check that the environment can be reset with a seed. Args: env: The environment to check Raises: AssertionError: The environment cannot be reset with a random seed, even though `seed` or `kwargs` appear in the signature. """ signature = inspect.signature(env.reset) if "seed" in signature.parameters or "kwargs" in signature.parameters: try: obs_1 = env.reset(seed=123) assert ( obs_1 in env.observation_space ), "The observation returned by `env.reset(seed=123)` is not within the observation space" seed_123_rng = deepcopy(env.unwrapped.np_random) obs_2 = env.reset(seed=123) assert ( obs_2 in env.observation_space ), "The observation returns by `env.reset(seed=123)` is not within the observation space" if env.spec is not None and env.spec.nondeterministic is False: assert data_equivalence( obs_1, obs_2 ), "`env.reset(seed=123)` is not deterministic as the observations are not equivalent" assert ( env.unwrapped.np_random.bit_generator.state == seed_123_rng.bit_generator.state ), ( "Mostly likely the environment reset function does not call `super().reset(seed=seed)` " "as the random generates are not same when the same seeds are passed to `env.reset`." ) obs_3 = env.reset(seed=456) assert ( obs_3 in env.observation_space ), "The observation returns by `env.reset(seed=456)` is not within the observation space" assert ( env.unwrapped.np_random.bit_generator.state != seed_123_rng.bit_generator.state ), ( "Mostly likely the environment reset function does not call `super().reset(seed=seed)` " "as the random generates are not different when different seeds are passed to `env.reset`." ) except TypeError as e: raise AssertionError( "The environment cannot be reset with a random seed, even though `seed` or `kwargs` appear in the signature. " "This should never happen, please report this issue. " f"The error was: {e}" ) if env.unwrapped._np_random is None: logger.warn( "Resetting the environment did not result in seeding its random number generator. " "This is likely due to not calling `super().reset(seed=seed)` in the `reset` method. " "If you do not use the python-level random number generator, this is not a problem." ) seed_param = signature.parameters.get("seed") # Check the default value is None if seed_param is not None and seed_param.default is not None: logger.warn( "The default seed argument in reset should be `None`, " "otherwise the environment will by default always be deterministic. " f"Actual default: {seed_param.default}" ) else: raise gym.error.Error( "The `reset` method does not provide the `seed` keyword argument" )
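A minimal sketch of a reset method that would pass the checks above, assuming a Gym-style environment; the class name, spaces, and observation logic are invented for illustration:

import gym
import numpy as np
from gym import spaces


class DummyEnv(gym.Env):
    """Hypothetical environment whose reset satisfies check_reset_seed."""

    observation_space = spaces.Box(low=-1.0, high=1.0, shape=(2,), dtype=np.float32)
    action_space = spaces.Discrete(2)

    def reset(self, *, seed=None, options=None):
        # Seeding the base class populates self.np_random, which the check inspects,
        # so reset(seed=123) is reproducible and reset(seed=456) diverges from it.
        super().reset(seed=seed)
        return self.np_random.uniform(-1.0, 1.0, size=2).astype(np.float32)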
30,111
def test_sig_extract_8_nofile_picklist_fail(runtmp): # what happens with an empty picklist? sig47 = utils.get_test_data('47.fa.sig') sig63 = utils.get_test_data('63.fa.sig') # picklist file does not exist picklist_csv = runtmp.output('pick.csv') picklist_arg = f"{picklist_csv}:md5full:md5" with pytest.raises(SourmashCommandFailed): runtmp.sourmash('sig', 'extract', sig47, sig63, '--picklist', picklist_arg) err = runtmp.last_result.err print(err) assert "must exist and be a regular file" in err
def test_sig_extract_8_nofile_picklist_fail(runtmp): # what happens when picklist file does not exist? sig47 = utils.get_test_data('47.fa.sig') sig63 = utils.get_test_data('63.fa.sig') # picklist file does not exist picklist_csv = runtmp.output('pick.csv') picklist_arg = f"{picklist_csv}:md5full:md5" with pytest.raises(SourmashCommandFailed): runtmp.sourmash('sig', 'extract', sig47, sig63, '--picklist', picklist_arg) err = runtmp.last_result.err print(err) assert "must exist and be a regular file" in err
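The --picklist argument in this test packs three colon-separated fields: the CSV path, the column name to pick on, and the picklist column type. A small sketch of just that formatting (path hypothetical):

picklist_csv = "pick.csv"                      # hypothetical output path
picklist_arg = f"{picklist_csv}:md5full:md5"   # <csv path>:<column name>:<column type>
print(picklist_arg)                            # pick.csv:md5full:md5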
56,286
def sisdr(y, target, db_flag = True, reg_pow_db = None, inv_flag=False, mean_flag=False, onelog_flag=False, beta_flag=False): dim_agregate = -1 reg_pow = EPS if reg_pow_db is None else 10**(reg_pow_db/10) * y.shape[1] target = target - torch.mean(target, dim=dim_agregate, keepdim=True) y = y - torch.mean(y, dim=dim_agregate, keepdim=True) acc_f = torch.mean if mean_flag else torch.sum y_by_target = acc_f(y * target, dim=dim_agregate, keepdim=True) t2 = acc_f(target ** 2, dim=dim_agregate, keepdim=True) if beta_flag: #scale model output to target y_target = target beta = (t2 + reg_pow)/(y_by_target+reg_pow) y_noise = y * beta - target else: #scale target to model output alfa = y_by_target/(t2 + reg_pow) y_target = alfa * target y_noise = y - y_target target_pow = acc_f(y_target ** 2, dim=dim_agregate) noise_pow = acc_f(y_noise ** 2, dim=dim_agregate) if db_flag: if onelog_flag: l = -10 * torch.log10((noise_pow + reg_pow)/(target_pow + reg_pow)) else: l = 10 * torch.log10(target_pow + reg_pow) - 10 * torch.log10(noise_pow + reg_pow) if inv_flag: l = -l else: if inv_flag: l = (noise_pow + reg_pow) / (target_pow + reg_pow) else: l = (target_pow + reg_pow) / (noise_pow + reg_pow) return l
def sisdr(y, target, db_flag = True, reg_pow_db = None, inv_flag=False, mean_flag=False, onelog_flag=False, beta_flag=False): dim_agregate = -1 reg_pow = EPS if reg_pow_db is None else 10**(reg_pow_db/10) * y.shape[1] target = target - torch.mean(target, dim=dim_agregate, keepdim=True) y = y - torch.mean(y, dim=dim_agregate, keepdim=True) acc_f = torch.mean if mean_flag else torch.sum y_by_target = acc_f(y * target, dim=dim_agregate, keepdim=True) t2 = acc_f(target ** 2, dim=dim_agregate, keepdim=True) if beta_flag: #scale model output to target y_target = target beta = (t2 + reg_pow)/(y_by_target+reg_pow) y_noise = y * beta - target else: #scale target to model output alfa = y_by_target/(t2 + reg_pow) y_target = alfa * target y_noise = y - y_target target_pow = acc_f(y_target ** 2, dim=dim_agregate) noise_pow = acc_f(y_noise ** 2, dim=dim_agregate) if db_flag: if onelog_flag: l = -10 * torch.log10((noise_pow + reg_pow)/(target_pow + reg_pow)) else: l = 10 * torch.log10(target_pow + reg_pow) - 10 * torch.log10(noise_pow + reg_pow) if inv_flag: l = -l else: if inv_flag: l = (noise_pow + reg_pow) / (target_pow + reg_pow) else: l = (target_pow + reg_pow) / (noise_pow + reg_pow) return l
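A usage sketch for the function above (assuming PyTorch is available and a module-level EPS constant exists, here taken to be 1e-8): with the default flags it returns SI-SDR in dB per example, and inv_flag negates it for use as a training loss.

import torch

EPS = 1e-8  # assumed value of the module-level constant used by sisdr

y = torch.randn(4, 16000)        # batch of estimated signals
target = torch.randn(4, 16000)   # batch of reference signals

scores_db = sisdr(y, target)                    # shape (4,), higher is better
loss = sisdr(y, target, inv_flag=True).mean()   # negated dB, minimised in training
print(scores_db.shape, loss.item())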
34,100
def _add_partitions_to_table(
    table: "pyarrow.Table", partitions: Dict[str, Any]
) -> "pyarrow.Table":
    for field in table.column_names:
        if field in partitions:
            raise RuntimeError(
                f"{field} is a partition key, but it's also the name of a column in the "
                "read dataset."
            )

    num_columns = table.num_columns
    for i, (field, value) in enumerate(partitions.items()):
        column = [[value] * len(table)]
        table = table.add_column(num_columns + i, field, column)

    return table
def _add_partitions_to_table(
    table: "pyarrow.Table", partitions: Dict[str, Any]
) -> "pyarrow.Table":
    for field in table.column_names:
        if field in partitions:
            raise ValueError(
                f"{field} is a partition key, but it's also the name of a column in the "
                "read dataset."
            )

    num_columns = table.num_columns
    for i, (field, value) in enumerate(partitions.items()):
        column = [[value] * len(table)]
        table = table.add_column(num_columns + i, field, column)

    return table
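A small usage sketch (assuming pyarrow is installed): each partition key becomes a new column whose single value is repeated for every row of the table.

import pyarrow as pa

table = pa.table({"value": [1, 2, 3]})
partitions = {"year": 2023, "country": "US"}

table = _add_partitions_to_table(table, partitions)
print(table.column_names)    # ['value', 'year', 'country']
print(table.column("year"))  # three rows, all 2023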
25,772
def adjacency_matrix(network, branch_components=None, investment_period=None, busorder=None, weights=None): """ Construct a sparse adjacency matrix (directed) Parameters ---------- branch_components : iterable sublist of `branch_components` Buses connected by any of the selected branches are adjacent (default: branch_components (network) or passive_branch_components (sub_network)) busorder : pd.Index subset of network.buses.index Basis to use for the matrix representation of the adjacency matrix (default: buses.index (network) or buses_i() (sub_network)) weights : pd.Series or None (default) If given must provide a weight for each branch, multi-indexed on branch_component name and branch name. Returns ------- adjacency_matrix : sp.sparse.coo_matrix Directed adjacency matrix """ from . import components if isinstance(network, components.Network): if branch_components is None: branch_components = network.branch_components if busorder is None: busorder = network.buses.index elif isinstance(network, components.SubNetwork): if branch_components is None: branch_components = network.network.passive_branch_components if busorder is None: busorder = network.buses_i() else: raise TypeError(" must be called with a Network or a SubNetwork") no_buses = len(busorder) no_branches = 0 bus0_inds = [] bus1_inds = [] weight_vals = [] for c in network.iterate_components(branch_components): if c.ind is None: if investment_period is None: sel = slice(None) else: active = get_active_assets(network, c.name, investment_period, network.snapshots) sel = c.df.loc[active].index else: if investment_period is None: sel = c.ind else: active = get_active_assets(network, c.name, investment_period, network.snapshots) sel = c.ind & c.df.loc[active].index no_branches = len(c.df.loc[sel]) bus0_inds.append(busorder.get_indexer(c.df.loc[sel, "bus0"])) bus1_inds.append(busorder.get_indexer(c.df.loc[sel, "bus1"])) weight_vals.append(np.ones(no_branches) if weights is None else weights[c.name][sel].values) if no_branches == 0: return sp.sparse.coo_matrix((no_buses, no_buses)) bus0_inds = np.concatenate(bus0_inds) bus1_inds = np.concatenate(bus1_inds) weight_vals = np.concatenate(weight_vals) return sp.sparse.coo_matrix((weight_vals, (bus0_inds, bus1_inds)), shape=(no_buses, no_buses))
def adjacency_matrix(network, branch_components=None, investment_period=None, busorder=None, weights=None): """ Construct a sparse adjacency matrix (directed) Parameters ---------- branch_components : iterable sublist of `branch_components` Buses connected by any of the selected branches are adjacent (default: branch_components (network) or passive_branch_components (sub_network)) busorder : pd.Index subset of network.buses.index Basis to use for the matrix representation of the adjacency matrix (default: buses.index (network) or buses_i() (sub_network)) weights : pd.Series or None (default) If given must provide a weight for each branch, multi-indexed on branch_component name and branch name. Returns ------- adjacency_matrix : sp.sparse.coo_matrix Directed adjacency matrix """ from . import components if isinstance(network, components.Network): if branch_components is None: branch_components = network.branch_components if busorder is None: busorder = network.buses.index elif isinstance(network, components.SubNetwork): if branch_components is None: branch_components = network.network.passive_branch_components if busorder is None: busorder = network.buses_i() else: raise TypeError(" must be called with a Network or a SubNetwork") no_buses = len(busorder) no_branches = 0 bus0_inds = [] bus1_inds = [] weight_vals = [] for c in network.iterate_components(branch_components): if c.ind is None: if investment_period is None: sel = slice(None) else: active = get_active_assets(network, c.name, investment_period, network.snapshots) sel = c.df.loc[active].index else: if investment_period is None: sel = c.ind else: active = get_active_assets(network, c.name, investment_period, network.snapshots) sel = c.ind & c.df.loc[active].index no_branches = len(c.df.loc[sel]) bus0_inds.append(busorder.get_indexer(c.df.loc[sel, "bus0"])) bus1_inds.append(busorder.get_indexer(c.df.loc[sel, "bus1"])) weight_vals.append(np.ones(no_branches) if weights is None else weights[c.name][sel].values) if no_branches == 0: return sp.sparse.coo_matrix((no_buses, no_buses)) bus0_inds = np.concatenate(bus0_inds) bus1_inds = np.concatenate(bus1_inds) weight_vals = np.concatenate(weight_vals) return sp.sparse.coo_matrix((weight_vals, (bus0_inds, bus1_inds)), shape=(no_buses, no_buses))
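Stripped of the PyPSA component iteration, the construction reduces to mapping each branch's bus0/bus1 label to its position in busorder and handing those index arrays to SciPy's COO constructor. A standalone sketch of that step with hypothetical buses and branches:

import numpy as np
import pandas as pd
import scipy.sparse

busorder = pd.Index(["bus_a", "bus_b", "bus_c"])
branches = pd.DataFrame({"bus0": ["bus_a", "bus_b"], "bus1": ["bus_b", "bus_c"]})

bus0_inds = busorder.get_indexer(branches["bus0"])   # [0, 1]
bus1_inds = busorder.get_indexer(branches["bus1"])   # [1, 2]
weights = np.ones(len(branches))

adj = scipy.sparse.coo_matrix(
    (weights, (bus0_inds, bus1_inds)), shape=(len(busorder), len(busorder))
)
print(adj.toarray())   # directed entries at (bus_a -> bus_b) and (bus_b -> bus_c)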
22,419
def set_metadata_portable(): tool_job_working_directory = os.path.abspath(os.getcwd()) metadata_tmp_files_dir = os.path.join(tool_job_working_directory, "metadata") MetadataTempFile.tmp_dir = metadata_tmp_files_dir metadata_params_path = os.path.join("metadata", "params.json") try: with open(metadata_params_path) as f: metadata_params = json.load(f) except OSError: raise Exception("Failed to find metadata/params.json from cwd [%s]" % tool_job_working_directory) datatypes_config = metadata_params["datatypes_config"] job_metadata = metadata_params["job_metadata"] provided_metadata_style = metadata_params.get("provided_metadata_style") max_metadata_value_size = metadata_params.get("max_metadata_value_size") or 0 outputs = metadata_params["outputs"] datatypes_registry = validate_and_load_datatypes_config(datatypes_config) tool_provided_metadata = load_job_metadata(job_metadata, provided_metadata_style) def set_meta(new_dataset_instance, file_dict): set_meta_with_tool_provided(new_dataset_instance, file_dict, set_meta_kwds, datatypes_registry, max_metadata_value_size) object_store_conf_path = os.path.join("metadata", "object_store_conf.json") extended_metadata_collection = os.path.exists(object_store_conf_path) object_store = None job_context = None version_string = "" export_store = None final_job_state = 'ok' if extended_metadata_collection: tool_dict = metadata_params["tool"] stdio_exit_code_dicts, stdio_regex_dicts = tool_dict["stdio_exit_codes"], tool_dict["stdio_regexes"] stdio_exit_codes = list(map(ToolStdioExitCode, stdio_exit_code_dicts)) stdio_regexes = list(map(ToolStdioRegex, stdio_regex_dicts)) with open(object_store_conf_path) as f: config_dict = json.load(f) assert config_dict is not None object_store = build_object_store_from_config(None, config_dict=config_dict) Dataset.object_store = object_store outputs_directory = os.path.join(tool_job_working_directory, "outputs") if not os.path.exists(outputs_directory): outputs_directory = tool_job_working_directory # TODO: constants... if os.path.exists(os.path.join(outputs_directory, "tool_stdout")): with open(os.path.join(outputs_directory, "tool_stdout"), "rb") as f: tool_stdout = f.read() with open(os.path.join(outputs_directory, "tool_stderr"), "rb") as f: tool_stderr = f.read() elif os.path.exists(os.path.join(outputs_directory, "stdout")): # Puslar style working directory. with open(os.path.join(outputs_directory, "stdout"), "rb") as f: tool_stdout = f.read() with open(os.path.join(outputs_directory, "stderr"), "rb") as f: tool_stderr = f.read() job_id_tag = metadata_params["job_id_tag"] exit_code_file = default_exit_code_file(".", job_id_tag) tool_exit_code = read_exit_code_from(exit_code_file, job_id_tag) check_output_detected_state, tool_stdout, tool_stderr, job_messages = check_output(stdio_regexes, stdio_exit_codes, tool_stdout, tool_stderr, tool_exit_code, job_id_tag) if check_output_detected_state == DETECTED_JOB_STATE.OK and not tool_provided_metadata.has_failed_outputs(): final_job_state = Job.states.OK else: final_job_state = Job.states.ERROR version_string = "" if os.path.exists(COMMAND_VERSION_FILENAME): version_string = open(COMMAND_VERSION_FILENAME).read() expression_context = ExpressionContext(dict(stdout=tool_stdout, stderr=tool_stderr)) # Load outputs. 
export_store = store.DirectoryModelExportStore('metadata/outputs_populated', serialize_dataset_objects=True, for_edit=True, strip_metadata_files=False, serialize_jobs=False) try: import_model_store = store.imported_store_for_metadata('metadata/outputs_new', object_store=object_store) except AssertionError: # Remove in 21.09, this should only happen for jobs that started on <= 20.09 and finish now import_model_store = None job_context = SessionlessJobContext( metadata_params, tool_provided_metadata, object_store, export_store, import_model_store, os.path.join(tool_job_working_directory, "working"), final_job_state=final_job_state, ) unnamed_id_to_path = {} for unnamed_output_dict in job_context.tool_provided_metadata.get_unnamed_outputs(): destination = unnamed_output_dict["destination"] elements = unnamed_output_dict["elements"] destination_type = destination["type"] if destination_type == 'hdas': for element in elements: filename = element.get('filename') if filename: unnamed_id_to_path[element['object_id']] = os.path.join(job_context.job_working_directory, filename) for output_name, output_dict in outputs.items(): dataset_instance_id = output_dict["id"] klass = getattr(galaxy.model, output_dict.get('model_class', 'HistoryDatasetAssociation')) dataset = None if import_model_store: dataset = import_model_store.sa_session.query(klass).find(dataset_instance_id) if dataset is None: # legacy check for jobs that started before 21.01, remove on 21.05 filename_in = os.path.join("metadata/metadata_in_%s" % output_name) import pickle dataset = pickle.load(open(filename_in, 'rb')) # load DatasetInstance assert dataset is not None filename_kwds = os.path.join("metadata/metadata_kwds_%s" % output_name) filename_out = os.path.join("metadata/metadata_out_%s" % output_name) filename_results_code = os.path.join("metadata/metadata_results_%s" % output_name) override_metadata = os.path.join("metadata/metadata_override_%s" % output_name) dataset_filename_override = output_dict["filename_override"] # pre-20.05 this was a per job parameter and not a per dataset parameter, drop in 21.XX legacy_object_store_store_by = metadata_params.get("object_store_store_by", "id") # Same block as below... 
set_meta_kwds = stringify_dictionary_keys(json.load(open(filename_kwds))) # load kwds; need to ensure our keywords are not unicode try: dataset.dataset.external_filename = unnamed_id_to_path.get(dataset_instance_id, dataset_filename_override) store_by = output_dict.get("object_store_store_by", legacy_object_store_store_by) extra_files_dir_name = "dataset_%s_files" % getattr(dataset.dataset, store_by) files_path = os.path.abspath(os.path.join(tool_job_working_directory, "working", extra_files_dir_name)) dataset.dataset.external_extra_files_path = files_path file_dict = tool_provided_metadata.get_dataset_meta(output_name, dataset.dataset.id, dataset.dataset.uuid) if 'ext' in file_dict: dataset.extension = file_dict['ext'] # Metadata FileParameter types may not be writable on a cluster node, and are therefore temporarily substituted with MetadataTempFiles override_metadata = json.load(open(override_metadata)) for metadata_name, metadata_file_override in override_metadata: if MetadataTempFile.is_JSONified_value(metadata_file_override): metadata_file_override = MetadataTempFile.from_JSON(metadata_file_override) setattr(dataset.metadata, metadata_name, metadata_file_override) if output_dict.get("validate", False): set_validated_state(dataset) set_meta(dataset, file_dict) if extended_metadata_collection: meta = tool_provided_metadata.get_dataset_meta(output_name, dataset.dataset.id, dataset.dataset.uuid) if meta: context = ExpressionContext(meta, expression_context) else: context = expression_context # Lazy and unattached # if getattr(dataset, "hidden_beneath_collection_instance", None): # dataset.visible = False dataset.blurb = 'done' dataset.peek = 'no peek' dataset.info = (dataset.info or '') if context['stdout'].strip(): # Ensure white space between entries dataset.info = dataset.info.rstrip() + "\n" + context['stdout'].strip() if context['stderr'].strip(): # Ensure white space between entries dataset.info = dataset.info.rstrip() + "\n" + context['stderr'].strip() dataset.tool_version = version_string dataset.set_size() if 'uuid' in context: dataset.dataset.uuid = context['uuid'] if dataset_filename_override and dataset_filename_override != dataset.file_name: # This has to be a job with outputs_to_working_directory set. # We update the object store with the created output file. object_store.update_from_file(dataset.dataset, file_name=dataset_filename_override, create=True) collect_extra_files(object_store, dataset, ".") if Job.states.ERROR == final_job_state: dataset.blurb = "error" dataset.mark_unhidden() else: # If the tool was expected to set the extension, attempt to retrieve it if dataset.ext == 'auto': dataset.extension = context.get('ext', 'data') dataset.init_meta(copy_from=dataset) # This has already been done: # else: # self.external_output_metadata.load_metadata(dataset, output_name, self.sa_session, working_directory=self.working_directory, remote_metadata_directory=remote_metadata_directory) line_count = context.get('line_count', None) try: # Certain datatype's set_peek methods contain a line_count argument dataset.set_peek(line_count=line_count) except TypeError: # ... and others don't dataset.set_peek() for context_key in TOOL_PROVIDED_JOB_METADATA_KEYS: if context_key in context: context_value = context[context_key] setattr(dataset, context_key, context_value) # We never want to persist the external_filename. 
dataset.dataset.external_filename = None export_store.add_dataset(dataset) else: dataset.metadata.to_JSON_dict(filename_out) # write out results of set_meta json.dump((True, 'Metadata has been set successfully'), open(filename_results_code, 'wt+')) # setting metadata has succeeded except Exception: json.dump((False, traceback.format_exc()), open(filename_results_code, 'wt+')) # setting metadata has failed somehow if extended_metadata_collection: # discover extra outputs... output_collections = {} for name, output_collection in metadata_params["output_collections"].items(): output_collections[name] = import_model_store.sa_session.query(HistoryDatasetCollectionAssociation).find(output_collection["id"]) outputs = {} for name, output in metadata_params["outputs"].items(): klass = getattr(galaxy.model, output.get('model_class', 'HistoryDatasetAssociation')) outputs[name] = import_model_store.sa_session.query(klass).find(output["id"]) input_ext = json.loads(metadata_params["job_params"].get("__input_ext", '"data"')) collect_primary_datasets( job_context, outputs, input_ext=input_ext, ) collect_dynamic_outputs(job_context, output_collections) if export_store: export_store._finalize() write_job_metadata(tool_job_working_directory, job_metadata, set_meta, tool_provided_metadata)
def set_metadata_portable(): tool_job_working_directory = os.path.abspath(os.getcwd()) metadata_tmp_files_dir = os.path.join(tool_job_working_directory, "metadata") MetadataTempFile.tmp_dir = metadata_tmp_files_dir metadata_params_path = os.path.join("metadata", "params.json") try: with open(metadata_params_path) as f: metadata_params = json.load(f) except OSError: raise Exception("Failed to find metadata/params.json from cwd [%s]" % tool_job_working_directory) datatypes_config = metadata_params["datatypes_config"] job_metadata = metadata_params["job_metadata"] provided_metadata_style = metadata_params.get("provided_metadata_style") max_metadata_value_size = metadata_params.get("max_metadata_value_size") or 0 outputs = metadata_params["outputs"] datatypes_registry = validate_and_load_datatypes_config(datatypes_config) tool_provided_metadata = load_job_metadata(job_metadata, provided_metadata_style) def set_meta(new_dataset_instance, file_dict): set_meta_with_tool_provided(new_dataset_instance, file_dict, set_meta_kwds, datatypes_registry, max_metadata_value_size) object_store_conf_path = os.path.join("metadata", "object_store_conf.json") extended_metadata_collection = os.path.exists(object_store_conf_path) object_store = None job_context = None version_string = "" export_store = None final_job_state = Job.states.OK if extended_metadata_collection: tool_dict = metadata_params["tool"] stdio_exit_code_dicts, stdio_regex_dicts = tool_dict["stdio_exit_codes"], tool_dict["stdio_regexes"] stdio_exit_codes = list(map(ToolStdioExitCode, stdio_exit_code_dicts)) stdio_regexes = list(map(ToolStdioRegex, stdio_regex_dicts)) with open(object_store_conf_path) as f: config_dict = json.load(f) assert config_dict is not None object_store = build_object_store_from_config(None, config_dict=config_dict) Dataset.object_store = object_store outputs_directory = os.path.join(tool_job_working_directory, "outputs") if not os.path.exists(outputs_directory): outputs_directory = tool_job_working_directory # TODO: constants... if os.path.exists(os.path.join(outputs_directory, "tool_stdout")): with open(os.path.join(outputs_directory, "tool_stdout"), "rb") as f: tool_stdout = f.read() with open(os.path.join(outputs_directory, "tool_stderr"), "rb") as f: tool_stderr = f.read() elif os.path.exists(os.path.join(outputs_directory, "stdout")): # Puslar style working directory. with open(os.path.join(outputs_directory, "stdout"), "rb") as f: tool_stdout = f.read() with open(os.path.join(outputs_directory, "stderr"), "rb") as f: tool_stderr = f.read() job_id_tag = metadata_params["job_id_tag"] exit_code_file = default_exit_code_file(".", job_id_tag) tool_exit_code = read_exit_code_from(exit_code_file, job_id_tag) check_output_detected_state, tool_stdout, tool_stderr, job_messages = check_output(stdio_regexes, stdio_exit_codes, tool_stdout, tool_stderr, tool_exit_code, job_id_tag) if check_output_detected_state == DETECTED_JOB_STATE.OK and not tool_provided_metadata.has_failed_outputs(): final_job_state = Job.states.OK else: final_job_state = Job.states.ERROR version_string = "" if os.path.exists(COMMAND_VERSION_FILENAME): version_string = open(COMMAND_VERSION_FILENAME).read() expression_context = ExpressionContext(dict(stdout=tool_stdout, stderr=tool_stderr)) # Load outputs. 
export_store = store.DirectoryModelExportStore('metadata/outputs_populated', serialize_dataset_objects=True, for_edit=True, strip_metadata_files=False, serialize_jobs=False) try: import_model_store = store.imported_store_for_metadata('metadata/outputs_new', object_store=object_store) except AssertionError: # Remove in 21.09, this should only happen for jobs that started on <= 20.09 and finish now import_model_store = None job_context = SessionlessJobContext( metadata_params, tool_provided_metadata, object_store, export_store, import_model_store, os.path.join(tool_job_working_directory, "working"), final_job_state=final_job_state, ) unnamed_id_to_path = {} for unnamed_output_dict in job_context.tool_provided_metadata.get_unnamed_outputs(): destination = unnamed_output_dict["destination"] elements = unnamed_output_dict["elements"] destination_type = destination["type"] if destination_type == 'hdas': for element in elements: filename = element.get('filename') if filename: unnamed_id_to_path[element['object_id']] = os.path.join(job_context.job_working_directory, filename) for output_name, output_dict in outputs.items(): dataset_instance_id = output_dict["id"] klass = getattr(galaxy.model, output_dict.get('model_class', 'HistoryDatasetAssociation')) dataset = None if import_model_store: dataset = import_model_store.sa_session.query(klass).find(dataset_instance_id) if dataset is None: # legacy check for jobs that started before 21.01, remove on 21.05 filename_in = os.path.join("metadata/metadata_in_%s" % output_name) import pickle dataset = pickle.load(open(filename_in, 'rb')) # load DatasetInstance assert dataset is not None filename_kwds = os.path.join("metadata/metadata_kwds_%s" % output_name) filename_out = os.path.join("metadata/metadata_out_%s" % output_name) filename_results_code = os.path.join("metadata/metadata_results_%s" % output_name) override_metadata = os.path.join("metadata/metadata_override_%s" % output_name) dataset_filename_override = output_dict["filename_override"] # pre-20.05 this was a per job parameter and not a per dataset parameter, drop in 21.XX legacy_object_store_store_by = metadata_params.get("object_store_store_by", "id") # Same block as below... 
set_meta_kwds = stringify_dictionary_keys(json.load(open(filename_kwds))) # load kwds; need to ensure our keywords are not unicode try: dataset.dataset.external_filename = unnamed_id_to_path.get(dataset_instance_id, dataset_filename_override) store_by = output_dict.get("object_store_store_by", legacy_object_store_store_by) extra_files_dir_name = "dataset_%s_files" % getattr(dataset.dataset, store_by) files_path = os.path.abspath(os.path.join(tool_job_working_directory, "working", extra_files_dir_name)) dataset.dataset.external_extra_files_path = files_path file_dict = tool_provided_metadata.get_dataset_meta(output_name, dataset.dataset.id, dataset.dataset.uuid) if 'ext' in file_dict: dataset.extension = file_dict['ext'] # Metadata FileParameter types may not be writable on a cluster node, and are therefore temporarily substituted with MetadataTempFiles override_metadata = json.load(open(override_metadata)) for metadata_name, metadata_file_override in override_metadata: if MetadataTempFile.is_JSONified_value(metadata_file_override): metadata_file_override = MetadataTempFile.from_JSON(metadata_file_override) setattr(dataset.metadata, metadata_name, metadata_file_override) if output_dict.get("validate", False): set_validated_state(dataset) set_meta(dataset, file_dict) if extended_metadata_collection: meta = tool_provided_metadata.get_dataset_meta(output_name, dataset.dataset.id, dataset.dataset.uuid) if meta: context = ExpressionContext(meta, expression_context) else: context = expression_context # Lazy and unattached # if getattr(dataset, "hidden_beneath_collection_instance", None): # dataset.visible = False dataset.blurb = 'done' dataset.peek = 'no peek' dataset.info = (dataset.info or '') if context['stdout'].strip(): # Ensure white space between entries dataset.info = dataset.info.rstrip() + "\n" + context['stdout'].strip() if context['stderr'].strip(): # Ensure white space between entries dataset.info = dataset.info.rstrip() + "\n" + context['stderr'].strip() dataset.tool_version = version_string dataset.set_size() if 'uuid' in context: dataset.dataset.uuid = context['uuid'] if dataset_filename_override and dataset_filename_override != dataset.file_name: # This has to be a job with outputs_to_working_directory set. # We update the object store with the created output file. object_store.update_from_file(dataset.dataset, file_name=dataset_filename_override, create=True) collect_extra_files(object_store, dataset, ".") if Job.states.ERROR == final_job_state: dataset.blurb = "error" dataset.mark_unhidden() else: # If the tool was expected to set the extension, attempt to retrieve it if dataset.ext == 'auto': dataset.extension = context.get('ext', 'data') dataset.init_meta(copy_from=dataset) # This has already been done: # else: # self.external_output_metadata.load_metadata(dataset, output_name, self.sa_session, working_directory=self.working_directory, remote_metadata_directory=remote_metadata_directory) line_count = context.get('line_count', None) try: # Certain datatype's set_peek methods contain a line_count argument dataset.set_peek(line_count=line_count) except TypeError: # ... and others don't dataset.set_peek() for context_key in TOOL_PROVIDED_JOB_METADATA_KEYS: if context_key in context: context_value = context[context_key] setattr(dataset, context_key, context_value) # We never want to persist the external_filename. 
dataset.dataset.external_filename = None export_store.add_dataset(dataset) else: dataset.metadata.to_JSON_dict(filename_out) # write out results of set_meta json.dump((True, 'Metadata has been set successfully'), open(filename_results_code, 'wt+')) # setting metadata has succeeded except Exception: json.dump((False, traceback.format_exc()), open(filename_results_code, 'wt+')) # setting metadata has failed somehow if extended_metadata_collection: # discover extra outputs... output_collections = {} for name, output_collection in metadata_params["output_collections"].items(): output_collections[name] = import_model_store.sa_session.query(HistoryDatasetCollectionAssociation).find(output_collection["id"]) outputs = {} for name, output in metadata_params["outputs"].items(): klass = getattr(galaxy.model, output.get('model_class', 'HistoryDatasetAssociation')) outputs[name] = import_model_store.sa_session.query(klass).find(output["id"]) input_ext = json.loads(metadata_params["job_params"].get("__input_ext", '"data"')) collect_primary_datasets( job_context, outputs, input_ext=input_ext, ) collect_dynamic_outputs(job_context, output_collections) if export_store: export_store._finalize() write_job_metadata(tool_job_working_directory, job_metadata, set_meta, tool_provided_metadata)
15,762
def _monitor_events(hass, name, api, event_codes): event_codes = set(event_codes) while True: api.available_flag.wait() try: for code, payload in api.event_actions("All", retries=5): event_data = {"camera": name, "event": code, "payload": payload} hass.bus.fire("amcrest", event_data) if code in event_codes: signal = service_signal(SERVICE_EVENT, name, code) start = any( str(key).lower() == "action" and str(val).lower() == "start" for (key, val) in payload.items() ) _LOGGER.debug("Sending signal: '%s': %s", signal, start) dispatcher_send(hass, signal, start) except AmcrestError as error: _LOGGER.warning( "Error while processing events from %s camera: %r", name, error )
def _monitor_events(hass, name, api, event_codes): event_codes = set(event_codes) while True: api.available_flag.wait() try: for code, payload in api.event_actions("All", retries=5): event_data = {"camera": name, "event": code, "payload": payload} hass.bus.fire("amcrest", event_data) if code in event_codes: signal = service_signal(SERVICE_EVENT, name, code) start = any( str(key).lower() == "action" and str(val).lower() == "start" for key, val in payload.items() ) _LOGGER.debug("Sending signal: '%s': %s", signal, start) dispatcher_send(hass, signal, start) except AmcrestError as error: _LOGGER.warning( "Error while processing events from %s camera: %r", name, error )
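The only change in this pair is dropping the redundant parentheses in the tuple unpacking; the start-detection expression itself can be exercised in isolation (payload hypothetical):

payload = {"Action": "Start", "Index": "0"}

start = any(
    str(key).lower() == "action" and str(val).lower() == "start"
    for key, val in payload.items()
)
print(start)  # True: a case-insensitive "action": "start" entry was found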
6,665
def get_vat_amount(doc): vat_settings = frappe.db.get_value('KSA VAT Setting', {'company': doc.company}) vat_accounts = [] vat_amount = 0 if vat_settings: vat_settings_doc = frappe.get_doc('KSA VAT Setting', vat_settings) for row in vat_settings_doc.get('ksa_vat_sales_accounts'): vat_accounts.append(row.account) for tax in doc.get('taxes'): if tax.account_head in vat_accounts: vat_amount += tax.tax_amount return vat_amount
def get_vat_amount(doc): vat_settings = frappe.db.get_value('KSA VAT Setting', {'company': doc.company}) vat_accounts = [] vat_amount = 0 if vat_settings: vat_settings_doc = frappe.get_cached_doc('KSA VAT Setting', vat_settings) for row in vat_settings_doc.get('ksa_vat_sales_accounts'): vat_accounts.append(row.account) for tax in doc.get('taxes'): if tax.account_head in vat_accounts: vat_amount += tax.tax_amount return vat_amount
10,266
def main(): argument_spec = mso_argument_spec() argument_spec.update( schema=dict(type='str', required=True), template=dict(type='str', required=True), vrf=dict(type='str', aliases=['name']), # This parameter is not required for querying all objects display_name=dict(type='str'), layer3_multicast=dict(type='bool'), regions=dict(type='list'), state=dict(type='str', default='present', choices=['absent', 'present', 'query']), ) module = AnsibleModule( argument_spec=argument_spec, supports_check_mode=True, required_if=[ ['state', 'absent', ['vrf']], ['state', 'present', ['vrf']], ], ) schema = module.params['schema'] template = module.params['template'] vrf = module.params['vrf'] display_name = module.params['display_name'] layer3_multicast = module.params['layer3_multicast'] regions = module.params['regions'] state = module.params['state'] mso = MSOModule(module) # Get schema_id schema_obj = mso.get_obj('schemas', displayName=schema) if not schema_obj: mso.fail_json(msg="Provided schema '{0}' does not exist".format(schema)) schema_path = 'schemas/{id}'.format(**schema_obj) # Get template templates = [t['name'] for t in schema_obj['templates']] if template not in templates: mso.fail_json(msg="Provided template '{0}' does not exist. Existing templates: {1}".format(template, ', '.join(templates))) template_idx = templates.index(template) # Get ANP vrfs = [v['name'] for v in schema_obj['templates'][template_idx]['vrfs']] if vrf is not None and vrf in vrfs: vrf_idx = vrfs.index(vrf) mso.existing = schema_obj['templates'][template_idx]['vrfs'][vrf_idx] if state == 'query': if vrf is None: mso.existing = schema_obj['templates'][template_idx]['vrfs'] elif not mso.existing: mso.fail_json(msg="VRF '{vrf}' not found".format(vrf=vrf)) mso.exit_json() vrfs_path = '/templates/{0}/vrfs'.format(template) vrf_path = '/templates/{0}/vrfs/{1}'.format(template, vrf) ops = [] mso.previous = mso.existing if state == 'absent': if mso.existing: mso.sent = mso.existing = {} ops.append(dict(op='remove', path=vrf_path)) elif state == 'present': if display_name is None and not mso.existing: display_name = vrf payload = dict( name=vrf, displayName=display_name, l3MCast=layer3_multicast, # FIXME regions= regions, ) mso.sanitize(payload, collate=True) if mso.existing: ops.append(dict(op='replace', path=vrf_path, value=mso.sent)) else: ops.append(dict(op='add', path=vrfs_path + '/-', value=mso.sent)) mso.existing = mso.proposed if not module.check_mode: mso.request(schema_path, method='PATCH', data=ops) mso.exit_json()
def main(): argument_spec = mso_argument_spec() argument_spec.update( schema=dict(type='str', required=True), template=dict(type='str', required=True), vrf=dict(type='str', aliases=['name']), # This parameter is not required for querying all objects display_name=dict(type='str'), layer3_multicast=dict(type='bool'), regions=dict(type='list'), state=dict(type='str', default='present', choices=['absent', 'present', 'query']), ) module = AnsibleModule( argument_spec=argument_spec, supports_check_mode=True, required_if=[ ['state', 'absent', ['vrf']], ['state', 'present', ['vrf']], ], ) schema = module.params['schema'] template = module.params['template'] vrf = module.params['vrf'] display_name = module.params['display_name'] layer3_multicast = module.params['layer3_multicast'] regions = module.params['regions'] state = module.params['state'] mso = MSOModule(module) # Get schema_id schema_obj = mso.get_obj('schemas', displayName=schema) if not schema_obj: mso.fail_json(msg="Provided schema '{0}' does not exist".format(schema)) schema_path = 'schemas/{id}'.format(**schema_obj) # Get template templates = [t['name'] for t in schema_obj['templates']] if template not in templates: mso.fail_json(msg="Provided template '{0}' does not exist. Existing templates: {1}".format(template, ', '.join(templates))) template_idx = templates.index(template) # Get ANP vrfs = [v['name'] for v in schema_obj['templates'][template_idx]['vrfs']] if vrf is not None and vrf in vrfs: vrf_idx = vrfs.index(vrf) mso.existing = schema_obj['templates'][template_idx]['vrfs'][vrf_idx] if state == 'query': if vrf is None: mso.existing = schema_obj['templates'][template_idx]['vrfs'] elif not mso.existing: mso.fail_json(msg="VRF '{vrf}' not found".format(vrf=vrf)) mso.exit_json() vrfs_path = '/templates/{0}/vrfs'.format(template) vrf_path = '/templates/{0}/vrfs/{1}'.format(template, vrf) ops = [] mso.previous = mso.existing if state == 'absent': if mso.existing: mso.sent = mso.existing = {} ops.append(dict(op='remove', path=vrf_path)) elif state == 'present': if display_name is None and not mso.existing: display_name = vrf payload = dict( name=vrf, displayName=display_name, l3MCast=layer3_multicast, # FIXME regions= regions, ) mso.sanitize(payload, collate=True) if mso.existing: ops.append(dict(op='replace', path=vrf_path, value=mso.sent)) else: ops.append(dict(op='add', path=vrfs_path + '/-', value=mso.sent)) mso.existing = mso.proposed if not module.check_mode: mso.request(schema_path, method='PATCH', data=ops) mso.exit_json()
13,559
def QR_iteration(H, shifts): """Perform the QR iteration. Performs a QR step for each shift provided in `shifts`. `H` is assumed to be an unreduced upper Hessenberg matrix. If a complex shift occurs a double step is peformed in order to avoid complex arithmetic. Parameters ---------- H The |NumPy array| H which is an unreduced upper Hessenberg matrix. shifts A |NumPy array| which contains the shifts that are to be applied in the QR steps. Returns ------- Hs A |NumPy array| in upper Hessenberg form such that it holds :math:`H Q_s = Q_s H_s`. Qs The product of the orthogonal matrices computed in each QR step. """ Qs = np.eye(len(H)) i = 0 while i < len(shifts) - 1: s = shifts[i] if shifts[i].imag != 0: Q, R = np.linalg.qr(H @ H - 2 * np.real(s) * H + np.abs(s)**2 * np.eye(len(H))) i = i + 2 else: Q, R = np.linalg.qr(H - s * np.eye(len(H))) i = i + 1 Qs = Qs @ Q H = Q.T @ H @ Q return H, Qs
def QR_iteration(H, shifts): """Perform the QR iteration. Performs a QR step for each shift provided in `shifts`. `H` is assumed to be an unreduced upper Hessenberg matrix. If a complex shift occurs a double step is peformed in order to avoid complex arithmetic. Parameters ---------- H The |NumPy array| H which is an unreduced upper Hessenberg matrix. shifts A |NumPy array| which contains the shifts that are to be applied in the QR steps. Returns ------- Hs A |NumPy array| in upper Hessenberg form such that it holds :math:`H Q_s = Q_s H_s`. Qs The product of the orthogonal matrices computed in each QR step. """ Qs = np.eye(len(H)) i = 0 while i < len(shifts) - 1: s = shifts[i] if shifts[i].imag != 0: Q, R = np.linalg.qr(H @ H - 2 * np.real(s) * H + np.abs(s)**2 * np.eye(len(H))) i = i + 2 else: Q, _ = np.linalg.qr(H - s * np.eye(len(H))) i = i + 1 Qs = Qs @ Q H = Q.T @ H @ Q return H, Qs
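A quick numerical check of the documented invariant (assuming NumPy and SciPy are available, with QR_iteration as defined above): after the shifted steps, Qs is orthogonal and H @ Qs is close to Qs @ Hs.

import numpy as np
import scipy.linalg

rng = np.random.default_rng(0)
H = scipy.linalg.hessenberg(rng.standard_normal((6, 6)))  # upper Hessenberg input
shifts = np.array([0.5, -1.0, 2.0])                       # real shifts only

Hs, Qs = QR_iteration(H, shifts)

print(np.allclose(Qs.T @ Qs, np.eye(6)))   # Qs is orthogonal
print(np.allclose(H @ Qs, Qs @ Hs))        # similarity: H Qs = Qs Hs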
20,092
def _get_client(kwargs): client_config = _get_desired_operation_input('client', kwargs) if client_config: client = CloudifyClient(**client_config) # for determining an external client: manager_ips = [mgr.private_ip for mgr in manager.get_rest_client().manager.get_managers()] internal_hosts = ({'127.0.0.1', 'localhost'} | set(manager_ips)) host = {client.host} if type(client.host) == str \ else set(client.host) is_external_host = not (host & internal_hosts) else: client = manager.get_rest_client() is_external_host = False return client, is_external_host
def _get_client(kwargs): client_config = _get_desired_operation_input('client', kwargs) if client_config: client = CloudifyClient(**client_config) # for determining an external client: manager_ips = [mgr.private_ip for mgr in manager.get_rest_client().manager.get_managers()] internal_hosts = ({'127.0.0.1', 'localhost'} | set(manager_ips)) host = {client.host} if isinstance(client.host, str) \ else set(client.host) is_external_host = not (host & internal_hosts) else: client = manager.get_rest_client() is_external_host = False return client, is_external_host
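The switch from type(client.host) == str to isinstance(client.host, str) matters when the host value is a str subclass; a tiny illustration (hypothetical subclass):

class Hostname(str):
    """Hypothetical str subclass, e.g. returned by a config wrapper."""

host = Hostname("10.0.0.5")
print(type(host) == str)      # False: exact-type check misses subclasses
print(isinstance(host, str))  # True: isinstance accepts them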
31,809
def fetch_incidents( client: Client, max_results: int, last_run: Dict[str, int], first_fetch_time: Optional[int], incident_types: List[str]) -> Tuple[Dict[str, int], List[dict]]: # Get the last fetch time, if exists last_fetch = last_run.get('last_fetch', None) if last_fetch is None: # if missing, use what provided via first_fetch_time last_fetch = first_fetch_time else: # otherwise use the stored last fetch last_fetch = int(last_fetch) # for type checking, making sure that latest_created_time is int latest_created_time = cast(int, last_fetch) # Initialize an empty list of incidents to return # Each incident is a dict with a string as a key incidents: List[Dict[str, Any]] = [] # Fetch alerts from RaDark. alerts = client.search_alerts( start_time=last_fetch, max_results=max_results, incident_types=incident_types)['data']['aggregations'] for alert in alerts: # If no created_time set is as epoch (0). We use time in ms so we must # convert it from the HelloWorld API response incident_created_time = int(alert.get('incident_date', '0')) # to prevent duplicates, we are only adding incidents with creation_time > last fetched incident if last_fetch and incident_created_time <= last_fetch: continue # Prevent unsupported sub type to fetch incidents sub_type = alert['sub_type'] if sub_type in SUPPORTED_SUB_TYPES: alert['type_description'] = get_name(alert['type']) alert['incident_url'] = INCIDENT_URL.format(MONITOR_ID=MONITOR_ID, item_id=alert['feed_property_id']) alert.pop('title', None) # Add monitor ID to the incident. alert['monitor_id'] = MONITOR_ID # Parse incident incident = { 'name': get_name(alert['type']), 'occurred': timestamp_to_datestring(incident_created_time * 1000), # timestamp in ms 'rawJSON': json.dumps(alert) } incidents.append(incident) # Update last run if needed if incident_created_time > latest_created_time: latest_created_time = incident_created_time # Save the next_run as a dict with the last_fetch key to be stored next_run = {'last_fetch': latest_created_time} return next_run, incidents
def fetch_incidents( client: Client, max_results: int, last_run: Dict[str, int], first_fetch_time: Optional[int], incident_types: List[str]) -> Tuple[Dict[str, int], List[dict]]: # Get the last fetch time, if exists last_fetch = last_run.get('last_fetch', None) if not last_fetch: # if missing, use what provided via first_fetch_time last_fetch = first_fetch_time else: # otherwise use the stored last fetch last_fetch = int(last_fetch) # for type checking, making sure that latest_created_time is int latest_created_time = cast(int, last_fetch) # Initialize an empty list of incidents to return # Each incident is a dict with a string as a key incidents: List[Dict[str, Any]] = [] # Fetch alerts from RaDark. alerts = client.search_alerts( start_time=last_fetch, max_results=max_results, incident_types=incident_types)['data']['aggregations'] for alert in alerts: # If no created_time set is as epoch (0). We use time in ms so we must # convert it from the HelloWorld API response incident_created_time = int(alert.get('incident_date', '0')) # to prevent duplicates, we are only adding incidents with creation_time > last fetched incident if last_fetch and incident_created_time <= last_fetch: continue # Prevent unsupported sub type to fetch incidents sub_type = alert['sub_type'] if sub_type in SUPPORTED_SUB_TYPES: alert['type_description'] = get_name(alert['type']) alert['incident_url'] = INCIDENT_URL.format(MONITOR_ID=MONITOR_ID, item_id=alert['feed_property_id']) alert.pop('title', None) # Add monitor ID to the incident. alert['monitor_id'] = MONITOR_ID # Parse incident incident = { 'name': get_name(alert['type']), 'occurred': timestamp_to_datestring(incident_created_time * 1000), # timestamp in ms 'rawJSON': json.dumps(alert) } incidents.append(incident) # Update last run if needed if incident_created_time > latest_created_time: latest_created_time = incident_created_time # Save the next_run as a dict with the last_fetch key to be stored next_run = {'last_fetch': latest_created_time} return next_run, incidents
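The only difference in this pair is the fallback condition: `if not last_fetch` also falls back when the stored value is 0 (the epoch), whereas `is None` keeps it. A small sketch of the two behaviours (values hypothetical):

first_fetch_time = 1_600_000_000  # hypothetical configured first-fetch timestamp

for stored in (None, 0, 1_650_000_000):
    by_is_none = first_fetch_time if stored is None else int(stored)
    by_falsy = first_fetch_time if not stored else int(stored)
    print(stored, by_is_none, by_falsy)
# None          -> both variants fall back to first_fetch_time
# 0             -> `is None` keeps 0, `not` treats it as missing and falls back
# 1_650_000_000 -> both keep the stored timestamp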
47,601
def pipeline( task: str = None, model: Optional = None, config: Optional[Union[str, PretrainedConfig]] = None, tokenizer: Optional[Union[str, PreTrainedTokenizer, PreTrainedTokenizerFast]] = None, feature_extractor: Optional[Union[str, PreTrainedFeatureExtractor]] = None, framework: Optional[str] = None, revision: Optional[str] = None, use_fast: bool = True, use_auth_token: Optional[Union[str, bool]] = None, device_map=None, torch_dtype=None, trust_remote_code: Optional[bool] = None, model_kwargs: Dict[str, Any] = None, pipeline_class: Optional[Any] = None, **kwargs ) -> Pipeline: """ Utility factory method to build a [`Pipeline`]. Pipelines are made of: - A [tokenizer](tokenizer) in charge of mapping raw textual input to token. - A [model](model) to make predictions from the inputs. - Some (optional) post processing for enhancing model's output. Args: task (`str`): The task defining which pipeline will be returned. Currently accepted tasks are: - `"audio-classification"`: will return a [`AudioClassificationPipeline`]. - `"automatic-speech-recognition"`: will return a [`AutomaticSpeechRecognitionPipeline`]. - `"conversational"`: will return a [`ConversationalPipeline`]. - `"feature-extraction"`: will return a [`FeatureExtractionPipeline`]. - `"fill-mask"`: will return a [`FillMaskPipeline`]:. - `"image-classification"`: will return a [`ImageClassificationPipeline`]. - `"question-answering"`: will return a [`QuestionAnsweringPipeline`]. - `"table-question-answering"`: will return a [`TableQuestionAnsweringPipeline`]. - `"text2text-generation"`: will return a [`Text2TextGenerationPipeline`]. - `"text-classification"` (alias `"sentiment-analysis"` available): will return a [`TextClassificationPipeline`]. - `"text-generation"`: will return a [`TextGenerationPipeline`]:. - `"token-classification"` (alias `"ner"` available): will return a [`TokenClassificationPipeline`]. - `"translation"`: will return a [`TranslationPipeline`]. - `"translation_xx_to_yy"`: will return a [`TranslationPipeline`]. - `"summarization"`: will return a [`SummarizationPipeline`]. - `"zero-shot-classification"`: will return a [`ZeroShotClassificationPipeline`]. model (`str` or [`PreTrainedModel`] or [`TFPreTrainedModel`], *optional*): The model that will be used by the pipeline to make predictions. This can be a model identifier or an actual instance of a pretrained model inheriting from [`PreTrainedModel`] (for PyTorch) or [`TFPreTrainedModel`] (for TensorFlow). If not provided, the default for the `task` will be loaded. config (`str` or [`PretrainedConfig`], *optional*): The configuration that will be used by the pipeline to instantiate the model. This can be a model identifier or an actual pretrained model configuration inheriting from [`PretrainedConfig`]. If not provided, the default configuration file for the requested model will be used. That means that if `model` is given, its default configuration will be used. However, if `model` is not supplied, this `task`'s default model's config is used instead. tokenizer (`str` or [`PreTrainedTokenizer`], *optional*): The tokenizer that will be used by the pipeline to encode data for the model. This can be a model identifier or an actual pretrained tokenizer inheriting from [`PreTrainedTokenizer`]. If not provided, the default tokenizer for the given `model` will be loaded (if it is a string). If `model` is not specified or not a string, then the default tokenizer for `config` is loaded (if it is a string). 
However, if `config` is also not given or not a string, then the default tokenizer for the given `task` will be loaded. feature_extractor (`str` or [`PreTrainedFeatureExtractor`], *optional*): The feature extractor that will be used by the pipeline to encode data for the model. This can be a model identifier or an actual pretrained feature extractor inheriting from [`PreTrainedFeatureExtractor`]. Feature extractors are used for non-NLP models, such as Speech or Vision models as well as multi-modal models. Multi-modal models will also require a tokenizer to be passed. If not provided, the default feature extractor for the given `model` will be loaded (if it is a string). If `model` is not specified or not a string, then the default feature extractor for `config` is loaded (if it is a string). However, if `config` is also not given or not a string, then the default feature extractor for the given `task` will be loaded. framework (`str`, *optional*): The framework to use, either `"pt"` for PyTorch or `"tf"` for TensorFlow. The specified framework must be installed. If no framework is specified, will default to the one currently installed. If no framework is specified and both frameworks are installed, will default to the framework of the `model`, or to PyTorch if no model is provided. revision (`str`, *optional*, defaults to `"main"`): When passing a task name or a string model identifier: The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any identifier allowed by git. use_fast (`bool`, *optional*, defaults to `True`): Whether or not to use a Fast tokenizer if possible (a [`PreTrainedTokenizerFast`]). use_auth_token (`str` or *bool*, *optional*): The token to use as HTTP bearer authorization for remote files. If `True`, will use the token generated when running `transformers-cli login` (stored in `~/.huggingface`). device_map (`str` or `Dict[str, Union[int, str, torch.device]`, *optional*): Sent directly as `model_kwargs` (just a simpler shortcut). When `accelerate` library is present, set `device_map="auto"` to compute the most optimized `device_map` automatically. [More information](https://huggingface.co/docs/accelerate/main/en/big_modeling#accelerate.cpu_offload) <Tip warning={true}> Do not use `device_map` AND `device` at the same time as they will conflict </Tip> torch_dtype (`str` or `torch.dtype`, *optional*): Sent directly as `model_kwargs` (just a simpler shortcut) to use the available precision for this model (`torch.float16`, `torch.bfloat16`, ... or `"auto"`). trust_remote_code (`bool`, *optional*, defaults to `False`): Whether or not to allow for custom code defined on the Hub in their own modeling, configuration, tokenization or even pipeline files. This option should only be set to `True` for repositories you trust and in which you have read the code, as it will execute code present on the Hub on your local machine. model_kwargs: Additional dictionary of keyword arguments passed along to the model's `from_pretrained(..., **model_kwargs)` function. kwargs: Additional keyword arguments passed along to the specific pipeline init (see the documentation for the corresponding pipeline class for possible values). Returns: [`Pipeline`]: A suitable pipeline for the task. 
Examples: ```python >>> from transformers import pipeline, AutoModelForTokenClassification, AutoTokenizer >>> # Sentiment analysis pipeline >>> pipeline("sentiment-analysis") >>> # Question answering pipeline, specifying the checkpoint identifier >>> pipeline("question-answering", model="distilbert-base-cased-distilled-squad", tokenizer="bert-base-cased") >>> # Named entity recognition pipeline, passing in a specific model and tokenizer >>> model = AutoModelForTokenClassification.from_pretrained("dbmdz/bert-large-cased-finetuned-conll03-english") >>> tokenizer = AutoTokenizer.from_pretrained("bert-base-cased") >>> pipeline("ner", model=model, tokenizer=tokenizer) ```""" if model_kwargs is None: model_kwargs = {} # Make sure we only pass use_auth_token once as a kwarg (it used to be possible to pass it in model_kwargs, # this is to keep BC). use_auth_token = model_kwargs.pop("use_auth_token", use_auth_token) hub_kwargs = {"revision": revision, "use_auth_token": use_auth_token, "trust_remote_code": trust_remote_code} if task is None and model is None: raise RuntimeError( "Impossible to instantiate a pipeline without either a task or a model " "being specified. " "Please provide a task class or a model" ) if model is None and tokenizer is not None: raise RuntimeError( "Impossible to instantiate a pipeline with tokenizer specified but not the model as the provided tokenizer" " may not be compatible with the default model. Please provide a PreTrainedModel class or a" " path/identifier to a pretrained model when providing tokenizer." ) if model is None and feature_extractor is not None: raise RuntimeError( "Impossible to instantiate a pipeline with feature_extractor specified but not the model as the provided" " feature_extractor may not be compatible with the default model. Please provide a PreTrainedModel class" " or a path/identifier to a pretrained model when providing feature_extractor." ) # Config is the primordial information item. # Instantiate config if needed if isinstance(config, str): config = AutoConfig.from_pretrained(config, _from_pipeline=task, **hub_kwargs, **model_kwargs) elif config is None and isinstance(model, str): config = AutoConfig.from_pretrained(model, _from_pipeline=task, **hub_kwargs, **model_kwargs) custom_tasks = {} if config is not None and len(getattr(config, "custom_pipelines", {})) > 0: custom_tasks = config.custom_pipelines if task is None and trust_remote_code is not False: if len(custom_tasks) == 1: task = list(custom_tasks.keys())[0] else: raise RuntimeError( "We can't infer the task automatically for this model as there are multiple tasks available. Pick " f"one in {', '.join(custom_tasks.keys())}" ) if task is None and model is not None: if not isinstance(model, str): raise RuntimeError( "Inferring the task automatically requires to check the hub with a model_id defined as a `str`." f"{model} is not a valid model_id." ) task = get_task(model, use_auth_token) # Retrieve the task if task in custom_tasks: targeted_task, task_options = clean_custom_task(custom_tasks[task]) if pipeline_class is None: if not trust_remote_code: raise ValueError( "Loading this pipeline requires you to execute the code in the pipeline file in that" " repo on your local machine. Make sure you have read the code there to avoid malicious use, then" " set the option `trust_remote_code=True` to remove this error." 
) class_ref = targeted_task["impl"] module_file, class_name = class_ref.split(".") pipeline_class = get_class_from_dynamic_module( model, module_file + ".py", class_name, revision=revision, use_auth_token=use_auth_token ) else: targeted_task, task_options = check_task(task) if pipeline_class is None: pipeline_class = targeted_task["impl"] # Use default model/config/tokenizer for the task if no model is provided if model is None: # At that point framework might still be undetermined model, default_revision = get_default_model_and_revision(targeted_task, framework, task_options) revision = revision if revision is not None else default_revision logger.warning( f"No model was supplied, defaulted to {model} and revision" f" {revision} ({HUGGINGFACE_CO_RESOLVE_ENDPOINT}/{model}).\n" "Using a pipeline without specifying a model name and revision in production is not recommended." ) if config is None and isinstance(model, str): config = AutoConfig.from_pretrained(model, _from_pipeline=task, **hub_kwargs, **model_kwargs) if device_map is not None: if "device_map" in model_kwargs: raise ValueError( 'You cannot use both `pipeline(... device_map=..., model_kwargs={"device_map":...})` as those' " arguments might conflict, use only one.)" ) model_kwargs["device_map"] = device_map if torch_dtype is not None: if "torch_dtype" in model_kwargs: raise ValueError( 'You cannot use both `pipeline(... torch_dtype=..., model_kwargs={"torch_dtype":...})` as those' " arguments might conflict, use only one.)" ) model_kwargs["torch_dtype"] = torch_dtype model_name = model if isinstance(model, str) else None # Infer the framework from the model # Forced if framework already defined, inferred if it's None # Will load the correct model if possible model_classes = {"tf": targeted_task["tf"], "pt": targeted_task["pt"]} framework, model = infer_framework_load_model( model, model_classes=model_classes, config=config, framework=framework, task=task, **hub_kwargs, **model_kwargs, ) model_config = model.config load_tokenizer = type(model_config) in TOKENIZER_MAPPING or model_config.tokenizer_class is not None load_feature_extractor = type(model_config) in FEATURE_EXTRACTOR_MAPPING or feature_extractor is not None if task in NO_TOKENIZER_TASKS: # These will never require a tokenizer. # the model on the other hand might have a tokenizer, but # the files could be missing from the hub, instead of failing # on such repos, we just force to not load it. load_tokenizer = False if not load_feature_extractor and task not in NO_FEATURE_EXTRACTOR_TASKS: raise EnvironmentError( f"There is a problem in `transformers`. The task {task} requires a feature extractor, however the model {type(model_config)} seems to not support feature-extractors. Please report this issue. This is likely a misconfiguration in the library, please report this issue." ) if not load_tokenizer and task not in NO_TOKENIZER_TASKS: raise EnvironmentError( f"There is a problem in `transformers`. The task {task} requires a tokenizer, however the model {type(model_config)} seems to not support tokenizer. This is likely a misconfiguration in the library, please report this issue." 
) if task in NO_FEATURE_EXTRACTOR_TASKS: load_feature_extractor = False if load_tokenizer: # Try to infer tokenizer from model or config name (if provided as str) if tokenizer is None: if isinstance(model_name, str): tokenizer = model_name elif isinstance(config, str): tokenizer = config else: # Impossible to guess what is the right tokenizer here raise Exception( "Impossible to guess which tokenizer to use. " "Please provide a PreTrainedTokenizer class or a path/identifier to a pretrained tokenizer." ) # Instantiate tokenizer if needed if isinstance(tokenizer, (str, tuple)): if isinstance(tokenizer, tuple): # For tuple we have (tokenizer name, {kwargs}) use_fast = tokenizer[1].pop("use_fast", use_fast) tokenizer_identifier = tokenizer[0] tokenizer_kwargs = tokenizer[1] else: tokenizer_identifier = tokenizer tokenizer_kwargs = model_kwargs tokenizer = AutoTokenizer.from_pretrained( tokenizer_identifier, use_fast=use_fast, _from_pipeline=task, **hub_kwargs, **tokenizer_kwargs ) if load_feature_extractor: # Try to infer feature extractor from model or config name (if provided as str) if feature_extractor is None: if isinstance(model_name, str): feature_extractor = model_name elif isinstance(config, str): feature_extractor = config else: # Impossible to guess what is the right feature_extractor here raise Exception( "Impossible to guess which feature extractor to use. " "Please provide a PreTrainedFeatureExtractor class or a path/identifier " "to a pretrained feature extractor." ) # Instantiate feature_extractor if needed if isinstance(feature_extractor, (str, tuple)): feature_extractor = AutoFeatureExtractor.from_pretrained( feature_extractor, _from_pipeline=task, **hub_kwargs, **model_kwargs ) if ( feature_extractor._processor_class and feature_extractor._processor_class.endswith("WithLM") and isinstance(model_name, str) ): try: import kenlm # to trigger `ImportError` if not installed from pyctcdecode import BeamSearchDecoderCTC if os.path.isdir(model_name) or os.path.isfile(model_name): decoder = BeamSearchDecoderCTC.load_from_dir(model_name) else: language_model_glob = os.path.join( BeamSearchDecoderCTC._LANGUAGE_MODEL_SERIALIZED_DIRECTORY, "*" ) alphabet_filename = BeamSearchDecoderCTC._ALPHABET_SERIALIZED_FILENAME allow_regex = [language_model_glob, alphabet_filename] decoder = BeamSearchDecoderCTC.load_from_hf_hub(model_name, allow_regex=allow_regex) kwargs["decoder"] = decoder except ImportError as e: logger.warning( f"Could not load the `decoder` for {model_name}. Defaulting to raw CTC. Try to install" " `pyctcdecode` and `kenlm`: (`pip install pyctcdecode`, `pip install" f" https://github.com/kpu/kenlm/archive/master.zip`): Error: {e}" ) if task == "translation" and model.config.task_specific_params: for key in model.config.task_specific_params: if key.startswith("translation"): task = key warnings.warn( f'"translation" task was used, instead of "translation_XX_to_YY", defaulting to "{task}"', UserWarning, ) break if tokenizer is not None: kwargs["tokenizer"] = tokenizer if feature_extractor is not None: kwargs["feature_extractor"] = feature_extractor return pipeline_class(model=model, framework=framework, task=task, **kwargs)
def pipeline( task: str = None, model: Optional = None, config: Optional[Union[str, PretrainedConfig]] = None, tokenizer: Optional[Union[str, PreTrainedTokenizer, PreTrainedTokenizerFast]] = None, feature_extractor: Optional[Union[str, PreTrainedFeatureExtractor]] = None, framework: Optional[str] = None, revision: Optional[str] = None, use_fast: bool = True, use_auth_token: Optional[Union[str, bool]] = None, device_map=None, torch_dtype=None, trust_remote_code: Optional[bool] = None, model_kwargs: Dict[str, Any] = None, pipeline_class: Optional[Any] = None, **kwargs ) -> Pipeline: """ Utility factory method to build a [`Pipeline`]. Pipelines are made of: - A [tokenizer](tokenizer) in charge of mapping raw textual input to token. - A [model](model) to make predictions from the inputs. - Some (optional) post processing for enhancing model's output. Args: task (`str`): The task defining which pipeline will be returned. Currently accepted tasks are: - `"audio-classification"`: will return a [`AudioClassificationPipeline`]. - `"automatic-speech-recognition"`: will return a [`AutomaticSpeechRecognitionPipeline`]. - `"conversational"`: will return a [`ConversationalPipeline`]. - `"feature-extraction"`: will return a [`FeatureExtractionPipeline`]. - `"fill-mask"`: will return a [`FillMaskPipeline`]:. - `"image-classification"`: will return a [`ImageClassificationPipeline`]. - `"question-answering"`: will return a [`QuestionAnsweringPipeline`]. - `"table-question-answering"`: will return a [`TableQuestionAnsweringPipeline`]. - `"text2text-generation"`: will return a [`Text2TextGenerationPipeline`]. - `"text-classification"` (alias `"sentiment-analysis"` available): will return a [`TextClassificationPipeline`]. - `"text-generation"`: will return a [`TextGenerationPipeline`]:. - `"token-classification"` (alias `"ner"` available): will return a [`TokenClassificationPipeline`]. - `"translation"`: will return a [`TranslationPipeline`]. - `"translation_xx_to_yy"`: will return a [`TranslationPipeline`]. - `"summarization"`: will return a [`SummarizationPipeline`]. - `"zero-shot-classification"`: will return a [`ZeroShotClassificationPipeline`]. model (`str` or [`PreTrainedModel`] or [`TFPreTrainedModel`], *optional*): The model that will be used by the pipeline to make predictions. This can be a model identifier or an actual instance of a pretrained model inheriting from [`PreTrainedModel`] (for PyTorch) or [`TFPreTrainedModel`] (for TensorFlow). If not provided, the default for the `task` will be loaded. config (`str` or [`PretrainedConfig`], *optional*): The configuration that will be used by the pipeline to instantiate the model. This can be a model identifier or an actual pretrained model configuration inheriting from [`PretrainedConfig`]. If not provided, the default configuration file for the requested model will be used. That means that if `model` is given, its default configuration will be used. However, if `model` is not supplied, this `task`'s default model's config is used instead. tokenizer (`str` or [`PreTrainedTokenizer`], *optional*): The tokenizer that will be used by the pipeline to encode data for the model. This can be a model identifier or an actual pretrained tokenizer inheriting from [`PreTrainedTokenizer`]. If not provided, the default tokenizer for the given `model` will be loaded (if it is a string). If `model` is not specified or not a string, then the default tokenizer for `config` is loaded (if it is a string). 
However, if `config` is also not given or not a string, then the default tokenizer for the given `task` will be loaded. feature_extractor (`str` or [`PreTrainedFeatureExtractor`], *optional*): The feature extractor that will be used by the pipeline to encode data for the model. This can be a model identifier or an actual pretrained feature extractor inheriting from [`PreTrainedFeatureExtractor`]. Feature extractors are used for non-NLP models, such as Speech or Vision models as well as multi-modal models. Multi-modal models will also require a tokenizer to be passed. If not provided, the default feature extractor for the given `model` will be loaded (if it is a string). If `model` is not specified or not a string, then the default feature extractor for `config` is loaded (if it is a string). However, if `config` is also not given or not a string, then the default feature extractor for the given `task` will be loaded. framework (`str`, *optional*): The framework to use, either `"pt"` for PyTorch or `"tf"` for TensorFlow. The specified framework must be installed. If no framework is specified, will default to the one currently installed. If no framework is specified and both frameworks are installed, will default to the framework of the `model`, or to PyTorch if no model is provided. revision (`str`, *optional*, defaults to `"main"`): When passing a task name or a string model identifier: The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any identifier allowed by git. use_fast (`bool`, *optional*, defaults to `True`): Whether or not to use a Fast tokenizer if possible (a [`PreTrainedTokenizerFast`]). use_auth_token (`str` or *bool*, *optional*): The token to use as HTTP bearer authorization for remote files. If `True`, will use the token generated when running `transformers-cli login` (stored in `~/.huggingface`). device_map (`str` or `Dict[str, Union[int, str, torch.device]`, *optional*): Sent directly as `model_kwargs` (just a simpler shortcut). When `accelerate` library is present, set `device_map="auto"` to compute the most optimized `device_map` automatically. [More information](https://huggingface.co/docs/accelerate/main/en/big_modeling#accelerate.cpu_offload) <Tip warning={true}> Do not use `device_map` AND `device` at the same time as they will conflict </Tip> torch_dtype (`str` or `torch.dtype`, *optional*): Sent directly as `model_kwargs` (just a simpler shortcut) to use the available precision for this model (`torch.float16`, `torch.bfloat16`, ... or `"auto"`). trust_remote_code (`bool`, *optional*, defaults to `False`): Whether or not to allow for custom code defined on the Hub in their own modeling, configuration, tokenization or even pipeline files. This option should only be set to `True` for repositories you trust and in which you have read the code, as it will execute code present on the Hub on your local machine. model_kwargs: Additional dictionary of keyword arguments passed along to the model's `from_pretrained(..., **model_kwargs)` function. kwargs: Additional keyword arguments passed along to the specific pipeline init (see the documentation for the corresponding pipeline class for possible values). Returns: [`Pipeline`]: A suitable pipeline for the task. 
Examples: ```python >>> from transformers import pipeline, AutoModelForTokenClassification, AutoTokenizer >>> # Sentiment analysis pipeline >>> pipeline("sentiment-analysis") >>> # Question answering pipeline, specifying the checkpoint identifier >>> pipeline("question-answering", model="distilbert-base-cased-distilled-squad", tokenizer="bert-base-cased") >>> # Named entity recognition pipeline, passing in a specific model and tokenizer >>> model = AutoModelForTokenClassification.from_pretrained("dbmdz/bert-large-cased-finetuned-conll03-english") >>> tokenizer = AutoTokenizer.from_pretrained("bert-base-cased") >>> pipeline("ner", model=model, tokenizer=tokenizer) ```""" if model_kwargs is None: model_kwargs = {} # Make sure we only pass use_auth_token once as a kwarg (it used to be possible to pass it in model_kwargs, # this is to keep BC). use_auth_token = model_kwargs.pop("use_auth_token", use_auth_token) hub_kwargs = {"revision": revision, "use_auth_token": use_auth_token, "trust_remote_code": trust_remote_code} if task is None and model is None: raise RuntimeError( "Impossible to instantiate a pipeline without either a task or a model " "being specified. " "Please provide a task class or a model" ) if model is None and tokenizer is not None: raise RuntimeError( "Impossible to instantiate a pipeline with tokenizer specified but not the model as the provided tokenizer" " may not be compatible with the default model. Please provide a PreTrainedModel class or a" " path/identifier to a pretrained model when providing tokenizer." ) if model is None and feature_extractor is not None: raise RuntimeError( "Impossible to instantiate a pipeline with feature_extractor specified but not the model as the provided" " feature_extractor may not be compatible with the default model. Please provide a PreTrainedModel class" " or a path/identifier to a pretrained model when providing feature_extractor." ) # Config is the primordial information item. # Instantiate config if needed if isinstance(config, str): config = AutoConfig.from_pretrained(config, _from_pipeline=task, **hub_kwargs, **model_kwargs) elif config is None and isinstance(model, str): config = AutoConfig.from_pretrained(model, _from_pipeline=task, **hub_kwargs, **model_kwargs) custom_tasks = {} if config is not None and len(getattr(config, "custom_pipelines", {})) > 0: custom_tasks = config.custom_pipelines if task is None and trust_remote_code is not False: if len(custom_tasks) == 1: task = list(custom_tasks.keys())[0] else: raise RuntimeError( "We can't infer the task automatically for this model as there are multiple tasks available. Pick " f"one in {', '.join(custom_tasks.keys())}" ) if task is None and model is not None: if not isinstance(model, str): raise RuntimeError( "Inferring the task automatically requires to check the hub with a model_id defined as a `str`." f"{model} is not a valid model_id." ) task = get_task(model, use_auth_token) # Retrieve the task if task in custom_tasks: targeted_task, task_options = clean_custom_task(custom_tasks[task]) if pipeline_class is None: if not trust_remote_code: raise ValueError( "Loading this pipeline requires you to execute the code in the pipeline file in that" " repo on your local machine. Make sure you have read the code there to avoid malicious use, then" " set the option `trust_remote_code=True` to remove this error." 
) class_ref = targeted_task["impl"] module_file, class_name = class_ref.split(".") pipeline_class = get_class_from_dynamic_module( model, module_file + ".py", class_name, revision=revision, use_auth_token=use_auth_token ) else: targeted_task, task_options = check_task(task) if pipeline_class is None: pipeline_class = targeted_task["impl"] # Use default model/config/tokenizer for the task if no model is provided if model is None: # At that point framework might still be undetermined model, default_revision = get_default_model_and_revision(targeted_task, framework, task_options) revision = revision if revision is not None else default_revision logger.warning( f"No model was supplied, defaulted to {model} and revision" f" {revision} ({HUGGINGFACE_CO_RESOLVE_ENDPOINT}/{model}).\n" "Using a pipeline without specifying a model name and revision in production is not recommended." ) if config is None and isinstance(model, str): config = AutoConfig.from_pretrained(model, _from_pipeline=task, **hub_kwargs, **model_kwargs) if device_map is not None: if "device_map" in model_kwargs: raise ValueError( 'You cannot use both `pipeline(... device_map=..., model_kwargs={"device_map":...})` as those' " arguments might conflict, use only one.)" ) model_kwargs["device_map"] = device_map if torch_dtype is not None: if "torch_dtype" in model_kwargs: raise ValueError( 'You cannot use both `pipeline(... torch_dtype=..., model_kwargs={"torch_dtype":...})` as those' " arguments might conflict, use only one.)" ) model_kwargs["torch_dtype"] = torch_dtype model_name = model if isinstance(model, str) else None # Infer the framework from the model # Forced if framework already defined, inferred if it's None # Will load the correct model if possible model_classes = {"tf": targeted_task["tf"], "pt": targeted_task["pt"]} framework, model = infer_framework_load_model( model, model_classes=model_classes, config=config, framework=framework, task=task, **hub_kwargs, **model_kwargs, ) model_config = model.config load_tokenizer = type(model_config) in TOKENIZER_MAPPING or model_config.tokenizer_class is not None load_feature_extractor = type(model_config) in FEATURE_EXTRACTOR_MAPPING or feature_extractor is not None if task in NO_TOKENIZER_TASKS: # These will never require a tokenizer. # the model on the other hand might have a tokenizer, but # the files could be missing from the hub, instead of failing # on such repos, we just force to not load it. load_tokenizer = False if not load_feature_extractor and task not in NO_FEATURE_EXTRACTOR_TASKS: raise EnvironmentError( f"There is a problem in `transformers`. The task {task} requires a feature extractor, however the model {type(model_config)} seems to not support feature-extractors. This is likely a misconfiguration in the library, please report this issue." ) if not load_tokenizer and task not in NO_TOKENIZER_TASKS: raise EnvironmentError( f"There is a problem in `transformers`. The task {task} requires a tokenizer, however the model {type(model_config)} seems to not support tokenizer. This is likely a misconfiguration in the library, please report this issue." ) if task in NO_FEATURE_EXTRACTOR_TASKS: load_feature_extractor = False if load_tokenizer: # Try to infer tokenizer from model or config name (if provided as str) if tokenizer is None: if isinstance(model_name, str): tokenizer = model_name elif isinstance(config, str): tokenizer = config else: # Impossible to guess what is the right tokenizer here raise Exception( "Impossible to guess which tokenizer to use. 
" "Please provide a PreTrainedTokenizer class or a path/identifier to a pretrained tokenizer." ) # Instantiate tokenizer if needed if isinstance(tokenizer, (str, tuple)): if isinstance(tokenizer, tuple): # For tuple we have (tokenizer name, {kwargs}) use_fast = tokenizer[1].pop("use_fast", use_fast) tokenizer_identifier = tokenizer[0] tokenizer_kwargs = tokenizer[1] else: tokenizer_identifier = tokenizer tokenizer_kwargs = model_kwargs tokenizer = AutoTokenizer.from_pretrained( tokenizer_identifier, use_fast=use_fast, _from_pipeline=task, **hub_kwargs, **tokenizer_kwargs ) if load_feature_extractor: # Try to infer feature extractor from model or config name (if provided as str) if feature_extractor is None: if isinstance(model_name, str): feature_extractor = model_name elif isinstance(config, str): feature_extractor = config else: # Impossible to guess what is the right feature_extractor here raise Exception( "Impossible to guess which feature extractor to use. " "Please provide a PreTrainedFeatureExtractor class or a path/identifier " "to a pretrained feature extractor." ) # Instantiate feature_extractor if needed if isinstance(feature_extractor, (str, tuple)): feature_extractor = AutoFeatureExtractor.from_pretrained( feature_extractor, _from_pipeline=task, **hub_kwargs, **model_kwargs ) if ( feature_extractor._processor_class and feature_extractor._processor_class.endswith("WithLM") and isinstance(model_name, str) ): try: import kenlm # to trigger `ImportError` if not installed from pyctcdecode import BeamSearchDecoderCTC if os.path.isdir(model_name) or os.path.isfile(model_name): decoder = BeamSearchDecoderCTC.load_from_dir(model_name) else: language_model_glob = os.path.join( BeamSearchDecoderCTC._LANGUAGE_MODEL_SERIALIZED_DIRECTORY, "*" ) alphabet_filename = BeamSearchDecoderCTC._ALPHABET_SERIALIZED_FILENAME allow_regex = [language_model_glob, alphabet_filename] decoder = BeamSearchDecoderCTC.load_from_hf_hub(model_name, allow_regex=allow_regex) kwargs["decoder"] = decoder except ImportError as e: logger.warning( f"Could not load the `decoder` for {model_name}. Defaulting to raw CTC. Try to install" " `pyctcdecode` and `kenlm`: (`pip install pyctcdecode`, `pip install" f" https://github.com/kpu/kenlm/archive/master.zip`): Error: {e}" ) if task == "translation" and model.config.task_specific_params: for key in model.config.task_specific_params: if key.startswith("translation"): task = key warnings.warn( f'"translation" task was used, instead of "translation_XX_to_YY", defaulting to "{task}"', UserWarning, ) break if tokenizer is not None: kwargs["tokenizer"] = tokenizer if feature_extractor is not None: kwargs["feature_extractor"] = feature_extractor return pipeline_class(model=model, framework=framework, task=task, **kwargs)
55,108
def draw( qnode, charset="unicode", wire_order=None, show_all_wires=False, max_length=None, expansion_strategy=None, ): """Create a function that draws the given qnode. Args: qnode (.QNode): the input QNode that is to be drawn. charset (str, optional): The charset that should be used. Currently, "unicode" and "ascii" are supported. wire_order (Sequence[Any]): the order (from top to bottom) to print the wires of the circuit show_all_wires (bool): If True, all wires, including empty wires, are printed. max_length (int, optional): Maximum string width (columns) when printing the circuit to the CLI. expansion_strategy (str): The strategy to use when circuit expansions or decompositions are required. - ``gradient``: The QNode will attempt to decompose the internal circuit such that all circuit operations are supported by the gradient method. - ``device``: The QNode will attempt to decompose the internal circuit such that all circuit operations are natively supported by the device. Returns: A function that has the same argument signature as ``qnode``. When called, the function will draw the QNode. **Example** Given the following definition of a QNode, .. code-block:: python3 dev = qml.device('default.qubit', wires=2) @qml.qnode(dev) def circuit(a, w): qml.Hadamard(0) qml.CRX(a, wires=[0, 1]) qml.Rot(*w, wires=[1]) qml.CRX(-a, wires=[0, 1]) return qml.expval(qml.PauliZ(0) @ qml.PauliZ(1)) We can draw the it like such: >>> drawer = qml.draw(circuit) >>> print(drawer(a=2.3, w=[1.2, 3.2, 0.7])) 0: ──H──╭C────────────────────────────╭C─────────╭┤ ⟨Z ⊗ Z⟩ 1: ─────╰RX(2.3)──Rot(1.2, 3.2, 0.7)──╰RX(-2.3)──╰┤ ⟨Z ⊗ Z⟩ Circuit drawing works with devices with custom wire labels: .. code-block:: python3 dev = qml.device('default.qubit', wires=["a", -1, "q2"]) @qml.qnode(dev) def circuit(): qml.Hadamard(wires=-1) qml.CNOT(wires=["a", "q2"]) qml.RX(0.2, wires="a") return qml.expval(qml.PauliX(wires="q2")) When printed, the wire order matches the order defined on the device: >>> drawer = qml.draw(circuit) >>> print(drawer()) a: ─────╭C──RX(0.2)──┤ -1: ──H──│────────────┤ q2: ─────╰X───────────┤ ⟨X⟩ We can use the ``wire_order`` argument to change the wire order: >>> drawer = qml.draw(circuit, wire_order=["q2", "a", -1]) >>> print(drawer()) q2: ──╭X───────────┤ ⟨X⟩ a: ──╰C──RX(0.2)──┤ -1: ───H───────────┤ """ @wraps(qnode) def wrapper(*args, **kwargs): original_expansion_strategy = getattr(qnode, "expansion_strategy", None) try: qnode.expansion_strategy = expansion_strategy or original_expansion_strategy tapes = qnode.construct(args, kwargs) finally: qnode.expansion_strategy = original_expansion_strategy _wire_order = wire_order or qnode.device.wires _wire_order = qml.wires.Wires(_wire_order) if show_all_wires and len(_wire_order) < qnode.device.num_wires: raise ValueError( "When show_all_wires is enabled, the provided wire order must contain all wires on the device." ) if not qnode.device.wires.contains_wires(_wire_order): raise ValueError( f"Provided wire order {_wire_order.labels} contains wires not contained on the device: {qnode.device.wires}." ) if tapes is not None: res = [ t.draw( charset=charset, wire_order=_wire_order, show_all_wires=show_all_wires, max_length=max_length, ) for t in tapes[0] ] return "\n".join(res) return qnode.qtape.draw( charset=charset, wire_order=_wire_order, show_all_wires=show_all_wires, max_length=max_length, ) return wrapper
def draw( qnode, charset="unicode", wire_order=None, show_all_wires=False, max_length=None, expansion_strategy=None, ): """Create a function that draws the given qnode. Args: qnode (.QNode): the input QNode that is to be drawn. charset (str, optional): The charset that should be used. Currently, "unicode" and "ascii" are supported. wire_order (Sequence[Any]): the order (from top to bottom) to print the wires of the circuit show_all_wires (bool): If True, all wires, including empty wires, are printed. max_length (int, optional): Maximum string width (columns) when printing the circuit to the CLI. expansion_strategy (str): The strategy to use when circuit expansions or decompositions are required. - ``gradient``: The QNode will attempt to decompose the internal circuit such that all circuit operations are supported by the gradient method. - ``device``: The QNode will attempt to decompose the internal circuit such that all circuit operations are natively supported by the device. Returns: A function that has the same argument signature as ``qnode``. When called, the function will draw the QNode. **Example** Given the following definition of a QNode, .. code-block:: python3 dev = qml.device('default.qubit', wires=2) @qml.qnode(dev) def circuit(a, w): qml.Hadamard(0) qml.CRX(a, wires=[0, 1]) qml.Rot(*w, wires=[1]) qml.CRX(-a, wires=[0, 1]) return qml.expval(qml.PauliZ(0) @ qml.PauliZ(1)) We can draw the it like such: >>> drawer = qml.draw(circuit) >>> print(drawer(a=2.3, w=[1.2, 3.2, 0.7])) 0: ──H──╭C────────────────────────────╭C─────────╭┤ ⟨Z ⊗ Z⟩ 1: ─────╰RX(2.3)──Rot(1.2, 3.2, 0.7)──╰RX(-2.3)──╰┤ ⟨Z ⊗ Z⟩ Circuit drawing works with devices with custom wire labels: .. code-block:: python3 dev = qml.device('default.qubit', wires=["a", -1, "q2"]) @qml.qnode(dev) def circuit(): qml.Hadamard(wires=-1) qml.CNOT(wires=["a", "q2"]) qml.RX(0.2, wires="a") return qml.expval(qml.PauliX(wires="q2")) When printed, the wire order matches the order defined on the device: fig, ax = draw_mpl(circuit)(1.2345,1.2345) plt.show() >>> print(drawer()) a: ─────╭C──RX(0.2)──┤ -1: ──H──│────────────┤ q2: ─────╰X───────────┤ ⟨X⟩ We can use the ``wire_order`` argument to change the wire order: >>> drawer = qml.draw(circuit, wire_order=["q2", "a", -1]) >>> print(drawer()) q2: ──╭X───────────┤ ⟨X⟩ a: ──╰C──RX(0.2)──┤ -1: ───H───────────┤ """ @wraps(qnode) def wrapper(*args, **kwargs): original_expansion_strategy = getattr(qnode, "expansion_strategy", None) try: qnode.expansion_strategy = expansion_strategy or original_expansion_strategy tapes = qnode.construct(args, kwargs) finally: qnode.expansion_strategy = original_expansion_strategy _wire_order = wire_order or qnode.device.wires _wire_order = qml.wires.Wires(_wire_order) if show_all_wires and len(_wire_order) < qnode.device.num_wires: raise ValueError( "When show_all_wires is enabled, the provided wire order must contain all wires on the device." ) if not qnode.device.wires.contains_wires(_wire_order): raise ValueError( f"Provided wire order {_wire_order.labels} contains wires not contained on the device: {qnode.device.wires}." ) if tapes is not None: res = [ t.draw( charset=charset, wire_order=_wire_order, show_all_wires=show_all_wires, max_length=max_length, ) for t in tapes[0] ] return "\n".join(res) return qnode.qtape.draw( charset=charset, wire_order=_wire_order, show_all_wires=show_all_wires, max_length=max_length, ) return wrapper
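Because the drawer accepts an `expansion_strategy`, a short sketch of passing it through `qml.draw` may help; the device and gates mirror the docstring's own example and are assumptions rather than additions to the API:

```python
import pennylane as qml

dev = qml.device("default.qubit", wires=2)

@qml.qnode(dev)
def circuit(a):
    qml.Hadamard(wires=0)
    qml.CRX(a, wires=[0, 1])
    return qml.expval(qml.PauliZ(1))

# Decompose down to device-supported operations before rendering.
print(qml.draw(circuit, expansion_strategy="device")(0.5))
```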
44,080
def _hermite_moment(alpha, beta, t, e, rc): r"""Compute Hermite moment integral recursively. The Hermite moment integral in one dimension is defined as .. math:: \int_{-\infty }^{+\infty} x_C^e \Lambda_t dx, where :math:`e` is the multipole moment order, :math:`C` is the origin of the Cartesian coordinates, and :math:`\Lambda_t` is the :math:`t` component of the Hermite Gaussian. The integral can be computed recursively as [`Helgaker (1995) p802 <https://www.worldscientific.com/doi/abs/10.1142/9789812832115_0001>`_] .. math:: M_{t}^{e+1} = t M_{t-1}^{e} + X_PC M_{t}^{e} + \frac{1}{2p} M_{t+1}^{e}. This integral is zero for :math:`t > e` and .. math:: M_t^0 = \delta _{t0} \sqrt{\frac{\pi}{p}}, where :math:`p = \alpha + \beta` and :math:`\alpha, \beta` are the exponents of the Gaussian functions that construct the Hermite Gaussian :math:`\Lambda`. Args: alpha (array[float]): exponent of the first Gaussian function beta (array[float]): exponent of the second Gaussian function t (integer): order of the Hermite Gaussian e (integer): order of the multipole moment rc (array[float]): distance between the center of the Hermite Gaussian and the origin Returns: array[float]: expansion coefficients for each Gaussian combination **Example** >>> alpha = np.array([3.42525091]) >>> beta = np.array([3.42525091]) >>> t = 0 >>> e = 1 >>> rc = 1.5 >>> _hermite_moment(alpha, beta, t, e, rc) array([1.0157925]) """ p = anp.array(alpha + beta) if t > e: return 0.0 if e == 0 and t != 0: return 0.0 if e == 0 and t == 0: return anp.sqrt(anp.pi / p) m = ( _hermite_moment(alpha, beta, t - 1, e - 1, rc) * t + _hermite_moment(alpha, beta, t, e - 1, rc) * rc + _hermite_moment(alpha, beta, t + 1, e - 1, rc) / (2 * p) ) return m
def _hermite_moment(alpha, beta, t, e, rc): r"""Recursively computes the Hermite moment integral. The Hermite moment integral in one dimension is defined as .. math:: \int_{-\infty }^{+\infty} x_C^e \Lambda_t dx, where :math:`e` is the multipole moment order, :math:`C` is the origin of the Cartesian coordinates, and :math:`\Lambda_t` is the :math:`t` component of the Hermite Gaussian. The integral can be computed recursively as [`Helgaker (1995) p802 <https://www.worldscientific.com/doi/abs/10.1142/9789812832115_0001>`_] .. math:: M_{t}^{e+1} = t M_{t-1}^{e} + X_PC M_{t}^{e} + \frac{1}{2p} M_{t+1}^{e}. This integral is zero for :math:`t > e` and .. math:: M_t^0 = \delta _{t0} \sqrt{\frac{\pi}{p}}, where :math:`p = \alpha + \beta` and :math:`\alpha, \beta` are the exponents of the Gaussian functions that construct the Hermite Gaussian :math:`\Lambda`. Args: alpha (array[float]): exponent of the first Gaussian function beta (array[float]): exponent of the second Gaussian function t (integer): order of the Hermite Gaussian e (integer): order of the multipole moment rc (array[float]): distance between the center of the Hermite Gaussian and the origin Returns: array[float]: expansion coefficients for each Gaussian combination **Example** >>> alpha = np.array([3.42525091]) >>> beta = np.array([3.42525091]) >>> t = 0 >>> e = 1 >>> rc = 1.5 >>> _hermite_moment(alpha, beta, t, e, rc) array([1.0157925]) """ p = anp.array(alpha + beta) if t > e: return 0.0 if e == 0 and t != 0: return 0.0 if e == 0 and t == 0: return anp.sqrt(anp.pi / p) m = ( _hermite_moment(alpha, beta, t - 1, e - 1, rc) * t + _hermite_moment(alpha, beta, t, e - 1, rc) * rc + _hermite_moment(alpha, beta, t + 1, e - 1, rc) / (2 * p) ) return m
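A quick numerical check of the docstring example: for `t = 0`, `e = 1` the recursion reduces to the base case times `rc`, since the `t - 1` and `t + 1` terms vanish. Plain NumPy is enough for the check (the autograd wrapper `anp` used above is not needed here):

```python
import numpy as np

alpha = beta = 3.42525091
p = alpha + beta
rc = 1.5

# M_0^1 = rc * M_0^0 = rc * sqrt(pi / p)
print(rc * np.sqrt(np.pi / p))  # ~1.0158, matching _hermite_moment(alpha, beta, 0, 1, rc)
```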
58,300
def run_workspace_command( args: MkosiArgs, root: Path, cmd: Sequence[PathString], network: bool = False, env: Optional[Mapping[str, str]] = None, nspawn_params: Optional[List[str]] = None, capture_stdout: bool = False, check: bool = True, ) -> CompletedProcess: nspawn = [ nspawn_executable(), "--quiet", f"--directory={root}", "--uuid=" + args.machine_id, "--machine=mkosi-" + uuid.uuid4().hex, "--as-pid2", "--register=no", f"--bind={var_tmp(root)}:/var/tmp", "--setenv=SYSTEMD_OFFLINE=1", *nspawn_rlimit_params(), ] stdout = None if network: # If we're using the host network namespace, use the same resolver nspawn += ["--bind-ro=/etc/resolv.conf"] else: nspawn += ["--private-network"] if env: nspawn += [f"--setenv={k}={v}" for k, v in env.items()] if "workspace-command" in ARG_DEBUG: nspawn += ["--setenv=SYSTEMD_LOG_LEVEL=debug"] if args.image_id: nspawn += [f"--setenv={args.image_id}"] if args.image_version: nspawn += [f"--setenv={args.image_version}"] if nspawn_params: nspawn += nspawn_params if capture_stdout: stdout = subprocess.PIPE nspawn += ["--console=pipe"] if args.usr_only: nspawn += [f"--bind={root_home(args, root)}:/root"] if args.nspawn_keep_unit: nspawn += ["--keep-unit"] try: return run([*nspawn, "--", *cmd], check=check, stdout=stdout, text=capture_stdout) except subprocess.CalledProcessError as e: if "workspace-command" in ARG_DEBUG: run(nspawn, check=False) die(f"Workspace command {shell_join(cmd)} returned non-zero exit code {e.returncode}.")
def run_workspace_command( args: MkosiArgs, root: Path, cmd: Sequence[PathString], network: bool = False, env: Optional[Mapping[str, str]] = None, nspawn_params: Optional[List[str]] = None, capture_stdout: bool = False, check: bool = True, ) -> CompletedProcess: nspawn = [ nspawn_executable(), "--quiet", f"--directory={root}", "--uuid=" + args.machine_id, "--machine=mkosi-" + uuid.uuid4().hex, "--as-pid2", "--register=no", f"--bind={var_tmp(root)}:/var/tmp", "--setenv=SYSTEMD_OFFLINE=1", *nspawn_rlimit_params(), ] stdout = None if network: # If we're using the host network namespace, use the same resolver nspawn += ["--bind-ro=/etc/resolv.conf"] else: nspawn += ["--private-network"] if env: nspawn += [f"--setenv={k}={v}" for k, v in env.items()] if "workspace-command" in ARG_DEBUG: nspawn += ["--setenv=SYSTEMD_LOG_LEVEL=debug"] if args.image_id: nspawn += [f"--setenv=IMAGE_ID={args.image_id}"] if args.image_version: nspawn += [f"--setenv=IMAGE_VERSION={args.image_version}"] if nspawn_params: nspawn += nspawn_params if capture_stdout: stdout = subprocess.PIPE nspawn += ["--console=pipe"] if args.usr_only: nspawn += [f"--bind={root_home(args, root)}:/root"] if args.nspawn_keep_unit: nspawn += ["--keep-unit"] try: return run([*nspawn, "--", *cmd], check=check, stdout=stdout, text=capture_stdout) except subprocess.CalledProcessError as e: if "workspace-command" in ARG_DEBUG: run(nspawn, check=False) die(f"Workspace command {shell_join(cmd)} returned non-zero exit code {e.returncode}.")
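The corrected flags build full `NAME=VALUE` assignments, which is the form `systemd-nspawn --setenv` expects; a tiny sketch with hypothetical values:

```python
# Hypothetical values, only to show the resulting argument strings.
image_id, image_version = "my-image", "36"

nspawn = []
nspawn += [f"--setenv=IMAGE_ID={image_id}"]            # NAME=VALUE, as in the fixed code
nspawn += [f"--setenv=IMAGE_VERSION={image_version}"]
print(nspawn)  # ['--setenv=IMAGE_ID=my-image', '--setenv=IMAGE_VERSION=36']
```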
5,638
def solve_triangular(a, b, trans=0, lower=False, unit_diagonal=False, overwrite_b=False, debug=None, check_finite=True): """ Solve the equation `a x = b` for `x`, assuming a is a triangular matrix. Parameters ---------- a : (M, M) array_like A triangular matrix b : (M,) or (M, N) array_like Right-hand side matrix in `a x = b` lower : bool, optional Use only data contained in the lower triangle of `a`. Default is to use upper triangle. trans : {0, 1, 2, 'N', 'T', 'C'}, optional Type of system to solve: ======== ========= trans system ======== ========= 0 or 'N' a x = b 1 or 'T' a^T x = b 2 or 'C' a^H x = b ======== ========= unit_diagonal : bool, optional If True, diagonal elements of `a` are assumed to be 1 and will not be referenced. overwrite_b : bool, optional Allow overwriting data in `b` (may enhance performance) check_finite : bool, optional Whether to check that the input matrices contain only finite numbers. Disabling may give a performance gain, but may result in problems (crashes, non-termination) if the inputs do contain infinities or NaNs. Returns ------- x : (M,) or (M, N) ndarray Solution to the system `a x = b`. Shape of return matches `b`. Raises ------ LinAlgError If `a` is singular Notes ----- .. versionadded:: 0.9.0 Examples -------- Solve the lower triangular system a x = b, where:: [3 0 0 0] [4] a = [2 1 0 0] b = [2] [1 0 1 0] [4] [1 1 1 1] [2] >>> from scipy.linalg import solve_triangular >>> a = np.array([[3, 0, 0, 0], [2, 1, 0, 0], [1, 0, 1, 0], [1, 1, 1, 1]]) >>> b = np.array([4, 2, 4, 2]) >>> x = solve_triangular(a, b, lower=True) >>> x array([ 1.33333333, -0.66666667, 2.66666667, -1.33333333]) >>> a.dot(x) # Check the result array([ 4., 2., 4., 2.]) """ # Deprecate keyword "debug" if debug is not None: warn('Use of the "debug" keyword is deprecated ' 'and this keyword will be removed in the future ' 'versions of SciPy.', DeprecationWarning, stacklevel=2) a1 = _asarray_validated(a, check_finite=check_finite) b1 = _asarray_validated(b, check_finite=check_finite) if len(a1.shape) != 2 or a1.shape[0] != a1.shape[1]: raise ValueError('expected square matrix') if a1.shape[0] != b1.shape[0]: raise ValueError('incompatible dimensions') overwrite_b = overwrite_b or _datacopied(b1, b) if debug: print('solve:overwrite_b=', overwrite_b) trans = {'N': 0, 'T': 1, 'C': 2}.get(trans, trans) trtrs, = get_lapack_funcs(('trtrs',), (a1, b1)) if a1.flags.f_contiguous: x, info = trtrs(a1, b1, overwrite_b=overwrite_b, lower=lower, trans=trans, unitdiag=unit_diagonal) else: x, info = trtrs(a1.T, b1, overwrite_b=overwrite_b, lower=lower, trans=not trans, unitdiag=unit_diagonal) if info == 0: return x if info > 0: raise LinAlgError("singular matrix: resolution failed at diagonal %d" % (info-1)) raise ValueError('illegal value in %d-th argument of internal trtrs' % (-info))
def solve_triangular(a, b, trans=0, lower=False, unit_diagonal=False, overwrite_b=False, debug=None, check_finite=True): """ Solve the equation `a x = b` for `x`, assuming a is a triangular matrix. Parameters ---------- a : (M, M) array_like A triangular matrix b : (M,) or (M, N) array_like Right-hand side matrix in `a x = b` lower : bool, optional Use only data contained in the lower triangle of `a`. Default is to use upper triangle. trans : {0, 1, 2, 'N', 'T', 'C'}, optional Type of system to solve: ======== ========= trans system ======== ========= 0 or 'N' a x = b 1 or 'T' a^T x = b 2 or 'C' a^H x = b ======== ========= unit_diagonal : bool, optional If True, diagonal elements of `a` are assumed to be 1 and will not be referenced. overwrite_b : bool, optional Allow overwriting data in `b` (may enhance performance) check_finite : bool, optional Whether to check that the input matrices contain only finite numbers. Disabling may give a performance gain, but may result in problems (crashes, non-termination) if the inputs do contain infinities or NaNs. Returns ------- x : (M,) or (M, N) ndarray Solution to the system `a x = b`. Shape of return matches `b`. Raises ------ LinAlgError If `a` is singular Notes ----- .. versionadded:: 0.9.0 Examples -------- Solve the lower triangular system a x = b, where:: [3 0 0 0] [4] a = [2 1 0 0] b = [2] [1 0 1 0] [4] [1 1 1 1] [2] >>> from scipy.linalg import solve_triangular >>> a = np.array([[3, 0, 0, 0], [2, 1, 0, 0], [1, 0, 1, 0], [1, 1, 1, 1]]) >>> b = np.array([4, 2, 4, 2]) >>> x = solve_triangular(a, b, lower=True) >>> x array([ 1.33333333, -0.66666667, 2.66666667, -1.33333333]) >>> a.dot(x) # Check the result array([ 4., 2., 4., 2.]) """ # Deprecate keyword "debug" if debug is not None: warn('Use of the "debug" keyword is deprecated ' 'and this keyword will be removed in the future ' 'versions of SciPy.', DeprecationWarning, stacklevel=2) a1 = _asarray_validated(a, check_finite=check_finite) b1 = _asarray_validated(b, check_finite=check_finite) if len(a1.shape) != 2 or a1.shape[0] != a1.shape[1]: raise ValueError('expected square matrix') if a1.shape[0] != b1.shape[0]: raise ValueError('incompatible dimensions') overwrite_b = overwrite_b or _datacopied(b1, b) if debug: print('solve:overwrite_b=', overwrite_b) trans = {'N': 0, 'T': 1, 'C': 2}.get(trans, trans) trtrs, = get_lapack_funcs(('trtrs',), (a1, b1)) if a1.flags.f_contiguous: x, info = trtrs(a1, b1, overwrite_b=overwrite_b, lower=lower, trans=trans, unitdiag=unit_diagonal) else: x, info = trtrs(a1.T, b1, overwrite_b=overwrite_b, lower=not lower, trans=not trans, unitdiag=unit_diagonal) if info == 0: return x if info > 0: raise LinAlgError("singular matrix: resolution failed at diagonal %d" % (info-1)) raise ValueError('illegal value in %d-th argument of internal trtrs' % (-info))
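The changed branch matters because, for a C-contiguous lower-triangular `a`, the transposed array handed to `?trtrs` is upper triangular, so both `trans` and `lower` have to be flipped together. A quick check reusing the docstring's example matrix (which is C-contiguous and therefore takes that branch):

```python
import numpy as np
from scipy.linalg import solve_triangular

a = np.array([[3., 0, 0, 0], [2, 1, 0, 0], [1, 0, 1, 0], [1, 1, 1, 1]])  # C-contiguous
b = np.array([4., 2, 4, 2])

x = solve_triangular(a, b, lower=True)
print(np.allclose(a @ x, b))  # True with the corrected lower/trans handling
```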
44,987
def _running_with_backend() -> bool: """ Determine if running in context of a backend. This is always true when running using the `CloudTaskRunner`. Returns: - bool: if `_running_with_backend` is set in context """ return bool(prefect.context.get("running_with_backend"))
def _running_with_backend() -> bool: """ Determine if running in context of a backend. This is always true when running using the `CloudTaskRunner`. Returns: - bool: if `running_with_backend` is set in context """ return bool(prefect.context.get("running_with_backend"))
30,496
def find_indicators_to_limit(indicator_query: str, limit: int) -> list: """ Finds indicators using demisto.findIndicators """ iocs, _ = find_indicators_to_limit_loop(indicator_query, limit) return iocs[:limit]
def find_indicators_with_limit(indicator_query: str, limit: int) -> list: """ Finds indicators using demisto.findIndicators """ iocs, _ = find_indicators_to_limit_loop(indicator_query, limit) return iocs[:limit]
46,017
def build_laplacian_pyramid( input: torch.Tensor, max_level: int, border_type: str = 'reflect', align_corners: bool = False ) -> List[torch.Tensor]: r"""Construct the Laplacian pyramid for an image. .. image:: _static/img/build_pyramid.png The function constructs a vector of images and builds the Laplacian pyramid by recursively computing the difference after applying pyrUp to the adjacent layer in it's Gaussian pyramid. Args: input : the tensor to be used to construct the pyramid. max_level: 0-based index of the last (the smallest) pyramid layer. It must be non-negative. border_type: the padding mode to be applied before convolving. The expected modes are: ``'constant'``, ``'reflect'``, ``'replicate'`` or ``'circular'``. align_corners: interpolation flag. Shape: - Input: :math:`(B, C, H, W)` - Output :math:`[(B, C, H, W), (B, C, H/2, W/2), ...]` """ if not isinstance(input, torch.Tensor): raise TypeError(f"Input type is not a torch.Tensor. Got {type(input)}") if not len(input.shape) == 4: raise ValueError(f"Invalid input shape, we expect BxCxHxW. Got: {input.shape}") if not isinstance(max_level, int) or max_level < 0: raise ValueError(f"Invalid max_level, it must be a positive integer. Got: {max_level}") # create gaussian pyramid gaussian_pyramid: List[torch.Tensor] = build_pyramid(input, max_level) # create empty list laplacian_pyramid: List[torch.Tensor] = [] for i in range(max_level - 1): img_expand: torch.Tensor = pyrup(gaussian_pyramid[i + 1]) laplacian: torch.Tensor = gaussian_pyramid[i] - img_expand laplacian_pyramid.append(laplacian) laplacian_pyramid.append(gaussian_pyramid[-1]) return laplacian_pyramid
def build_laplacian_pyramid( input: Tensor, max_level: int, border_type: str = 'reflect', align_corners: bool = False ) -> List[torch.Tensor]: r"""Construct the Laplacian pyramid for an image. .. image:: _static/img/build_pyramid.png The function constructs a vector of images and builds the Laplacian pyramid by recursively computing the difference after applying pyrUp to the adjacent layer in it's Gaussian pyramid. Args: input : the tensor to be used to construct the pyramid. max_level: 0-based index of the last (the smallest) pyramid layer. It must be non-negative. border_type: the padding mode to be applied before convolving. The expected modes are: ``'constant'``, ``'reflect'``, ``'replicate'`` or ``'circular'``. align_corners: interpolation flag. Shape: - Input: :math:`(B, C, H, W)` - Output :math:`[(B, C, H, W), (B, C, H/2, W/2), ...]` """ if not isinstance(input, torch.Tensor): raise TypeError(f"Input type is not a torch.Tensor. Got {type(input)}") if not len(input.shape) == 4: raise ValueError(f"Invalid input shape, we expect BxCxHxW. Got: {input.shape}") if not isinstance(max_level, int) or max_level < 0: raise ValueError(f"Invalid max_level, it must be a positive integer. Got: {max_level}") # create gaussian pyramid gaussian_pyramid: List[torch.Tensor] = build_pyramid(input, max_level) # create empty list laplacian_pyramid: List[torch.Tensor] = [] for i in range(max_level - 1): img_expand: torch.Tensor = pyrup(gaussian_pyramid[i + 1]) laplacian: torch.Tensor = gaussian_pyramid[i] - img_expand laplacian_pyramid.append(laplacian) laplacian_pyramid.append(gaussian_pyramid[-1]) return laplacian_pyramid
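A self-contained sketch of the pyramid structure built above: each level stores the difference between a Gaussian level and the upsampled next level, and the input is recovered by upsampling and adding back. `F.interpolate` stands in for pyrdown/pyrup here, so this illustrates the bookkeeping rather than kornia's exact filtering:

```python
import torch
import torch.nn.functional as F

x = torch.rand(1, 3, 32, 32)
max_level = 3

# "Gaussian" pyramid via plain bilinear downsampling (stand-in for pyrdown).
gauss = [x]
for _ in range(max_level - 1):
    gauss.append(F.interpolate(gauss[-1], scale_factor=0.5, mode="bilinear", align_corners=False))

# Laplacian pyramid: per-level differences plus the coarsest Gaussian level.
lap = [g - F.interpolate(g_next, size=g.shape[-2:], mode="bilinear", align_corners=False)
       for g, g_next in zip(gauss[:-1], gauss[1:])]
lap.append(gauss[-1])

# Reconstruction: start from the coarsest level and add the stored differences back.
recon = lap[-1]
for diff in reversed(lap[:-1]):
    recon = diff + F.interpolate(recon, size=diff.shape[-2:], mode="bilinear", align_corners=False)

print(torch.allclose(recon, x, atol=1e-5))  # True
```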
783
def rumba_deconv_global(data, kernel, mask, n_iter=600, recon_type='smf', n_coils=1, R=1, use_tv=True, verbose=False): ''' Fit fODF for a all voxels simultaneously using RUMBA-SD. Deconvolves the kernel from the diffusion-weighted signal at each voxel by computing a maximum likelihood estimation of the fODF [1]_. Global fitting also permits the use of total variation regularization (RUMBA-SD + TV). The spatial dependence introduced by TV promotes smoother solutions (i.e. prevents oscillations), while still allowing for sharp discontinuities [2]_. This promots smoothness and continuity along individual tracts while preventing smoothing of adjacent tracts. Generally, global_fit will proceed more quickly than the voxelwise fit provided that the computer has adequate RAM (>= 16 GB will be more than sufficient.). Parameters ---------- data : 4d ndarray (x, y, z, N) Signal values for entire brain. None of the volume dimensions x, y, z can be 1 if TV regularization is required. kernel : 2d ndarray (N, M) Deconvolution kernel mapping volume fractions of the M compartments to N-length signal. Last two columns should be for GM and CSF. mask : 3d ndarray(x, y, z) Binary mask specifying voxels of interest with 1; fODF will only be fit at these voxels (0 elsewhere). n_iter : int, optional Number of iterations for fODF estimation. Must be a positive int. Default: 600 recon_type : {'smf', 'sos'}, optional MRI reconstruction method: spatial matched filter (SMF) or sum-of-squares (SoS). SMF reconstruction generates Rician noise while SoS reconstruction generates Noncentral Chi noise. Default: 'smf' n_coils : int, optional Number of coils in MRI scanner -- only relevant in SoS reconstruction. Must be a positive int. Default: 1 use_tv : bool, optional If true, applies total variation regularization. This requires a brain volume with no singleton dimensions. Default: True verbose : bool, optional If true, logs updates on estimated signal-to-noise ratio after each iteration. Default: False Returns ------- fodf : 4d ndarray (x, y, z, M-1) fODF computed for each voxel. f_gm : 3d ndarray (x, y, z) GM volume fraction at each voxel. f_csf : 3d ndarray (x, y, z) CSF volume fraction at each voxel. f_wm : 3d ndarray (x, y, z) White matter volume fraction at each voxel. f_iso : 3d ndarray (x, y, z) Isotropic volume fraction at each voxel (GM + CSF) combined : 4d ndarray (x, y, z, M-1) fODF combined with isotropic compartment for each voxel. Notes ----- TV modifies our cost function as follows: $ J(\bold{f}) = -\log{P(\bold{S}|\bold{H}, \bold{f}, \sigma^2, n)}) + \alpha_{TV}TV(\bold{f}) $ where the first term is the negative log likelihood described in the notes of `rumba_deconv`, and the second term is the TV energy, or the sum of gradient absolute values for the fODF across the entire brain. This results in a new multiplicative factor in the iterative scheme, now becoming: $ \bold{f}^{k+1} = \bold{f}^k \circ \frac{\bold{H}^T\left[\bold{S}\circ \frac{I_n(\bold{S}\circ\bold{Hf}^k/\sigma^2)} {I_{n-1}(\bold{S}\circ \bold{Hf}^k/\sigma^2)} \right ]} {\bold{H}^T\bold{Hf}^k}\circ\bold{R}^k $ where $\bold{R}^k$ is computed voxelwise by: $ (\bold{R}^k)_j = \frac{1}{1 - \alpha_{TV}div\left(\frac{\triangledown[ \bold{f}^k_{3D}]_j}{\lvert\triangledown[\bold{f}^k_{3D}]_j \rvert} \right)\biggr\rvert_{x, y, z}} $ Here, $\triangledown$ is the symbol for the 3D gradient at any voxel. 
The regularization strength, $\alpha_{TV}$ is updated after each iteration by the discrepancy principle -- specifically, it is selected to match the estimated variance after each iteration [3]_. References ---------- .. [1] Canales-Rodríguez, E. J., Daducci, A., Sotiropoulos, S. N., Caruyer, E., Aja-Fernández, S., Radua, J., Mendizabal, J. M. Y., Iturria-Medina, Y., Melie-García, L., Alemán-Gómez, Y., Thiran, J.-P., Sarró, S., Pomarol-Clotet, E., & Salvador, R. (2015). Spherical Deconvolution of Multichannel Diffusion MRI Data with Non-Gaussian Noise Models and Spatial Regularization. PLOS ONE, 10(10), e0138910. https://doi.org/10.1371/journal.pone.0138910 .. [2] Rudin, L. I., Osher, S., & Fatemi, E. (1992). Nonlinear total variation based noise removal algorithms. Physica D: Nonlinear Phenomena, 60(1), 259–268. https://doi.org/10.1016/0167-2789(92)90242-F .. [3] Chambolle A. An algorithm for total variation minimization and applications. Journal of Mathematical Imaging and Vision. 2004; 20:89–97. ''' # Crop data to reduce memory consumption dim_orig = data.shape ixmin, ixmax = bounding_box(mask) data = crop(data, ixmin, ixmax) mask = crop(mask, ixmin, ixmax) if np.any(np.array(data.shape[:3]) == 1) and use_tv: raise ValueError("Cannot use TV regularization if any spatial" + "dimensions are 1; " + f"provided dimensions were {data.shape[:3]}") epsilon = 1e-7 n_grad = kernel.shape[0] # gradient directions n_comp = kernel.shape[1] # number of compartments dim = data.shape n_v_tot = np.prod(dim[:3]) # total number of voxels # Initial guess is iso-probable fodf0 = np.ones((n_comp, 1), dtype=np.float32) fodf0 = fodf0 / np.sum(fodf0, axis=0) if recon_type == "smf": n_order = 1 # Rician noise (same as Noncentral Chi with order 1) elif recon_type == "sos": n_order = n_coils # Noncentral Chi noise (order = # of coils) else: raise ValueError("Invalid recon_type. 
Should be 'smf' or 'sos', " + f"received f{recon_type}") mask_vec = np.ravel(mask) # Indices of target voxels index_mask = np.atleast_1d(np.squeeze(np.argwhere(mask_vec))) n_v_true = len(index_mask) # number of target voxels data_2d = np.zeros((n_v_true, n_grad), dtype=np.float32) for i in range(n_grad): data_2d[:, i] = np.ravel(data[:, :, :, i])[ index_mask] # only keep voxels of interest data_2d = data_2d.T fodf = np.tile(fodf0, (1, n_v_true)) reblurred = np.matmul(kernel, fodf) # For use later kernel_t = kernel.T f_zero = 0 # Initialize algorithm parameters sigma0 = 1/15 sigma2 = sigma0**2 tv_lambda = sigma2 # initial guess for TV regularization strength # Expand into matrix form for iterations sigma2 = sigma2 * np.ones(data_2d.shape, dtype=np.float32) tv_lambda_aux = np.zeros((n_v_tot), dtype=np.float32) reblurred_s = data_2d * reblurred / sigma2 for i in range(n_iter): fodf_i = fodf ratio = mbessel_ratio(n_order, reblurred_s).astype(np.float32) rl_factor = np.matmul(kernel_t, data_2d*ratio) / \ (np.matmul(kernel_t, reblurred) + _EPS) if use_tv: # apply TV regularization tv_factor = np.ones(fodf_i.shape, dtype=np.float32) fodf_4d = _reshape_2d_4d(fodf_i.T, mask) # Compute gradient, divergence gr = _grad(fodf_4d) d_inv = 1 / np.sqrt(epsilon**2 + np.sum(gr**2, axis=3)) gr_norm = (gr * d_inv[:, :, :, None, :]) div_f = _divergence(gr_norm) g0 = np.abs(1 - tv_lambda * div_f) tv_factor_4d = 1 / (g0 + _EPS) for j in range(n_comp): tv_factor_1d = np.ravel(tv_factor_4d[:, :, :, j])[index_mask] tv_factor[j, :] = tv_factor_1d # Apply TV regularization to iteration factor rl_factor = rl_factor * tv_factor fodf = fodf_i * rl_factor # result of iteration fodf = np.maximum(f_zero, fodf) # positivity constraint # Update other variables reblurred = np.matmul(kernel, fodf) reblurred_s = data_2d * reblurred / sigma2 # Iterate variance sigma2_i = (1 / (n_grad * n_order)) * \ np.sum((data_2d**2 + reblurred**2) / 2 - ( sigma2 * reblurred_s) * ratio, axis=0) sigma2_i = np.minimum((1 / 8)**2, np.maximum(sigma2_i, (1 / 80)**2)) if verbose: logger.info("Iteration %d of %d", i+1, n_iter) snr_mean = np.mean(1 / np.sqrt(sigma2_i)) snr_std = np.std(1 / np.sqrt(sigma2_i)) logger.info( "Mean SNR (S0/sigma) estimated to be %.3f +/- %.3f", snr_mean, snr_std) # Expand into matrix sigma2 = np.tile(sigma2_i[None, :], (data_2d.shape[0], 1)) # Update TV regularization strength using the discrepancy principle if use_tv: if R == 1: tv_lambda = np.mean(sigma2_i) if tv_lambda < (1/30)**2: tv_lambda = (1/30)**2 else: # different factor for each voxel tv_lambda_aux[index_mask] = sigma2_i tv_lambda = np.reshape(tv_lambda_aux, (*dim[:3], 1)) fodf = fodf.astype(np.float64) fodf = fodf / (np.sum(fodf, axis=0)[None, ...] + _EPS) # normalize fODF # Extract compartments fodf_4d = np.zeros((*dim_orig[:3], n_comp)) _reshape_2d_4d(fodf.T, mask, out=fodf_4d[ixmin[0]:ixmax[0], ixmin[1]:ixmax[1], ixmin[2]:ixmax[2]]) fodf = fodf_4d[:, :, :, :-2] # WM compartment f_gm = fodf_4d[:, :, :, -2] # GM compartment f_csf = fodf_4d[:, :, :, -1] # CSF compartment f_wm = np.sum(fodf, axis=3) # white matter volume fraction combined = fodf + (f_gm[..., None] + f_csf[..., None]) \ / fodf.shape[3] f_iso = f_gm + f_csf return fodf, f_gm, f_csf, f_wm, f_iso, combined
def rumba_deconv_global(data, kernel, mask, n_iter=600, recon_type='smf', n_coils=1, R=1, use_tv=True, verbose=False): ''' Fit fODF for all voxels simultaneously using RUMBA-SD. Deconvolves the kernel from the diffusion-weighted signal at each voxel by computing a maximum likelihood estimation of the fODF [1]_. Global fitting also permits the use of total variation regularization (RUMBA-SD + TV). The spatial dependence introduced by TV promotes smoother solutions (i.e. prevents oscillations), while still allowing for sharp discontinuities [2]_. This promots smoothness and continuity along individual tracts while preventing smoothing of adjacent tracts. Generally, global_fit will proceed more quickly than the voxelwise fit provided that the computer has adequate RAM (>= 16 GB will be more than sufficient.). Parameters ---------- data : 4d ndarray (x, y, z, N) Signal values for entire brain. None of the volume dimensions x, y, z can be 1 if TV regularization is required. kernel : 2d ndarray (N, M) Deconvolution kernel mapping volume fractions of the M compartments to N-length signal. Last two columns should be for GM and CSF. mask : 3d ndarray(x, y, z) Binary mask specifying voxels of interest with 1; fODF will only be fit at these voxels (0 elsewhere). n_iter : int, optional Number of iterations for fODF estimation. Must be a positive int. Default: 600 recon_type : {'smf', 'sos'}, optional MRI reconstruction method: spatial matched filter (SMF) or sum-of-squares (SoS). SMF reconstruction generates Rician noise while SoS reconstruction generates Noncentral Chi noise. Default: 'smf' n_coils : int, optional Number of coils in MRI scanner -- only relevant in SoS reconstruction. Must be a positive int. Default: 1 use_tv : bool, optional If true, applies total variation regularization. This requires a brain volume with no singleton dimensions. Default: True verbose : bool, optional If true, logs updates on estimated signal-to-noise ratio after each iteration. Default: False Returns ------- fodf : 4d ndarray (x, y, z, M-1) fODF computed for each voxel. f_gm : 3d ndarray (x, y, z) GM volume fraction at each voxel. f_csf : 3d ndarray (x, y, z) CSF volume fraction at each voxel. f_wm : 3d ndarray (x, y, z) White matter volume fraction at each voxel. f_iso : 3d ndarray (x, y, z) Isotropic volume fraction at each voxel (GM + CSF) combined : 4d ndarray (x, y, z, M-1) fODF combined with isotropic compartment for each voxel. Notes ----- TV modifies our cost function as follows: $ J(\bold{f}) = -\log{P(\bold{S}|\bold{H}, \bold{f}, \sigma^2, n)}) + \alpha_{TV}TV(\bold{f}) $ where the first term is the negative log likelihood described in the notes of `rumba_deconv`, and the second term is the TV energy, or the sum of gradient absolute values for the fODF across the entire brain. This results in a new multiplicative factor in the iterative scheme, now becoming: $ \bold{f}^{k+1} = \bold{f}^k \circ \frac{\bold{H}^T\left[\bold{S}\circ \frac{I_n(\bold{S}\circ\bold{Hf}^k/\sigma^2)} {I_{n-1}(\bold{S}\circ \bold{Hf}^k/\sigma^2)} \right ]} {\bold{H}^T\bold{Hf}^k}\circ\bold{R}^k $ where $\bold{R}^k$ is computed voxelwise by: $ (\bold{R}^k)_j = \frac{1}{1 - \alpha_{TV}div\left(\frac{\triangledown[ \bold{f}^k_{3D}]_j}{\lvert\triangledown[\bold{f}^k_{3D}]_j \rvert} \right)\biggr\rvert_{x, y, z}} $ Here, $\triangledown$ is the symbol for the 3D gradient at any voxel. 
The regularization strength, $\alpha_{TV}$ is updated after each iteration by the discrepancy principle -- specifically, it is selected to match the estimated variance after each iteration [3]_. References ---------- .. [1] Canales-Rodríguez, E. J., Daducci, A., Sotiropoulos, S. N., Caruyer, E., Aja-Fernández, S., Radua, J., Mendizabal, J. M. Y., Iturria-Medina, Y., Melie-García, L., Alemán-Gómez, Y., Thiran, J.-P., Sarró, S., Pomarol-Clotet, E., & Salvador, R. (2015). Spherical Deconvolution of Multichannel Diffusion MRI Data with Non-Gaussian Noise Models and Spatial Regularization. PLOS ONE, 10(10), e0138910. https://doi.org/10.1371/journal.pone.0138910 .. [2] Rudin, L. I., Osher, S., & Fatemi, E. (1992). Nonlinear total variation based noise removal algorithms. Physica D: Nonlinear Phenomena, 60(1), 259–268. https://doi.org/10.1016/0167-2789(92)90242-F .. [3] Chambolle A. An algorithm for total variation minimization and applications. Journal of Mathematical Imaging and Vision. 2004; 20:89–97. ''' # Crop data to reduce memory consumption dim_orig = data.shape ixmin, ixmax = bounding_box(mask) data = crop(data, ixmin, ixmax) mask = crop(mask, ixmin, ixmax) if np.any(np.array(data.shape[:3]) == 1) and use_tv: raise ValueError("Cannot use TV regularization if any spatial" + "dimensions are 1; " + f"provided dimensions were {data.shape[:3]}") epsilon = 1e-7 n_grad = kernel.shape[0] # gradient directions n_comp = kernel.shape[1] # number of compartments dim = data.shape n_v_tot = np.prod(dim[:3]) # total number of voxels # Initial guess is iso-probable fodf0 = np.ones((n_comp, 1), dtype=np.float32) fodf0 = fodf0 / np.sum(fodf0, axis=0) if recon_type == "smf": n_order = 1 # Rician noise (same as Noncentral Chi with order 1) elif recon_type == "sos": n_order = n_coils # Noncentral Chi noise (order = # of coils) else: raise ValueError("Invalid recon_type. 
Should be 'smf' or 'sos', " + f"received f{recon_type}") mask_vec = np.ravel(mask) # Indices of target voxels index_mask = np.atleast_1d(np.squeeze(np.argwhere(mask_vec))) n_v_true = len(index_mask) # number of target voxels data_2d = np.zeros((n_v_true, n_grad), dtype=np.float32) for i in range(n_grad): data_2d[:, i] = np.ravel(data[:, :, :, i])[ index_mask] # only keep voxels of interest data_2d = data_2d.T fodf = np.tile(fodf0, (1, n_v_true)) reblurred = np.matmul(kernel, fodf) # For use later kernel_t = kernel.T f_zero = 0 # Initialize algorithm parameters sigma0 = 1/15 sigma2 = sigma0**2 tv_lambda = sigma2 # initial guess for TV regularization strength # Expand into matrix form for iterations sigma2 = sigma2 * np.ones(data_2d.shape, dtype=np.float32) tv_lambda_aux = np.zeros((n_v_tot), dtype=np.float32) reblurred_s = data_2d * reblurred / sigma2 for i in range(n_iter): fodf_i = fodf ratio = mbessel_ratio(n_order, reblurred_s).astype(np.float32) rl_factor = np.matmul(kernel_t, data_2d*ratio) / \ (np.matmul(kernel_t, reblurred) + _EPS) if use_tv: # apply TV regularization tv_factor = np.ones(fodf_i.shape, dtype=np.float32) fodf_4d = _reshape_2d_4d(fodf_i.T, mask) # Compute gradient, divergence gr = _grad(fodf_4d) d_inv = 1 / np.sqrt(epsilon**2 + np.sum(gr**2, axis=3)) gr_norm = (gr * d_inv[:, :, :, None, :]) div_f = _divergence(gr_norm) g0 = np.abs(1 - tv_lambda * div_f) tv_factor_4d = 1 / (g0 + _EPS) for j in range(n_comp): tv_factor_1d = np.ravel(tv_factor_4d[:, :, :, j])[index_mask] tv_factor[j, :] = tv_factor_1d # Apply TV regularization to iteration factor rl_factor = rl_factor * tv_factor fodf = fodf_i * rl_factor # result of iteration fodf = np.maximum(f_zero, fodf) # positivity constraint # Update other variables reblurred = np.matmul(kernel, fodf) reblurred_s = data_2d * reblurred / sigma2 # Iterate variance sigma2_i = (1 / (n_grad * n_order)) * \ np.sum((data_2d**2 + reblurred**2) / 2 - ( sigma2 * reblurred_s) * ratio, axis=0) sigma2_i = np.minimum((1 / 8)**2, np.maximum(sigma2_i, (1 / 80)**2)) if verbose: logger.info("Iteration %d of %d", i+1, n_iter) snr_mean = np.mean(1 / np.sqrt(sigma2_i)) snr_std = np.std(1 / np.sqrt(sigma2_i)) logger.info( "Mean SNR (S0/sigma) estimated to be %.3f +/- %.3f", snr_mean, snr_std) # Expand into matrix sigma2 = np.tile(sigma2_i[None, :], (data_2d.shape[0], 1)) # Update TV regularization strength using the discrepancy principle if use_tv: if R == 1: tv_lambda = np.mean(sigma2_i) if tv_lambda < (1/30)**2: tv_lambda = (1/30)**2 else: # different factor for each voxel tv_lambda_aux[index_mask] = sigma2_i tv_lambda = np.reshape(tv_lambda_aux, (*dim[:3], 1)) fodf = fodf.astype(np.float64) fodf = fodf / (np.sum(fodf, axis=0)[None, ...] + _EPS) # normalize fODF # Extract compartments fodf_4d = np.zeros((*dim_orig[:3], n_comp)) _reshape_2d_4d(fodf.T, mask, out=fodf_4d[ixmin[0]:ixmax[0], ixmin[1]:ixmax[1], ixmin[2]:ixmax[2]]) fodf = fodf_4d[:, :, :, :-2] # WM compartment f_gm = fodf_4d[:, :, :, -2] # GM compartment f_csf = fodf_4d[:, :, :, -1] # CSF compartment f_wm = np.sum(fodf, axis=3) # white matter volume fraction combined = fodf + (f_gm[..., None] + f_csf[..., None]) \ / fodf.shape[3] f_iso = f_gm + f_csf return fodf, f_gm, f_csf, f_wm, f_iso, combined
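At the core of the loop above is a multiplicative Richardson-Lucy-type step, `f <- f * H^T(s * ratio) / (H^T H f)`, scaled by the Bessel-function ratio and, optionally, the TV factor. A toy sketch with the ratio fixed at 1 (its high-SNR limit) and no TV, on synthetic nonnegative data, shows how the update keeps `f` nonnegative while driving the residual down; all dimensions and values here are made up:

```python
import numpy as np

rng = np.random.default_rng(0)
H = np.abs(rng.normal(size=(30, 10)))       # synthetic nonnegative kernel (N x M)
f_true = np.abs(rng.normal(size=(10, 1)))
s = H @ f_true                              # noiseless synthetic signal

f = np.ones((10, 1)) / 10                   # iso-probable initial guess, as in the code
eps = 1e-15
for _ in range(2000):
    f = f * (H.T @ s) / (H.T @ (H @ f) + eps)   # multiplicative step, ratio == 1, no TV

residual = np.linalg.norm(H @ f - s) / np.linalg.norm(s)
print(bool(f.min() >= 0), float(residual))  # f stays nonnegative; relative residual is small
```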
44,932
def test_get_flow_form_file_azure_runs(monkeypatch): client = MagicMock(download_blob=MagicMock()) service = MagicMock(get_blob_client=MagicMock(return_value=client)) monkeypatch.setattr( "prefect.environments.storage.Azure._azure_block_blob_service", service ) f = Flow("test") monkeypatch.setattr( "prefect.environments.storage.azure.extract_flow_from_file", MagicMock(return_value=f), ) storage = Azure(container="container", stored_as_script=True) assert f.name not in storage flow_location = storage.add_flow(f) new_flow = storage.get_flow(flow_location) assert client.download_blob.called assert f.name in storage assert isinstance(new_flow, Flow) assert new_flow.name == "test" assert len(new_flow.tasks) == 0 state = new_flow.run() assert state.is_successful()
def test_get_flow_from_file_azure_runs(monkeypatch): client = MagicMock(download_blob=MagicMock()) service = MagicMock(get_blob_client=MagicMock(return_value=client)) monkeypatch.setattr( "prefect.environments.storage.Azure._azure_block_blob_service", service ) f = Flow("test") monkeypatch.setattr( "prefect.environments.storage.azure.extract_flow_from_file", MagicMock(return_value=f), ) storage = Azure(container="container", stored_as_script=True) assert f.name not in storage flow_location = storage.add_flow(f) new_flow = storage.get_flow(flow_location) assert client.download_blob.called assert f.name in storage assert isinstance(new_flow, Flow) assert new_flow.name == "test" assert len(new_flow.tasks) == 0 state = new_flow.run() assert state.is_successful()
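Both versions of this test rely on the same mocking pattern: a MagicMock stands in for the blob service so no network call is made. A stripped-down sketch of that pattern with hypothetical names:

from unittest.mock import MagicMock

def fetch_blob(service, container, name):
    # Hypothetical helper playing the role of storage.get_flow above.
    client = service.get_blob_client(container=container, blob=name)
    return client.download_blob()

def test_fetch_blob_uses_client():
    client = MagicMock(download_blob=MagicMock(return_value=b"flow bytes"))
    service = MagicMock(get_blob_client=MagicMock(return_value=client))
    assert fetch_blob(service, "container", "flow") == b"flow bytes"
    assert client.download_blob.called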
24,385
def section_validator(sections, loader, file_name, *prev_sections): sections_display = ', '.join(prev_sections) if sections_display: sections_display += ', ' validations = [ Validation(key='name'), Validation(key='header_level', type=int), Validation(key='tab', type=str, required=False), Validation(key='description'), Validation(key='parameters', type=dict, required=False), Validation(key='prepend_text', type=str, required=False), Validation(key='append_text', type=str, required=False), Validation(key='processor', type=str, required=False), Validation(key='hidden', type=bool, required=False), Validation(key='overrides', type=list, required=False), ] overrides = {} override_errors = [] # load base parameters once base_params = load_manifest(loader.source) base_params['check_name'] = base_params['integration_id'] base_params['service_checks'] = load_service_checks(loader.source) section_names_origin = {} for section_index, section in enumerate(sections, 1): if not isinstance(section, dict): loader.errors.append( '{}, {}, {}section #{}: section attribute must be a mapping object'.format( loader.source, file_name, sections_display, section_index ) ) continue # expand and override all templates within the section templates_resolved = False while 'template' in section: overrides.update(section.pop('overrides', {})) try: template = loader.templates.load(section.pop('template')) except Exception as e: loader.errors.append(f'{loader.source}, {file_name}, {sections_display}section #{section_index}: {e}') break errors = loader.templates.apply_overrides(template, overrides) if errors: override_errors.append((section_index, errors)) if isinstance(template, dict): template.update(section) section = template sections[section_index - 1] = template elif isinstance(template, list): if template: section = template[0] for item_index, template_item in enumerate(template): sections.insert(section_index + item_index, template_item) # Delete what's at the current index sections.pop(section_index - 1) # Perform this check once again if not isinstance(section, dict): loader.errors.append( '{}, {}, {}section #{}: Template section must be a mapping object'.format( loader.source, file_name, sections_display, section_index ) ) break else: loader.errors.append( '{}, {}, {}section #{}: Template refers to an empty array'.format( loader.source, file_name, sections_display, section_index ) ) break else: loader.errors.append( '{}, {}, {}section #{}: Template does not refer to a mapping object nor array'.format( loader.source, file_name, sections_display, section_index ) ) break # Only set upon success or if there were no templates else: templates_resolved = True if not templates_resolved: continue MISSING = ( f'{loader.source}, {file_name}, {sections_display}section #{section_index}: ' f'Every section must contain a `{{key}}` attribute' ) INVALID = ( f'{loader.source}, {file_name}, {sections_display}section #{section_index}: ' f'Attribute `{{key}}` must be a {{type}}' ) # now validate the expanded section object _validate(section, validations, loader, MISSING, INVALID) if loader.errors: return section_name = section['name'] if section_name in section_names_origin: loader.errors.append( '{}, {}, {}section #{}: section name `{}` already used by section #{}'.format( loader.source, file_name, sections_display, section_index, section_name, section_names_origin[section_name], ) ) else: section_names_origin[section_name] = section_index if loader.errors: return # perform parameter expansion on the description text # first check if 
there are any fields to be replaced description = section['description'] def on_indent_parse_error(value, spec): loader.errors.append( '{}, {}, {}section #{}: Could not parse indent level in format spec `{}`'.format( loader.source, file_name, sections_display, section_index, spec, ) ) formatter = ParamsFormatter(on_indent_parse_error) if len(list(formatter.parse(description))) > 1: params = copy.deepcopy(section['parameters']) if params: # perform parameter expansion for any parameter values for k, v in params.items(): if v is not None: params[k] = v.format(**base_params) params.update(base_params) else: params = base_params section['description'] = formatter.format(description, **params) if 'sections' in section: nested_sections = section['sections'] if not isinstance(nested_sections, list): loader.errors.append( '{}, {}, {}{}: Attribute `sections` must be a list'.format( loader.source, file_name, sections_display, section_name ) ) continue previous_sections = list(prev_sections) previous_sections.append(section_name) section_validator(nested_sections, loader, file_name, *previous_sections) # If there are unused overrides, add the associated error messages if overrides: for section_index, errors in override_errors: error_message = '\n'.join(errors) loader.errors.append( f'{loader.source}, {file_name}, {sections_display}section #{section_index}: {error_message}' )
def section_validator(sections, loader, file_name, *prev_sections): sections_display = ', '.join(prev_sections) if sections_display: sections_display += ', ' validations = [ Validation(key='name'), Validation(key='header_level', type=int), Validation(key='tab', type=str, required=False), Validation(key='description'), Validation(key='parameters', type=dict, required=False), Validation(key='prepend_text', type=str, required=False), Validation(key='append_text', type=str, required=False), Validation(key='processor', type=str, required=False), Validation(key='hidden', type=bool, required=False), Validation(key='overrides', type=list, required=False), ] overrides = {} override_errors = [] # load base parameters once base_params = load_manifest(loader.source) base_params['check_name'] = base_params['integration_id'] base_params['service_checks'] = load_service_checks(loader.source) section_names_origin = {} for section_index, section in enumerate(sections, 1): if not isinstance(section, dict): loader.errors.append( '{}, {}, {}section #{}: section attribute must be a mapping object'.format( loader.source, file_name, sections_display, section_index ) ) continue # expand and override all templates within the section templates_resolved = False while 'template' in section: overrides.update(section.pop('overrides', {})) try: template = loader.templates.load(section.pop('template')) except Exception as e: loader.errors.append(f'{loader.source}, {file_name}, {sections_display}section #{section_index}: {e}') break errors = loader.templates.apply_overrides(template, overrides) if errors: override_errors.append((section_index, errors)) if isinstance(template, dict): template.update(section) section = template sections[section_index - 1] = template elif isinstance(template, list): if template: section = template[0] for item_index, template_item in enumerate(template): sections.insert(section_index + item_index, template_item) # Delete what's at the current index sections.pop(section_index - 1) # Perform this check once again if not isinstance(section, dict): loader.errors.append( '{}, {}, {}section #{}: Template section must be a mapping object'.format( loader.source, file_name, sections_display, section_index ) ) break else: loader.errors.append( '{}, {}, {}section #{}: Template refers to an empty array'.format( loader.source, file_name, sections_display, section_index ) ) break else: loader.errors.append( '{}, {}, {}section #{}: Template does not refer to a mapping object nor array'.format( loader.source, file_name, sections_display, section_index ) ) break # Only set upon success or if there were no templates else: templates_resolved = True if not templates_resolved: continue MISSING = ( f'{loader.source}, {file_name}, {sections_display}section #{section_index}: ' f'Every section must contain a `{{key}}` attribute' ) INVALID = ( f'{loader.source}, {file_name}, {sections_display}section #{section_index}: ' f'Attribute `{{key}}` must be a {{type}}' ) # now validate the expanded section object _validate(section, validations, loader, MISSING, INVALID) if loader.errors: return section_name = section['name'] if section_name in section_names_origin: loader.errors.append( '{}, {}, {}section #{}: section name `{}` already used by section #{}'.format( loader.source, file_name, sections_display, section_index, section_name, section_names_origin[section_name], ) ) else: section_names_origin[section_name] = section_index if loader.errors: return # perform parameter expansion on the description text # first check if 
there are any fields to be replaced description = section['description'] def on_indent_parse_error(value, spec): loader.errors.append( '{}, {}, {}section #{}: Could not parse indent level in format spec `{}`'.format( loader.source, file_name, sections_display, section_index, spec, ) ) formatter = ParamsFormatter(on_indent_parse_error) if len(list(formatter.parse(description))) > 1: params = copy.deepcopy(section['parameters']) if params: # perform parameter expansion for any parameter values for k, v in params.items(): if v is not None: params[k] = v.format(**base_params) params.update(base_params) else: params = base_params section['description'] = formatter.format(description, **params) if 'sections' in section: nested_sections = section['sections'] if not isinstance(nested_sections, list): loader.errors.append( '{}, {}, {}{}: Attribute `sections` must be an array'.format( loader.source, file_name, sections_display, section_name ) ) continue previous_sections = list(prev_sections) previous_sections.append(section_name) section_validator(nested_sections, loader, file_name, *previous_sections) # If there are unused overrides, add the associated error messages if overrides: for section_index, errors in override_errors: error_message = '\n'.join(errors) loader.errors.append( f'{loader.source}, {file_name}, {sections_display}section #{section_index}: {error_message}' )
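The description is only expanded when formatter.parse finds at least one replacement field; a small sketch of that detection using the standard library Formatter (ParamsFormatter is assumed here to parse the same way as string.Formatter):

from string import Formatter

def has_replacement_fields(text):
    # Formatter.parse yields (literal, field_name, spec, conversion) tuples;
    # a non-None field name means the text contains something to expand.
    return any(field is not None for _, field, _, _ in Formatter().parse(text))

print(has_replacement_fields("Check {check_name} every minute"))  # True
print(has_replacement_fields("Nothing to expand here"))           # False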
21,644
def start(config_options): try: config = HomeServerConfig.load_config("Synapse worker", config_options) except ConfigError as e: sys.stderr.write("\n" + str(e) + "\n") sys.exit(1) # For backwards compatibility let any of the old app names. assert config.worker_app in ( "synapse.app.appservice", "synapse.app.client_reader", "synapse.app.event_creator", "synapse.app.federation_reader", "synapse.app.federation_sender", "synapse.app.frontend_proxy", "synapse.app.generic_worker", "synapse.app.media_repository", "synapse.app.pusher", "synapse.app.synchrotron", "synapse.app.user_dir", ) if config.worker_app == "synapse.app.appservice": if config.appservice.notify_appservices: sys.stderr.write( "\nThe appservices must be disabled in the main synapse process" "\nbefore they can be run in a separate worker." "\nPlease add ``notify_appservices: false`` to the main config" "\n" ) sys.exit(1) # Force the appservice to start since they will be disabled in the main config config.appservice.notify_appservices = True else: # For other worker types we force this to off. config.appservice.notify_appservices = False if config.worker_app == "synapse.app.user_dir": if config.server.update_user_directory: sys.stderr.write( "\nThe update_user_directory must be disabled in the main synapse process" "\nbefore they can be run in a separate worker." "\nPlease add ``update_user_directory: false`` to the main config" "\n" ) sys.exit(1) # Force the pushers to start since they will be disabled in the main config config.server.update_user_directory = True else: # For other worker types we force this to off. config.server.update_user_directory = False synapse.events.USE_FROZEN_DICTS = config.use_frozen_dicts synapse.util.caches.lrucache.TRACK_MEMORY_USAGE = config.caches.track_memory_usage hs = GenericWorkerServer( config.server_name, config=config, version_string="Synapse/" + get_version_string(synapse), ) setup_logging(hs, config, use_worker_options=True) hs.setup() # Ensure the replication streamer is always started in case we write to any # streams. Will no-op if no streams can be written to by this worker. hs.get_replication_streamer() register_start(_base.start, hs, config.worker_listeners) _base.start_worker_reactor("synapse-generic-worker", config)
def start(config_options): try: config = HomeServerConfig.load_config("Synapse worker", config_options) except ConfigError as e: sys.stderr.write("\n" + str(e) + "\n") sys.exit(1) # For backwards compatibility let any of the old app names. assert config.worker_app in ( "synapse.app.appservice", "synapse.app.client_reader", "synapse.app.event_creator", "synapse.app.federation_reader", "synapse.app.federation_sender", "synapse.app.frontend_proxy", "synapse.app.generic_worker", "synapse.app.media_repository", "synapse.app.pusher", "synapse.app.synchrotron", "synapse.app.user_dir", ) if config.worker_app == "synapse.app.appservice": if config.appservice.notify_appservices: sys.stderr.write( "\nThe appservices must be disabled in the main synapse process" "\nbefore they can be run in a separate worker." "\nPlease add ``notify_appservices: false`` to the main config" "\n" ) sys.exit(1) # Force the appservice to start since they will be disabled in the main config config.appservice.notify_appservices = True else: # For other worker types we force this to off. config.appservice.notify_appservices = False if config.worker_app == "synapse.app.user_dir": if config.server.update_user_directory: sys.stderr.write( "\nThe update_user_directory must be disabled in the main synapse process" "\nbefore they can be run in a separate worker." "\nPlease add ``update_user_directory: false`` to the main config" "\n" ) sys.exit(1) # Force the pushers to start since they will be disabled in the main config config.server.update_user_directory = True else: # For other worker types we force this to off. config.server.update_user_directory = False synapse.events.USE_FROZEN_DICTS = config.use_frozen_dicts synapse.util.caches.TRACK_MEMORY_USAGE = config.caches.track_memory_usage hs = GenericWorkerServer( config.server_name, config=config, version_string="Synapse/" + get_version_string(synapse), ) setup_logging(hs, config, use_worker_options=True) hs.setup() # Ensure the replication streamer is always started in case we write to any # streams. Will no-op if no streams can be written to by this worker. hs.get_replication_streamer() register_start(_base.start, hs, config.worker_listeners) _base.start_worker_reactor("synapse-generic-worker", config)
31,064
def main(): install_logging('Destroy_instances.log') circle_aritfact = sys.argv[1] env_file = sys.argv[2] instance_role = sys.argv[3] time_to_live = sys.argv[4] with open(env_file, 'r') as json_file: env_results = json.load(json_file) filtered_results = [env_result for env_result in env_results if env_result["Role"] == instance_role] for env in filtered_results: logging.info(f'Downloading server log from {env.get("Role", "Unknown role")}') ssh_string = 'ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null {}@{} ' \ '"sudo chmod -R 755 /var/log/demisto"' scp_string = 'scp -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null ' \ '{}@{}:/var/log/demisto/server.log {} || echo "WARN: Failed downloading server.log"' try: logging.debug(f'Trying to run {ssh_string}') subprocess.check_output( ssh_string.format(env["SSHuser"], env["InstanceDNS"]), shell=True) except subprocess.CalledProcessError as exc: logging.exception(exc.output) try: server_ip = env["InstanceDNS"].split('.')[0] subprocess.check_output( scp_string.format( env["SSHuser"], env["InstanceDNS"], "{}/server_{}_{}.log".format(circle_aritfact, env["Role"].replace(' ', ''), server_ip)), shell=True) except subprocess.CalledProcessError as exc: logging.exception(exc.output) if time_to_live: logging.info(f'Skipping - Time to live was set to {time_to_live} minutes') continue if os.path.isfile("./Tests/is_build_passed_{}.txt".format(env["Role"].replace(' ', ''))): logging.info(f'Destroying instance {env.get("Role", "Unknown role")}') rminstance = aws_functions.destroy_instance(env["Region"], env["InstanceID"]) if aws_functions.isError(rminstance): logging.error(rminstance) else: logging.warning(f'Tests failed on {env.get("Role", "Unknown role")}, keeping instance alive')
def main(): install_logging('Destroy_instances.log') circle_aritfact = sys.argv[1] env_file = sys.argv[2] instance_role = sys.argv[3] time_to_live = sys.argv[4] with open(env_file, 'r') as json_file: env_results = json.load(json_file) filtered_results = [env_result for env_result in env_results if env_result["Role"] == instance_role] for env in filtered_results: logging.info(f'Downloading server log from {env.get("Role", "Unknown role")}') ssh_string = 'ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null {}@{} ' \ '"sudo chmod -R 755 /var/log/demisto"' scp_string = 'scp -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null ' \ '{}@{}:/var/log/demisto/server.log {} || echo "WARN: Failed downloading server.log"' try: logging.debug(f'Trying to run {ssh_string}') subprocess.check_output( ssh_string.format(env["SSHuser"], env["InstanceDNS"]), shell=True) except subprocess.CalledProcessError as exc: logging.exception(f'Failed changing permissions of folder /var/log/demisto on server {env["InstanceDNS"]}') try: server_ip = env["InstanceDNS"].split('.')[0] subprocess.check_output( scp_string.format( env["SSHuser"], env["InstanceDNS"], "{}/server_{}_{}.log".format(circle_aritfact, env["Role"].replace(' ', ''), server_ip)), shell=True) except subprocess.CalledProcessError as exc: logging.exception(exc.output) if time_to_live: logging.info(f'Skipping - Time to live was set to {time_to_live} minutes') continue if os.path.isfile("./Tests/is_build_passed_{}.txt".format(env["Role"].replace(' ', ''))): logging.info(f'Destroying instance {env.get("Role", "Unknown role")}') rminstance = aws_functions.destroy_instance(env["Region"], env["InstanceID"]) if aws_functions.isError(rminstance): logging.error(rminstance) else: logging.warning(f'Tests failed on {env.get("Role", "Unknown role")}, keeping instance alive')
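The ssh/scp calls follow the usual check_output pattern in which the captured output is logged when the command fails; a minimal sketch of that pattern (the command string is only illustrative):

import logging
import subprocess

def run_remote(cmd):
    # Capture stdout and stderr so a failure can be logged with context.
    try:
        return subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT)
    except subprocess.CalledProcessError as exc:
        logging.exception('Command failed with output: %s', exc.output)
        return None

run_remote('echo "server.log downloaded"')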
30,109
def test_multi_index_load_from_directory_3_simple_bad_file(runtmp): # check that force=False fails properly when confrunted with non-JSON # files. c = runtmp with open(runtmp.output('badsig.sig'), 'wt') as fp: fp.write('bad content.') with pytest.raises(ValueError): mi = MultiIndex.load_from_directory(runtmp.location, force=False)
def test_multi_index_load_from_directory_3_simple_bad_file(runtmp): # check that force=False fails properly when confronted with non-JSON # files. c = runtmp with open(runtmp.output('badsig.sig'), 'wt') as fp: fp.write('bad content.') with pytest.raises(ValueError): mi = MultiIndex.load_from_directory(runtmp.location, force=False)
22,133
def _logic_explain(rules, ev, rule_data): """ Explains whe the logic denied the check-in. Only works for a denied check-in. While our custom check-in logic is very flexible, its main problem is that is is pretty intransparent during execution. If the logic causes an entry to be forbidden, the result of the logic evaluation is just a simple ``False``, which is very unhelpful to explain to attendees why they don't get into the event. The main problem with fixing this is that there is no correct answer for this, it is always up for interpretation. A good example is the following set of rules: - Attendees with a regular ticket can enter the venue between 09:00 and 17:00 on three days - Attendees with a VIP ticket can enter the venue between 08:00 and 18:00 on three days If an attendee with a regular ticket now shows up at 17:30 on the first day, there are three possible error messages: a) You do not have a VIP ticket b) You can only get in before 17:00 c) You can only get in after 09:00 tomorrow All three of them are just as valid, and "fixing" either one of them would get the attendee in. Showing all three is too much, especially since the list can get very long with complex logic. We therefore make an opinionated choice based on a number of assumptions. An example for these assumptions is "it is very unlikely that the attendee is unable to change their ticket type". Additionally, we favor a "close failure". Therefore, in the above example, we'd show "You can only get in before 17:00". In the middle of the night it would switch to "You can only get in after 09:00". """ logic_environment = _get_logic_environment(ev) _var_values = {'False': False, 'True': True} _var_explanations = {} # Step 1: To simplify things later, we replace every operator of the rule that # is NOT a boolean operator (AND and OR in our case) with the evaluation result. def _evaluate_inners(r): if r is True: return {'var': 'True'} if r is False: return {'var': 'False'} if not isinstance(r, dict): return r operator = list(r.keys())[0] values = r[operator] if operator in ("and", "or"): return {operator: [_evaluate_inners(v) for v in values]} result = logic_environment.apply(r, rule_data) new_var_name = f'v{len(_var_values)}' _var_values[new_var_name] = result if not result: # Operator returned false, let's dig deeper if "var" not in values[0]: raise ValueError("Binary operators should be normalized to have a variable on their left-hand side") if isinstance(values[0]["var"], list): values[0]["var"] = values[0]["var"][0] _var_explanations[new_var_name] = { 'operator': operator, 'var': values[0]["var"], 'rhs': values[1:], } return {'var': new_var_name} try: rules = _evaluate_inners(rules) except ValueError: return _('Unknown reason') # Step 2: Transform the the logic into disjunctive normal form (max. one level of ANDs nested in max. one level # of ORs), e.g. `(a AND b AND c) OR (d AND e)` rules = convert_to_dnf(rules) # Step 3: Split into the various paths to truthiness, e.g. ``[[a, b, c], [d, e]]`` for our sample above paths = [] if "and" in rules: # only one path paths.append([v["var"] for v in rules["and"]]) elif "or" in rules: # multiple paths for r in rules["or"]: if "and" in r: paths.append([v["var"] for v in r["and"]]) else: paths.append([r["var"]]) else: # only one expression on only one path paths.append([rules["var"]]) # Step 4: For every variable with value False, compute a weight. The weight is a 2-tuple of numbers. # The first component indicates a "rigidness level". 
The higher the rigidness, the less likely it is that the # outcome is determined by some action of the attendee. For example, the number of entries has a very low # rigidness since the attendee decides how often they enter. The current time has a medium rigidness # since the attendee decides when they show up. The product has a high rigidness, since customers usually # can't change what type of ticket they have. # The second component indicates the "error size". For example for a date comparision this would be the number of # seconds between the two dates. # Additionally, we compute a text for every variable. var_weights = { 'False': (100000, 0), # used during testing 'True': (100000, 0), # used during testing } var_texts = { 'False': 'Always false', # used during testing 'True': 'Always true', # used during testing } for vname, data in _var_explanations.items(): var, operator, rhs = data['var'], data['operator'], data['rhs'] if var == 'now': compare_to = _build_time(*rhs[0]['buildTime'], ev=ev).astimezone(ev.timezone) tolerance = timedelta(minutes=float(rhs[1])) if len(rhs) > 1 and rhs[1] else timedelta(seconds=0) if operator == 'isBefore': compare_to += tolerance else: compare_to -= tolerance var_weights[vname] = (200, abs(now() - compare_to).total_seconds()) if abs(now() - compare_to) < timedelta(hours=12): compare_to_text = date_format(compare_to, 'TIME_FORMAT') else: compare_to_text = date_format(compare_to, 'SHORT_DATETIME_FORMAT') if operator == 'isBefore': var_texts[vname] = _('Only allowed before {datetime}').format(datetime=compare_to_text) elif operator == 'isAfter': var_texts[vname] = _('Only allowed after {datetime}').format(datetime=compare_to_text) elif var == 'product' or var == 'variation': var_weights[vname] = (1000, 0) var_texts[vname] = _('Ticket type not allowed') elif var in ('entries_number', 'entries_today', 'entries_days'): w = { 'entries_days': 100, 'entries_number': 120, 'entries_today': 140, } l = { 'entries_days': _('number of days with an entry'), 'entries_number': _('number of entries'), 'entries_today': _('number of entries today'), } compare_to = rhs[0] var_weights[vname] = (w[var], abs(compare_to - rule_data[var])) if operator == '==': var_texts[vname] = _('{variable} is not {value}').format(variable=l[var], value=compare_to) elif operator in ('<', '<='): var_texts[vname] = _('Maximum {variable} exceeded').format(variable=l[var]) elif operator in ('>', '>='): var_texts[vname] = _('Minimum {variable} exceeded').format(variable=l[var]) elif operator == '!=': var_texts[vname] = _('{variable} is {value}').format(variable=l[var], value=compare_to) else: raise ValueError(f'Unknown variable {var}') # Step 5: For every path, compute the maximum weight path_weights = [ max([ var_weights[v] for v in path if not _var_values[v] ] or [(0, 0)]) for path in paths ] # Step 6: Find the paths with the minimum weight min_weight = min(path_weights) paths_with_min_weight = [ p for i, p in enumerate(paths) if path_weights[i] == min_weight ] # Finally, return the text for one of them return ', '.join(var_texts[v] for v in paths_with_min_weight[0] if not _var_values[v])
def _logic_explain(rules, ev, rule_data): """ Explains whe the logic denied the check-in. Only works for a denied check-in. While our custom check-in logic is very flexible, its main problem is that it is pretty intransparent during execution. If the logic causes an entry to be forbidden, the result of the logic evaluation is just a simple ``False``, which is very unhelpful to explain to attendees why they don't get into the event. The main problem with fixing this is that there is no correct answer for this, it is always up for interpretation. A good example is the following set of rules: - Attendees with a regular ticket can enter the venue between 09:00 and 17:00 on three days - Attendees with a VIP ticket can enter the venue between 08:00 and 18:00 on three days If an attendee with a regular ticket now shows up at 17:30 on the first day, there are three possible error messages: a) You do not have a VIP ticket b) You can only get in before 17:00 c) You can only get in after 09:00 tomorrow All three of them are just as valid, and "fixing" either one of them would get the attendee in. Showing all three is too much, especially since the list can get very long with complex logic. We therefore make an opinionated choice based on a number of assumptions. An example for these assumptions is "it is very unlikely that the attendee is unable to change their ticket type". Additionally, we favor a "close failure". Therefore, in the above example, we'd show "You can only get in before 17:00". In the middle of the night it would switch to "You can only get in after 09:00". """ logic_environment = _get_logic_environment(ev) _var_values = {'False': False, 'True': True} _var_explanations = {} # Step 1: To simplify things later, we replace every operator of the rule that # is NOT a boolean operator (AND and OR in our case) with the evaluation result. def _evaluate_inners(r): if r is True: return {'var': 'True'} if r is False: return {'var': 'False'} if not isinstance(r, dict): return r operator = list(r.keys())[0] values = r[operator] if operator in ("and", "or"): return {operator: [_evaluate_inners(v) for v in values]} result = logic_environment.apply(r, rule_data) new_var_name = f'v{len(_var_values)}' _var_values[new_var_name] = result if not result: # Operator returned false, let's dig deeper if "var" not in values[0]: raise ValueError("Binary operators should be normalized to have a variable on their left-hand side") if isinstance(values[0]["var"], list): values[0]["var"] = values[0]["var"][0] _var_explanations[new_var_name] = { 'operator': operator, 'var': values[0]["var"], 'rhs': values[1:], } return {'var': new_var_name} try: rules = _evaluate_inners(rules) except ValueError: return _('Unknown reason') # Step 2: Transform the the logic into disjunctive normal form (max. one level of ANDs nested in max. one level # of ORs), e.g. `(a AND b AND c) OR (d AND e)` rules = convert_to_dnf(rules) # Step 3: Split into the various paths to truthiness, e.g. ``[[a, b, c], [d, e]]`` for our sample above paths = [] if "and" in rules: # only one path paths.append([v["var"] for v in rules["and"]]) elif "or" in rules: # multiple paths for r in rules["or"]: if "and" in r: paths.append([v["var"] for v in r["and"]]) else: paths.append([r["var"]]) else: # only one expression on only one path paths.append([rules["var"]]) # Step 4: For every variable with value False, compute a weight. The weight is a 2-tuple of numbers. # The first component indicates a "rigidness level". 
The higher the rigidness, the less likely it is that the # outcome is determined by some action of the attendee. For example, the number of entries has a very low # rigidness since the attendee decides how often they enter. The current time has a medium rigidness # since the attendee decides when they show up. The product has a high rigidness, since customers usually # can't change what type of ticket they have. # The second component indicates the "error size". For example for a date comparision this would be the number of # seconds between the two dates. # Additionally, we compute a text for every variable. var_weights = { 'False': (100000, 0), # used during testing 'True': (100000, 0), # used during testing } var_texts = { 'False': 'Always false', # used during testing 'True': 'Always true', # used during testing } for vname, data in _var_explanations.items(): var, operator, rhs = data['var'], data['operator'], data['rhs'] if var == 'now': compare_to = _build_time(*rhs[0]['buildTime'], ev=ev).astimezone(ev.timezone) tolerance = timedelta(minutes=float(rhs[1])) if len(rhs) > 1 and rhs[1] else timedelta(seconds=0) if operator == 'isBefore': compare_to += tolerance else: compare_to -= tolerance var_weights[vname] = (200, abs(now() - compare_to).total_seconds()) if abs(now() - compare_to) < timedelta(hours=12): compare_to_text = date_format(compare_to, 'TIME_FORMAT') else: compare_to_text = date_format(compare_to, 'SHORT_DATETIME_FORMAT') if operator == 'isBefore': var_texts[vname] = _('Only allowed before {datetime}').format(datetime=compare_to_text) elif operator == 'isAfter': var_texts[vname] = _('Only allowed after {datetime}').format(datetime=compare_to_text) elif var == 'product' or var == 'variation': var_weights[vname] = (1000, 0) var_texts[vname] = _('Ticket type not allowed') elif var in ('entries_number', 'entries_today', 'entries_days'): w = { 'entries_days': 100, 'entries_number': 120, 'entries_today': 140, } l = { 'entries_days': _('number of days with an entry'), 'entries_number': _('number of entries'), 'entries_today': _('number of entries today'), } compare_to = rhs[0] var_weights[vname] = (w[var], abs(compare_to - rule_data[var])) if operator == '==': var_texts[vname] = _('{variable} is not {value}').format(variable=l[var], value=compare_to) elif operator in ('<', '<='): var_texts[vname] = _('Maximum {variable} exceeded').format(variable=l[var]) elif operator in ('>', '>='): var_texts[vname] = _('Minimum {variable} exceeded').format(variable=l[var]) elif operator == '!=': var_texts[vname] = _('{variable} is {value}').format(variable=l[var], value=compare_to) else: raise ValueError(f'Unknown variable {var}') # Step 5: For every path, compute the maximum weight path_weights = [ max([ var_weights[v] for v in path if not _var_values[v] ] or [(0, 0)]) for path in paths ] # Step 6: Find the paths with the minimum weight min_weight = min(path_weights) paths_with_min_weight = [ p for i, p in enumerate(paths) if path_weights[i] == min_weight ] # Finally, return the text for one of them return ', '.join(var_texts[v] for v in paths_with_min_weight[0] if not _var_values[v])
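Steps 5 and 6 reduce to a small selection rule over the DNF paths: keep only the failing variables on each path, weight each path by its worst failure, and explain one of the cheapest paths. A self-contained sketch with hypothetical weights for the ticket example from the docstring:

def pick_explanation(paths, var_values, var_weights, var_texts):
    # Weight each DNF path by its worst failing variable, then explain
    # one of the cheapest paths -- the same selection rule as above.
    path_weights = [
        max([var_weights[v] for v in path if not var_values[v]] or [(0, 0)])
        for path in paths
    ]
    best = min(path_weights)
    path = next(p for p, w in zip(paths, path_weights) if w == best)
    return ', '.join(var_texts[v] for v in path if not var_values[v])

# Hypothetical evaluation of "(vip) OR (regular AND before 17:00)" at 17:30.
paths = [['v_vip'], ['v_regular', 'v_before17']]
var_values = {'v_vip': False, 'v_regular': True, 'v_before17': False}
var_weights = {'v_vip': (1000, 0), 'v_before17': (200, 1800)}
var_texts = {'v_vip': 'Ticket type not allowed',
             'v_before17': 'Only allowed before 17:00'}
print(pick_explanation(paths, var_values, var_weights, var_texts))
# Only allowed before 17:00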
5,691
def binomtest(k, n, p=0.5, alternative='two-sided'): """ Perform a test that the probability of success is p. The binomial test [1]_ is a test of the null hypothesis that the probability of success in a Bernoulli experiment is `p`. Details of the test can be found in many texts on statistics, such as section 24.5 of [2]_. Parameters ---------- k : int The number of successes. n : int The number of trials. p : float, optional The hypothesized probability of success, i.e. the expected proportion of successes. The value must be in the interval ``0 <= p <= 1``. The default value is ``p = 0.5``. alternative : {'two-sided', 'greater', 'less'}, optional Indicates the alternative hypothesis. The default value is 'two-sided'. Returns ------- result : `BinomTestResult` instance The return value is an object with the following attributes: k : int The number of successes (copied from `binomtest` input). n : int The number of trials (copied from `binomtest` input). alternative : str Indicates the alternative hypothesis specified in the input to `binomtest`. It will be one of ``'two-sided'``, ``'greater'``, or ``'less'``. pvalue : float The p-value of the hypothesis test. proportion_estimate : float The estimate of the proportion of successes. The object has the following methods: proportion_ci(confidence_level=0.95, method='exact') : Compute the confidence interval for ``proportion_estimate``. Notes ----- .. versionadded:: 1.7.0 References ---------- .. [1] Binomial test, https://en.wikipedia.org/wiki/Binomial_test .. [2] Jerrold H. Zar, Biostatistical Analysis (fifth edition), Prentice Hall, Upper Saddle River, New Jersey USA (2010) Examples -------- >>> from scipy.stats import binomtest A car manufacturer claims that no more than 10% of their cars are unsafe. 15 cars are inspected for safety, 3 were found to be unsafe. Test the manufacturer's claim: >>> result = binomtest(3, n=15, p=0.1, alternative='greater') >>> result.pvalue 0.18406106910639114 The null hypothesis cannot be rejected at the 5% level of significance because the returned p-value is greater than the critical value of 5%. The estimated proportion is simply ``3/15``: >>> result.proportion_estimate 0.2 We can use the `proportion_ci()` method of the result to compute the confidence interval of the estimate: >>> result.proportion_ci(confidence_level=0.95) ConfidenceInterval(low=0.056846867590246826, high=1.0) """ k = _validate_int(k, 'k', minimum=0) n = _validate_int(n, 'n', minimum=1) if k > n: raise ValueError('k must not be greater than n.') if not (0 <= p <= 1): raise ValueError("p must be in range [0,1]") if alternative not in ('two-sided', 'less', 'greater'): raise ValueError("alternative not recognized; \n" "must be 'two-sided', 'less' or 'greater'") a_fn = lambda x1:binom.pmf(x1,n,p) if alternative == 'less': pval = binom.cdf(k, n, p) elif alternative == 'greater': pval = binom.sf(k-1, n, p) else: # alternative is 'two-sided' d = binom.pmf(k, n, p) rerr = 1 + 1e-7 if k == p * n: # special case as shortcut, would also be handled by `else` below pval = 1. elif k < p * n: y = n-binary_search_for_binom_tst(a_fn,d*rerr,np.ceil(p * n),n)+1 pval = binom.cdf(k, n, p) + binom.sf(n - y, n, p) else: y = binary_search_for_binom_tst(a_fn,d*rerr,0,np.floor(p*n) + 1,True)+1 pval = binom.cdf(y-1, n, p) + binom.sf(k-1, n, p) pval = min(1.0, pval) result = BinomTestResult(k=k, n=n, alternative=alternative, proportion_estimate=k/n, pvalue=pval) return result
def binomtest(k, n, p=0.5, alternative='two-sided'): """ Perform a test that the probability of success is p. The binomial test [1]_ is a test of the null hypothesis that the probability of success in a Bernoulli experiment is `p`. Details of the test can be found in many texts on statistics, such as section 24.5 of [2]_. Parameters ---------- k : int The number of successes. n : int The number of trials. p : float, optional The hypothesized probability of success, i.e. the expected proportion of successes. The value must be in the interval ``0 <= p <= 1``. The default value is ``p = 0.5``. alternative : {'two-sided', 'greater', 'less'}, optional Indicates the alternative hypothesis. The default value is 'two-sided'. Returns ------- result : `BinomTestResult` instance The return value is an object with the following attributes: k : int The number of successes (copied from `binomtest` input). n : int The number of trials (copied from `binomtest` input). alternative : str Indicates the alternative hypothesis specified in the input to `binomtest`. It will be one of ``'two-sided'``, ``'greater'``, or ``'less'``. pvalue : float The p-value of the hypothesis test. proportion_estimate : float The estimate of the proportion of successes. The object has the following methods: proportion_ci(confidence_level=0.95, method='exact') : Compute the confidence interval for ``proportion_estimate``. Notes ----- .. versionadded:: 1.7.0 References ---------- .. [1] Binomial test, https://en.wikipedia.org/wiki/Binomial_test .. [2] Jerrold H. Zar, Biostatistical Analysis (fifth edition), Prentice Hall, Upper Saddle River, New Jersey USA (2010) Examples -------- >>> from scipy.stats import binomtest A car manufacturer claims that no more than 10% of their cars are unsafe. 15 cars are inspected for safety, 3 were found to be unsafe. Test the manufacturer's claim: >>> result = binomtest(3, n=15, p=0.1, alternative='greater') >>> result.pvalue 0.18406106910639114 The null hypothesis cannot be rejected at the 5% level of significance because the returned p-value is greater than the critical value of 5%. The estimated proportion is simply ``3/15``: >>> result.proportion_estimate 0.2 We can use the `proportion_ci()` method of the result to compute the confidence interval of the estimate: >>> result.proportion_ci(confidence_level=0.95) ConfidenceInterval(low=0.056846867590246826, high=1.0) """ k = _validate_int(k, 'k', minimum=0) n = _validate_int(n, 'n', minimum=1) if k > n: raise ValueError('k must not be greater than n.') if not (0 <= p <= 1): raise ValueError("p must be in range [0,1]") if alternative not in ('two-sided', 'less', 'greater'): raise ValueError("alternative not recognized; \n" "must be 'two-sided', 'less' or 'greater'") a_fn = lambda x1: binom.pmf(x1, n, p) if alternative == 'less': pval = binom.cdf(k, n, p) elif alternative == 'greater': pval = binom.sf(k-1, n, p) else: # alternative is 'two-sided' d = binom.pmf(k, n, p) rerr = 1 + 1e-7 if k == p * n: # special case as shortcut, would also be handled by `else` below pval = 1. elif k < p * n: y = n-binary_search_for_binom_tst(a_fn,d*rerr,np.ceil(p * n),n)+1 pval = binom.cdf(k, n, p) + binom.sf(n - y, n, p) else: y = binary_search_for_binom_tst(a_fn,d*rerr,0,np.floor(p*n) + 1,True)+1 pval = binom.cdf(y-1, n, p) + binom.sf(k-1, n, p) pval = min(1.0, pval) result = BinomTestResult(k=k, n=n, alternative=alternative, proportion_estimate=k/n, pvalue=pval) return result
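The two-sided branch computes the sum of the probabilities of every outcome at most as likely as the observed count; the binary search above only locates the opposite tail more quickly. A brute-force sketch of the same quantity:

import numpy as np
from scipy.stats import binom

def two_sided_binom_pvalue(k, n, p):
    # Sum pmf values no larger than the observed one, with the same
    # 1 + 1e-7 relative tolerance used above to absorb rounding error.
    pmf = binom.pmf(np.arange(n + 1), n, p)
    return min(1.0, float(pmf[pmf <= pmf[k] * (1 + 1e-7)].sum()))

print(two_sided_binom_pvalue(3, 15, 0.1))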
33,638
def get(object_ids, timeout=None): """Get a remote object or a list of remote objects from the object store. This method blocks until the object corresponding to the object ID is available in the local object store. If this object is not in the local object store, it will be shipped from an object store that has it (once the object has been created). If object_ids is a list, then the objects corresponding to each object in the list will be returned. This method will issue a warning if it's running inside async context, you can use ``await object_id`` instead of ``ray.get(object_id)``. For a list of object ids, you can use ``await asyncio.gather(*object_ids)``. Args: object_ids: Object ID of the object to get or a list of object IDs to get. timeout (Optional[float]): The maximum amount of time in seconds to wait before returning. Returns: A Python object or a list of Python objects. Raises: RayTimeoutError: A RayTimeoutError is raised if a timeout is set and the get takes longer than timeout to return. Exception: An exception is raised if the task that created the object or that created one of the objects raised an exception. """ worker = global_worker worker.check_connected() if hasattr( worker, "core_worker") and worker.core_worker.current_actor_is_asyncio(): global blocking_get_inside_async_warned if not blocking_get_inside_async_warned: logger.warning("Using blocking ray.get inside async actor. " "This blocks the event loop. Please " "use `await` on object id with asyncio.gather.") blocking_get_inside_async_warned = True with profiling.profile("ray.get"): is_individual_id = isinstance(object_ids, ray.ObjectID) if is_individual_id: object_ids = [object_ids] if not isinstance(object_ids, list): raise ValueError("'object_ids' must either be an object ID " "or a list of object IDs.") global last_task_error_raise_time # TODO(ujvl): Consider how to allow user to retrieve the ready objects. values = worker.get_objects(object_ids, timeout=timeout) for i, value in enumerate(values): if isinstance(value, RayError): last_task_error_raise_time = time.time() if isinstance(value, ray.exceptions.UnreconstructableError): worker.core_worker.dump_object_store_memory_usage() if isinstance(value, RayTaskError): raise value.as_instanceof_cause() else: raise value # Run post processors. for post_processor in worker._post_get_hooks: values = post_processor(object_ids, values) if is_individual_id: values = values[0] return values
def get(object_ids, timeout=None): """Get a remote object or a list of remote objects from the object store. This method blocks until the object corresponding to the object ID is available in the local object store. If this object is not in the local object store, it will be shipped from an object store that has it (once the object has been created). If object_ids is a list, then the objects corresponding to each object in the list will be returned. This method will issue a warning if it's running inside async context, you can use ``await object_id`` instead of ``ray.get(object_id)``. For a list of object ids, you can use ``await asyncio.gather(*object_ids)``. Args: object_ids: Object ID of the object to get or a list of object IDs to get. timeout (Optional[float]): The maximum amount of time in seconds to wait before returning. Returns: A Python object or a list of Python objects. Raises: RayTimeoutError: A RayTimeoutError is raised if a timeout is set and the get takes longer than timeout to return. Exception: An exception is raised if the task that created the object or that created one of the objects raised an exception. """ worker = global_worker worker.check_connected() if hasattr( worker, "core_worker") and worker.core_worker.current_actor_is_asyncio(): global blocking_get_inside_async_warned if not blocking_get_inside_async_warned: logger.warning("Using blocking ray.get inside async actor. " "This blocks the event loop. Please " "use `await` on object id with asyncio.gather if you want to yield execution to the event loop instead.") blocking_get_inside_async_warned = True with profiling.profile("ray.get"): is_individual_id = isinstance(object_ids, ray.ObjectID) if is_individual_id: object_ids = [object_ids] if not isinstance(object_ids, list): raise ValueError("'object_ids' must either be an object ID " "or a list of object IDs.") global last_task_error_raise_time # TODO(ujvl): Consider how to allow user to retrieve the ready objects. values = worker.get_objects(object_ids, timeout=timeout) for i, value in enumerate(values): if isinstance(value, RayError): last_task_error_raise_time = time.time() if isinstance(value, ray.exceptions.UnreconstructableError): worker.core_worker.dump_object_store_memory_usage() if isinstance(value, RayTaskError): raise value.as_instanceof_cause() else: raise value # Run post processors. for post_processor in worker._post_get_hooks: values = post_processor(object_ids, values) if is_individual_id: values = values[0] return values
25,676
def is_continuous_integration(): """ If the environment variable `DOCS` is set, always returns False. Otherwise, returns `True` if notebooks are executed on CICD, else `False`. """ if 'DOCS' in os.environ: return False return 'CI' in os.environ
def is_continuous_integration(): """ If the environment variable `DOCS` is set, always returns False. Otherwise, returns `True` if notebooks are executed on CI, else `False`. """ if 'DOCS' in os.environ: return False return 'CI' in os.environ
11,187
def run_formatters(written_files): """ Run the black formatter on the specified files. """ if shutil.which("black") is None: # Black is not installed. return subprocess.run(["black", *written_files], capture_output=True)
def run_formatters(written_files): """ Run the black formatter on the specified files. """ if shutil.which("black") is None: # Black is not installed. return subprocess.run(["black", "--", *written_files], capture_output=True)
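The "--" separator exists so that file names are never parsed as options; a small illustration with a hypothetical, awkwardly named file, assuming the formatter treats "--" as the usual end-of-options marker:

import subprocess

# Without the separator a file called "-check.py" would look like a flag;
# "--" tells the tool that everything after it is a positional path.
written_files = ["-check.py", "module.py"]
subprocess.run(["black", "--", *written_files], capture_output=True)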
30,337
def get_group_attributes(): """ Retrieve a Group’s Attributes """ group_type = demisto.args().get('group_type') group_id = int(demisto.args().get('group_id')) contents = [] headers = ['ID', 'Type', 'Value', 'DateAdded', 'LastModified', 'Displayed'] response = get_group_attributes_request(group_type, group_id) data = response.get('data', {}).get('attribute', []) if response.get('status') == 'Success': for attribute in data: contents.append({ 'ID': attribute.get('id'), 'Type': attribute.get('type'), 'Value': attribute.get('value'), 'DateAdded': attribute.get('dateAdded'), 'LastModified': attribute.get('lastModified'), 'Displayed': attribute.get('displayed') }) else: return_error(response.get('message')) context = { 'TC.Group(val.ID && val.ID === obj.ID)': contents } return_outputs( tableToMarkdown('Group Attributes', contents, headers, removeNull=True), context )
def get_group_attributes(): """ Retrieve a Group’s Attributes """ group_type = demisto.args().get('group_type') group_id = int(demisto.args().get('group_id')) contents = [] headers = ['ID', 'Type', 'Value', 'DateAdded', 'LastModified', 'Displayed'] response = get_group_attributes_request(group_type, group_id) data = response.get('data', {}).get('attribute', []) if response.get('status') == 'Success': for attribute in data: contents.append({ 'ID': attribute.get('id'), 'Type': attribute.get('type'), 'Value': attribute.get('value'), 'DateAdded': attribute.get('dateAdded'), 'LastModified': attribute.get('lastModified'), 'Displayed': attribute.get('displayed') }) else: return_error(response.get('message')) context = { 'TC.Group(val.ID && val.ID === obj.ID)': contents } return_outputs( tableToMarkdown('ThreatConnect Group Attributes', contents, headers, removeNull=True), context )
20,107
def _open_logfile(logfile): """ Open a file which may be gz|bz2 compressed. Uncompress based on extension. """ try: if logfile.endswith(".bz2"): return bz2.BZ2File(logfile) if logfile.endswith(".gz"): return gzip.open(logfile) return open(logfile, "rb") except IOError as msg: logger.error("open %s: %s" % (logfile, msg)) return None
def _open_logfile(logfile):
    """
    Open a file which may be gz|bz2 compressed. Uncompress based on extension.
    """
    try:
        if logfile.endswith(".bz2"):
            return bz2.BZ2File(logfile)
        if logfile.endswith(".gz"):
            return gzip.open(logfile)
        return open(logfile, "rb")
    except IOError as msg:
        logger.error("open %s: %s", logfile, msg)
        return None
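The second version switches to lazy logging, where each placeholder value is passed as its own argument and interpolation is deferred to the logging framework; a short sketch of both spellings:

import logging

logger = logging.getLogger(__name__)
logfile, msg = "access.log.gz", "No such file or directory"

# Eager %-interpolation builds the message string before logging sees it.
logger.error("open %s: %s" % (logfile, msg))

# Lazy formatting: two placeholders, two separate arguments.
logger.error("open %s: %s", logfile, msg)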
8,191
def carrington_rotation_time(crot): """ Return the time of a given Carrington rotation. The round-trip from this method to `carrington_rotation_number` has absolute errors of < 0.11 seconds. Parameters ---------- crot : float Carrington rotation number. Can be a fractional cycle number. Returns ------- astropy.time.Time """ estimate = (constants.mean_synodic_period.to_value('day') * (crot - 1)) + constants.first_carrington_rotation # The above estimate is inaccurate (see comments below in carrington_rotation_number), # so put the estimate into carrington_rotation_number to determine a correction amount def refine(estimate): crot_estimate = carrington_rotation_number(estimate) dcrot = crot - crot_estimate # Correct the estimate using a linear fraction of the Carrington rotation period return estimate + (dcrot * constants.mean_synodic_period) # Perform two iterations of the correction to achieve sub-second accuracy estimate = refine(estimate) estimate = refine(estimate) t = Time(estimate, scale='tt', format='jd') t.format = 'iso' return t
def carrington_rotation_time(crot): """ Return the time of a given Carrington rotation. The round-trip from this method to `carrington_rotation_number` has absolute errors of < 0.11 seconds. Parameters ---------- crot : float Carrington rotation number. Can be a fractional cycle number. Returns ------- astropy.time.Time """ estimate = (constants.mean_synodic_period.to_value('day') * (crot - 1)) + constants.first_carrington_rotation # The above estimate is inaccurate (see comments below in carrington_rotation_number), # so put the estimate into carrington_rotation_number to determine a correction amount def refine(estimate): crot_estimate = carrington_rotation_number(estimate) dcrot = crot - crot_estimate # Correct the estimate using a linear fraction of the Carrington rotation period return estimate + (dcrot * constants.mean_synodic_period) # Perform two iterations of the correction to achieve sub-second accuracy estimate = refine(estimate) estimate = refine(estimate) t = Time(estimate, scale='tt', format='jd').utc t.format = 'iso' return t
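The refinement step is a generic way to invert a nearly linear, monotonic function: measure the residual in the output and step by the residual divided by the slope. A toy sketch of the same idea (the 27.2753-day period is only illustrative):

def invert_by_refinement(f, target, estimate, slope, iterations=2):
    # Two corrections are enough when f is close to linear, which is why
    # the Carrington helper above calls refine() exactly twice.
    for _ in range(iterations):
        estimate += (target - f(estimate)) / slope
    return estimate

period = 27.2753  # days per rotation, illustrative value

def rotations_elapsed(t):
    return t / period

print(invert_by_refinement(rotations_elapsed, target=10.0,
                           estimate=250.0, slope=1 / period))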
40,158
def check_nonnegative_integers(X: Union[np.ndarray, sp_sparse.csr_matrix]): """Checks values of X to ensure it is count data """ data = X if type(X) is np.ndarray else X.data # Check no negatives if np.any(data < 0): return False # Check all are integers elif np.any(~np.equal(np.mod(data, 1), 0)): return False else: return True
def _check_nonnegative_integers(X: Union[np.ndarray, sp_sparse.spmatrix]): """Checks values of X to ensure it is count data """ data = X if type(X) is np.ndarray else X.data # Check no negatives if np.any(data < 0): return False # Check all are integers elif np.any(~np.equal(np.mod(data, 1), 0)): return False else: return True
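A quick usage sketch of the same count-data check on dense and sparse inputs, using scipy.sparse.issparse rather than a type comparison:

import numpy as np
from scipy import sparse

def looks_like_counts(X):
    # Work on the stored values only for sparse matrices, as above.
    data = X.data if sparse.issparse(X) else X
    return bool(np.all(data >= 0) and np.all(np.mod(data, 1) == 0))

print(looks_like_counts(np.array([[0, 2], [3, 1]])))       # True
print(looks_like_counts(sparse.csr_matrix([[0.5, 2.0]])))  # False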
32,304
def filter_incidents_by_duplicates_and_limit(incidents_res, last_run, fetch_limit, id_field): """ Removes duplicate incidents from response and returns the incidents till limit. The function should be called after getting the get-incidents API response, and by passing the id_field it will filter out the incidents that were already fetched by checking the incident IDs that are saved from the previous fetch in the last run object :type incidents_res: ``list`` :param incidents_res: The incidents from the API response :type last_run: ``dict`` :param last_run: The LastRun object :type fetch_limit: ``int`` :param fetch_limit: The incidents limit to return :type id_field: ``str`` :param id_field: The incident id field :return: List of incidents after filtering duplicates when len(incidents) <= limit :rtype: ``list`` """ found_incidents = last_run.get('found_incident_ids', {}) incidents = [] for incident in incidents_res: if incident[id_field] not in found_incidents: incidents.append(incident) return incidents[:fetch_limit]
def filter_incidents_by_duplicates_and_limit(incidents_res, last_run, fetch_limit, id_field): """ Removes duplicate incidents from response and returns the incidents till limit. The function should be called after getting the get-incidents API response, and by passing the id_field it will filter out the incidents that were already fetched by checking the incident IDs that are saved from the previous fetch in the last run object :type incidents_res: ``list`` :param incidents_res: The incidents from the raw API response :type last_run: ``dict`` :param last_run: The LastRun object :type fetch_limit: ``int`` :param fetch_limit: The incidents limit to return :type id_field: ``str`` :param id_field: The incident id field :return: List of incidents after filtering duplicates when len(incidents) <= limit :rtype: ``list`` """ found_incidents = last_run.get('found_incident_ids', {}) incidents = [] for incident in incidents_res: if incident[id_field] not in found_incidents: incidents.append(incident) return incidents[:fetch_limit]
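A usage sketch, assuming the helper above is in scope: the second incident was already recorded in last_run, so it is dropped before the limit is applied.

incidents_res = [{'id': '1'}, {'id': '2'}, {'id': '3'}, {'id': '4'}]
last_run = {'found_incident_ids': {'2': 1620000000}}
filtered = filter_incidents_by_duplicates_and_limit(incidents_res, last_run,
                                                    fetch_limit=2, id_field='id')
print(filtered)  # [{'id': '1'}, {'id': '3'}]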
27,797
def announce(version): """Generates a new release announcement entry in the docs.""" # Get our list of authors stdout = check_output(["git", "describe", "--abbrev=0", "--tags"]) stdout = stdout.decode("utf-8") last_version = stdout.strip() stdout = check_output(["git", "log", f"{last_version}..HEAD", "--format=%aN"]) stdout = stdout.decode("utf-8") contributors = set(stdout.splitlines()) # remove strings within contributors that have substring "[bot]" for name in contributors: if "[bot]" in name: contributors.remove(name) template_name = ( "release.minor.rst" if version.endswith(".0") else "release.patch.rst" ) template_text = ( Path(__file__).parent.joinpath(template_name).read_text(encoding="UTF-8") ) contributors_text = "\n".join(f"* {name}" for name in sorted(contributors)) + "\n" text = template_text.format(version=version, contributors=contributors_text) target = Path(__file__).parent.joinpath(f"../doc/en/announce/release-{version}.rst") target.write_text(text, encoding="UTF-8") print(f"{Fore.CYAN}[generate.announce] {Fore.RESET}Generated {target.name}") # Update index with the new release entry index_path = Path(__file__).parent.joinpath("../doc/en/announce/index.rst") lines = index_path.read_text(encoding="UTF-8").splitlines() indent = " " for index, line in enumerate(lines): if line.startswith(f"{indent}release-"): new_line = indent + target.stem if line != new_line: lines.insert(index, new_line) index_path.write_text("\n".join(lines) + "\n", encoding="UTF-8") print( f"{Fore.CYAN}[generate.announce] {Fore.RESET}Updated {index_path.name}" ) else: print( f"{Fore.CYAN}[generate.announce] {Fore.RESET}Skip {index_path.name} (already contains release)" ) break check_call(["git", "add", str(target)])
def announce(version): """Generates a new release announcement entry in the docs.""" # Get our list of authors stdout = check_output(["git", "describe", "--abbrev=0", "--tags"]) stdout = stdout.decode("utf-8") last_version = stdout.strip() stdout = check_output(["git", "log", f"{last_version}..HEAD", "--format=%aN"]) stdout = stdout.decode("utf-8") contributors = set(stdout.splitlines()) # remove strings within contributors that have substring "[bot]" for name in list(contributors): if "[bot]" in name: contributors.remove(name) template_name = ( "release.minor.rst" if version.endswith(".0") else "release.patch.rst" ) template_text = ( Path(__file__).parent.joinpath(template_name).read_text(encoding="UTF-8") ) contributors_text = "\n".join(f"* {name}" for name in sorted(contributors)) + "\n" text = template_text.format(version=version, contributors=contributors_text) target = Path(__file__).parent.joinpath(f"../doc/en/announce/release-{version}.rst") target.write_text(text, encoding="UTF-8") print(f"{Fore.CYAN}[generate.announce] {Fore.RESET}Generated {target.name}") # Update index with the new release entry index_path = Path(__file__).parent.joinpath("../doc/en/announce/index.rst") lines = index_path.read_text(encoding="UTF-8").splitlines() indent = " " for index, line in enumerate(lines): if line.startswith(f"{indent}release-"): new_line = indent + target.stem if line != new_line: lines.insert(index, new_line) index_path.write_text("\n".join(lines) + "\n", encoding="UTF-8") print( f"{Fore.CYAN}[generate.announce] {Fore.RESET}Updated {index_path.name}" ) else: print( f"{Fore.CYAN}[generate.announce] {Fore.RESET}Skip {index_path.name} (already contains release)" ) break check_call(["git", "add", str(target)])
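Removing members from a set while iterating over it raises RuntimeError, which is why the second version iterates over a snapshot; a minimal illustration:

contributors = {"alice", "dependabot[bot]", "bob"}

# Iterate over a copy so members can be removed from the original set.
for name in list(contributors):
    if "[bot]" in name:
        contributors.remove(name)

print(sorted(contributors))  # ['alice', 'bob']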
33,552
def _prepare_request_for_sig_v4_signature( context: RequestContext, request_netloc: str ) -> AWSRequest: """ Prepare the request and reverse what S3SigV4QueryAuth does to allow signature calculation of the request see botocore.auth.SigV4QueryAuth :param context: RequestContext :return: Request """ request_headers = copy.deepcopy(context.request.headers) # set automatically by the handler chain, we don't want that request_headers.pop("Authorization", None) signed_headers = context.request.args.get("X-Amz-SignedHeaders") signature_headers = {} if uses_host_addressing(request_headers): request_headers["Host"] = request_headers.pop(S3_VIRTUAL_HOST_FORWARDED_HEADER, "") splitted_path = context.request.path.split("/", maxsplit=2) path = f"/{splitted_path[-1]}" else: path = context.request.path not_signed_headers = [] for header, value in request_headers.items(): header_low = header.lower() if header_low.startswith("x-amz-"): if header_low in IGNORED_SIGV4_HEADERS: continue if header_low not in signed_headers.lower(): not_signed_headers.append(header) if header_low in signed_headers: signature_headers[header] = value if not_signed_headers: ex: AccessDenied = create_access_denied_headers_not_signed(", ".join(not_signed_headers)) raise ex new_query_string_dict = { arg: value for arg, value in context.request.args.items() if arg != "X-Amz-Signature" } new_query_string = percent_encode_sequence(new_query_string_dict) # need to set path + query string as url for aws_request request_url = f"{context.request.scheme}://{request_netloc}{path}?{new_query_string}" aws_request = _create_aws_request(context, request_url=request_url, headers=signature_headers) return aws_request
def _prepare_request_for_sig_v4_signature( context: RequestContext, request_netloc: str ) -> AWSRequest: """ Prepare the request and reverse what S3SigV4QueryAuth does to allow signature calculation of the request see botocore.auth.SigV4QueryAuth :param context: RequestContext :return: Request """ request_headers = copy.copy(context.request.headers) # set automatically by the handler chain, we don't want that request_headers.pop("Authorization", None) signed_headers = context.request.args.get("X-Amz-SignedHeaders") signature_headers = {} if uses_host_addressing(request_headers): request_headers["Host"] = request_headers.pop(S3_VIRTUAL_HOST_FORWARDED_HEADER, "") splitted_path = context.request.path.split("/", maxsplit=2) path = f"/{splitted_path[-1]}" else: path = context.request.path not_signed_headers = [] for header, value in request_headers.items(): header_low = header.lower() if header_low.startswith("x-amz-"): if header_low in IGNORED_SIGV4_HEADERS: continue if header_low not in signed_headers.lower(): not_signed_headers.append(header) if header_low in signed_headers: signature_headers[header] = value if not_signed_headers: ex: AccessDenied = create_access_denied_headers_not_signed(", ".join(not_signed_headers)) raise ex new_query_string_dict = { arg: value for arg, value in context.request.args.items() if arg != "X-Amz-Signature" } new_query_string = percent_encode_sequence(new_query_string_dict) # need to set path + query string as url for aws_request request_url = f"{context.request.scheme}://{request_netloc}{path}?{new_query_string}" aws_request = _create_aws_request(context, request_url=request_url, headers=signature_headers) return aws_request
32,426
def air_acquire_command(client: Client, args: Dict[str, Any]) -> CommandResults: '''Command handler for acquire command''' endpoint = args.get('endpoint', None) profile = args.get('profile', None) caseid = args.get('caseid', None) organization_id = args.get('organization_id', None) result: Dict[str, Any] = client.air_acquire(endpoint, profile, caseid, organization_id) return CommandResults( outputs_prefix='Binalyze.Air.Acquisition', outputs_key_field='endpoint', outputs=result, )
def air_acquire_command(client: Client, args: Dict[str, Any]) -> CommandResults: '''Command handler for acquire command''' endpoint = args.get('endpoint', '') profile = args.get('profile', None) caseid = args.get('caseid', None) organization_id = args.get('organization_id', None) result: Dict[str, Any] = client.air_acquire(endpoint, profile, caseid, organization_id) return CommandResults( outputs_prefix='Binalyze.Air.Acquisition', outputs_key_field='endpoint', outputs=result, )
34,855
def validate_files(args: argparse.Namespace, stories_only: bool = False) -> None: """Validates either the story structure or the entire project. Args: args: Commandline arguments stories_only: If `True`, only the story structure is validated. """ from rasa.validator import Validator config = rasa.cli.utils.get_validated_path( args.config, "config", DEFAULT_CONFIG_PATH, none_is_valid=True ) file_importer = TrainingDataImporter.load_from_config( domain_path=args.domain, training_data_paths=args.data, config_path=config ) validator = Validator.from_importer(file_importer) if stories_only: all_good = _validate_story_structure(validator, args) else: all_good = ( _validate_domain(validator) and _validate_nlu(validator, args) and _validate_story_structure(validator, args) ) telemetry.track_validate_files(all_good) if not all_good: rasa.shared.utils.cli.print_error_and_exit( "Project validation completed with errors." )
def validate_files(args: argparse.Namespace, stories_only: bool = False) -> None:
    """Validates either the story structure or the entire project.

    Args:
        args: Commandline arguments
        stories_only: If `True`, only the story structure is validated.
    """
    from rasa.validator import Validator

    config = rasa.cli.utils.get_validated_path(
        args.config, "config", DEFAULT_CONFIG_PATH, none_is_valid=True
    )

    importer = TrainingDataImporter.load_from_config(
        domain_path=args.domain, training_data_paths=args.data, config_path=config
    )

    validator = Validator.from_importer(importer)

    if stories_only:
        all_good = _validate_story_structure(validator, args)
    else:
        all_good = (
            _validate_domain(validator)
            and _validate_nlu(validator, args)
            and _validate_story_structure(validator, args)
        )

    telemetry.track_validate_files(all_good)
    if not all_good:
        rasa.shared.utils.cli.print_error_and_exit(
            "Project validation completed with errors."
        )
1,819
def _unique(values, *, return_inverse=False):
    """Helper function to find uniques with support for python objects.

    Uses pure python method for object dtype, and numpy method for
    all other dtypes.

    Returns
    -------
    unique : ndarray
        The sorted unique values
    unique_inverse : ndarray
        The indicies to reconstruct the original array from the unique array.
        Only provided if `return_inverse` is True.
    """
    if values.dtype == object:
        return _unique_python(values, return_inverse=return_inverse)
    # numerical
    return np.unique(values, return_inverse=return_inverse)
def _unique(values, *, return_inverse=False):
    """Helper function to find uniques with support for python objects.

    Uses pure python method for object dtype, and numpy method for
    all other dtypes.

    Returns
    -------
    unique : ndarray
        The sorted unique values
    unique_inverse : ndarray
        The indices to reconstruct the original array from the unique array.
        Only provided if `return_inverse` is True.
    """
    if values.dtype == object:
        return _unique_python(values, return_inverse=return_inverse)
    # numerical
    return np.unique(values, return_inverse=return_inverse)
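A small NumPy-only sketch of the return_inverse contract that the numerical branch above relies on: the inverse indices rebuild the original array from the sorted uniques.

import numpy as np

values = np.array([3, 1, 3, 2, 1])
uniques, inverse = np.unique(values, return_inverse=True)
assert uniques.tolist() == [1, 2, 3]
assert (uniques[inverse] == values).all()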
30,400
def users_to_entry(title, response, nex_page_token=None): context = [] for user_data in response: username = user_data.get('name').get('givenName') if user_data.get('name') \ and 'givenName' in user_data.get('name') else None display = user_data.get('name').get('fullName') if user_data.get('name') \ and 'fullName' in user_data.get('name') else None context.append({ 'Type': 'Google', 'ID': user_data.get('id'), 'UserName': username, 'Username': username, # adding to fit the new context standard 'DisplayName': display, 'Email': {'Address': user_data.get('primaryEmail')}, 'Gmail': {'Address': user_data.get('primaryEmail')}, 'Group': user_data.get('kind'), 'Groups': user_data.get('kind'), # adding to fit the new context standard 'CustomerId': user_data.get('customerId'), 'Domain': user_data.get('primaryEmail').split('@')[1], 'VisibleInDirectory': user_data.get('includeInGlobalAddressList'), }) headers = ['Type', 'ID', 'Username', 'DisplayName', 'Groups', 'CustomerId', 'Domain', 'OrganizationUnit', 'Email', 'VisibleInDirectory', 'nextPageToken'] human_readable = tableToMarkdown(title, context, headers, removeNull=True) if nex_page_token: human_readable += "\nTo get further results, rerun the command with this page-token:\n" + nex_page_token return { 'ContentsFormat': formats['json'], 'Type': entryTypes['note'], 'Contents': response, 'ReadableContentsFormat': formats['markdown'], 'HumanReadable': human_readable, 'EntryContext': {'Account(val.ID && val.Type && val.ID == obj.ID && val.Type == obj.Type)': context} }
def users_to_entry(title, response, next_page_token=None):
    context = []

    for user_data in response:
        username = user_data.get('name').get('givenName') if user_data.get('name') \
            and 'givenName' in user_data.get('name') else None
        display = user_data.get('name').get('fullName') if user_data.get('name') \
            and 'fullName' in user_data.get('name') else None
        context.append({
            'Type': 'Google',
            'ID': user_data.get('id'),
            'UserName': username,
            'Username': username,  # adding to fit the new context standard
            'DisplayName': display,
            'Email': {'Address': user_data.get('primaryEmail')},
            'Gmail': {'Address': user_data.get('primaryEmail')},
            'Group': user_data.get('kind'),
            'Groups': user_data.get('kind'),  # adding to fit the new context standard
            'CustomerId': user_data.get('customerId'),
            'Domain': user_data.get('primaryEmail').split('@')[1],
            'VisibleInDirectory': user_data.get('includeInGlobalAddressList'),
        })
    headers = ['Type', 'ID', 'Username', 'DisplayName', 'Groups', 'CustomerId', 'Domain', 'OrganizationUnit',
               'Email', 'VisibleInDirectory', 'nextPageToken']
    human_readable = tableToMarkdown(title, context, headers, removeNull=True)
    if next_page_token:
        human_readable += "\nTo get further results, rerun the command with this page-token:\n" + next_page_token

    return {
        'ContentsFormat': formats['json'],
        'Type': entryTypes['note'],
        'Contents': response,
        'ReadableContentsFormat': formats['markdown'],
        'HumanReadable': human_readable,
        'EntryContext': {'Account(val.ID && val.Type && val.ID == obj.ID && val.Type == obj.Type)': context}
    }
58,057
def convert_members_date_type(members): new_ls = [] for member in members: if isinstance(updated_at:=member['UpdatedAt'], datetime): member['UpdatedAt'] = updated_at.strftime('%Y-%m-%d %H:%M:%S') if isinstance(invited_at:=member['InvitedAt'], datetime): member['InvitedAt'] = invited_at.strftime('%Y-%m-%d %H:%M:%S') new_ls.append(member) return new_ls
def convert_members_date_type(members):
    new_ls = []
    for member in members:
        if isinstance(updated_at:=member.get('UpdatedAt'), datetime):
            member['UpdatedAt'] = updated_at.isoformat()
        if isinstance(invited_at:=member.get('InvitedAt'), datetime):
            member['InvitedAt'] = invited_at.isoformat()
        new_ls.append(member)
    return new_ls
7,002
def prefixedNameToNamespaceLocalname( element: ModelObject, prefixedName: str, defaultNsmap: _NSMapArg | None = None ) -> tuple[str | None, str, str | None] | None: if prefixedName is None or prefixedName == "": return None names = prefixedName.partition(":") if names[2] == "": #default namespace prefix = None localName = names[0] else: prefix = names[0] localName = names[2] ns = xmlns(element, prefix) if ns is None: if prefix: assert isinstance(defaultNsmap, Mapping) if prefix in defaultNsmap: ns = defaultNsmap[prefix] else: return None # error, prefix not found return (ns, localName, prefix)
def prefixedNameToNamespaceLocalname( element: ModelObject, prefixedName: str, defaultNsmap: Mapping[str, str] | None = None ) -> tuple[str | None, str, str | None] | None: if prefixedName is None or prefixedName == "": return None names = prefixedName.partition(":") if names[2] == "": #default namespace prefix = None localName = names[0] else: prefix = names[0] localName = names[2] ns = xmlns(element, prefix) if ns is None: if prefix: assert isinstance(defaultNsmap, Mapping) if prefix in defaultNsmap: ns = defaultNsmap[prefix] else: return None # error, prefix not found return (ns, localName, prefix)
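A sketch of just the prefix/local-name split performed above, with the element-based xmlns() lookup replaced by a plain dict; the helper name and namespace URIs are illustrative assumptions.

from typing import Mapping, Optional, Tuple


def split_prefixed_name(prefixed_name: str,
                        nsmap: Mapping[Optional[str], str]) -> Tuple[Optional[str], str, Optional[str]]:
    prefix, sep, local = prefixed_name.partition(":")
    if not sep:  # no colon: default namespace, the whole string is the local name
        return nsmap.get(None), prefixed_name, None
    return nsmap.get(prefix), local, prefix


nsmap = {None: "http://example.com/default", "xs": "http://www.w3.org/2001/XMLSchema"}
assert split_prefixed_name("xs:string", nsmap) == ("http://www.w3.org/2001/XMLSchema", "string", "xs")
assert split_prefixed_name("item", nsmap) == ("http://example.com/default", "item", None)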
22,078
def _ntlm_challenge_extract(challenge): """ Extract host information in a NTLM_CHALLENGE message """ flags = struct.unpack('I', challenge[20:24])[0] # Get target name lntarget, offset = struct.unpack('HH', challenge[12:14] + challenge[16:18]) name = _extract_substr(challenge, offset, lntarget) value = "NTLM target:" + encode_b64(name).decode() # Multiple versions of NTLM Challenge messages exist (they can be deduced # thanks to the target offset) # V1: No context, no target information and no OS version are provided # - offset 32 # V2: Context and target informatio are provided but not the OS version # - offset 48 # V3: The context, target information and OS Version are all provided # - offset >= 56 # cf http://davenport.sourceforge.net/ntlm.html#osVersionStructure # Get OS Version if the version of NTLM handles it # and the `Negotiate version` flag is set if offset == 56 and flags & flag_version: maj, minor, bld, n_version = struct.unpack('BBHI', challenge[48:49] + challenge[49:50] + challenge[50:54] + challenge[54:56]) n_os = encode_b64("{}.{}.{}".format(maj, minor, bld).encode()).decode() value += ",ntlm-os:{},ntlm-version:{}".format(n_os, n_version,) # Get target information if the version of NTLM handles it # and the `Negotiate Target Info` is set if offset >= 48 and flags & flag_targetinfo: off, = struct.unpack('H', challenge[44:46]) challenge = challenge[off:] def _get_targetinfo(challenge): # Target info types : # - 1: NetBIOS Computer Name # - 2: NetBIOS Domain Name # - 3: DNS Computer Name # - 4: DNS Domain Name # - 5: DNS Tree Name typ = 0 while challenge and typ <= 5: typ, ln = struct.unpack('HH', challenge[0:2] + challenge[2:4]) if typ <= 5: value = _extract_substr(challenge, 4, ln) challenge = challenge[4 + ln:] yield value for k, v in zip(['domain', 'name', 'domain-dns', 'name-dns', 'tree-dns'], _get_targetinfo(challenge)): value += ",{}:{}".format(k, encode_b64(v).decode()) return value
def _ntlm_challenge_extract(challenge): """ Extract host information in an NTLM_CHALLENGE message """ flags = struct.unpack('I', challenge[20:24])[0] # Get target name lntarget, offset = struct.unpack('HH', challenge[12:14] + challenge[16:18]) name = _extract_substr(challenge, offset, lntarget) value = "NTLM target:" + encode_b64(name).decode() # Multiple versions of NTLM Challenge messages exist (they can be deduced # thanks to the target offset) # V1: No context, no target information and no OS version are provided # - offset 32 # V2: Context and target informatio are provided but not the OS version # - offset 48 # V3: The context, target information and OS Version are all provided # - offset >= 56 # cf http://davenport.sourceforge.net/ntlm.html#osVersionStructure # Get OS Version if the version of NTLM handles it # and the `Negotiate version` flag is set if offset == 56 and flags & flag_version: maj, minor, bld, n_version = struct.unpack('BBHI', challenge[48:49] + challenge[49:50] + challenge[50:54] + challenge[54:56]) n_os = encode_b64("{}.{}.{}".format(maj, minor, bld).encode()).decode() value += ",ntlm-os:{},ntlm-version:{}".format(n_os, n_version,) # Get target information if the version of NTLM handles it # and the `Negotiate Target Info` is set if offset >= 48 and flags & flag_targetinfo: off, = struct.unpack('H', challenge[44:46]) challenge = challenge[off:] def _get_targetinfo(challenge): # Target info types : # - 1: NetBIOS Computer Name # - 2: NetBIOS Domain Name # - 3: DNS Computer Name # - 4: DNS Domain Name # - 5: DNS Tree Name typ = 0 while challenge and typ <= 5: typ, ln = struct.unpack('HH', challenge[0:2] + challenge[2:4]) if typ <= 5: value = _extract_substr(challenge, 4, ln) challenge = challenge[4 + ln:] yield value for k, v in zip(['domain', 'name', 'domain-dns', 'name-dns', 'tree-dns'], _get_targetinfo(challenge)): value += ",{}:{}".format(k, encode_b64(v).decode()) return value
23,639
def pvsyst_cell(poa_global, temp_air, wind_speed=1.0, u_c=29.0, u_v=0.0, eta_m=0.1, alpha_absorption=0.9): r""" Calculate cell temperature using an empirical heat loss factor model as implemented in PVsyst. Parameters ---------- poa_global : numeric Total incident irradiance [W/m^2]. temp_air : numeric Ambient dry bulb temperature [C]. wind_speed : numeric, default 1.0 Wind speed in m/s measured at the same height for which the wind loss factor was determined. The default value 1.0 m/2 is the wind speed at module height used to determine NOCT. [m/s] u_c : float, default 29.0 Combined heat loss factor coefficient. The default value is representative of freestanding modules with the rear surfaces exposed to open air (e.g., rack mounted). Parameter :math:`U_{c}` in :eq:`pvsyst`. :math:`\left[\frac{\text{W}/{\text{m}^2}}{\text{C}}\right]` u_v : float, default 0.0 Combined heat loss factor influenced by wind. Parameter :math:`U_{v}` in :eq:`pvsyst`. :math:`\left[ \frac{\text{W}/\text{m}^2}{\text{C}\ \left( \text{m/s} \right)} \right]` eta_m : numeric, default 0.1 Module external efficiency as a fraction, i.e., :math:`(total\ DC\ power)/(poa\_global \times (number\ of\ cells \times cell\ area))`. Parameter :math:`\eta_{m}` in :eq:`pvsyst`. alpha_absorption : numeric, default 0.9 Absorption coefficient. Parameter :math:`\alpha` in :eq:`pvsyst`. Returns ------- numeric, values in degrees Celsius Notes ----- The Pvsyst model for cell temperature :math:`T_{C}` is given by .. math:: :label: pvsyst T_{C} = T_{a} + \frac{\alpha E (1 - \eta_{m})}{U_{c} + U_{v} \times WS} Inputs to the model are plane-of-array irradiance :math:`E` (W/m2), ambient air temperature :math:`T_{a}` (C) and wind speed :math:`WS` (m/s). Model output is cell temperature :math:`T_{C}`. Model parameters depend both on the module construction and its mounting. Parameters are provided in [1]_ for open (freestanding) and close (insulated) mounting configurations, , and are coded for convenience in :data:`~pvlib.temperature.TEMPERATURE_MODEL_PARAMETERS`. The heat loss factors provided represent the combined effect of convection, radiation and conduction, and their values are experimentally determined. +--------------+---------------+---------------+ | Mounting | :math:`U_{c}` | :math:`U_{v}` | +==============+===============+===============+ | freestanding | 29.0 | 0.0 | +--------------+---------------+---------------+ | insulated | 15.0 | 0.0 | +--------------+---------------+---------------+ References ---------- .. [1] "PVsyst 6 Help", Files.pvsyst.com, 2018. [Online]. Available: http://files.pvsyst.com/help/index.html. [Accessed: 10- Dec- 2018]. .. [2] Faiman, D. (2008). "Assessing the outdoor operating temperature of photovoltaic modules." Progress in Photovoltaics 16(4): 307-315. Examples -------- >>> from pvlib.temperature import pvsyst_cell, TEMPERATURE_MODEL_PARAMETERS >>> params = TEMPERATURE_MODEL_PARAMETERS['pvsyst']['freestanding'] >>> pvsyst_cell(1000, 10, **params) 37.93103448275862 """ total_loss_factor = u_c + u_v * wind_speed heat_input = poa_global * alpha_absorption * (1 - eta_m) temp_difference = heat_input / total_loss_factor return temp_air + temp_difference
def pvsyst_cell(poa_global, temp_air, wind_speed=1.0, u_c=29.0, u_v=0.0, eta_m=0.1, alpha_absorption=0.9): r""" Calculate cell temperature using an empirical heat loss factor model as implemented in PVsyst. Parameters ---------- poa_global : numeric Total incident irradiance [W/m^2]. temp_air : numeric Ambient dry bulb temperature [C]. wind_speed : numeric, default 1.0 Wind speed in m/s measured at the same height for which the wind loss factor was determined. The default value 1.0 m/2 is the wind speed at module height used to determine NOCT. [m/s] u_c : float, default 29.0 Combined heat loss factor coefficient. The default value is representative of freestanding modules with the rear surfaces exposed to open air (e.g., rack mounted). Parameter :math:`U_{c}` in :eq:`pvsyst`. :math:`\left[\frac{\text{W}/{\text{m}^2}}{\text{C}}\right]` u_v : float, default 0.0 Combined heat loss factor influenced by wind. Parameter :math:`U_{v}` in :eq:`pvsyst`. :math:`\left[ \frac{\text{W}/\text{m}^2}{\text{C}\ \left( \text{m/s} \right)} \right]` eta_m : numeric, default 0.1 Module external efficiency as a fraction, i.e., :math:`(total\ DC\ power)/(poa\_global \times module\_area)` Parameter :math:`\eta_{m}` in :eq:`pvsyst`. alpha_absorption : numeric, default 0.9 Absorption coefficient. Parameter :math:`\alpha` in :eq:`pvsyst`. Returns ------- numeric, values in degrees Celsius Notes ----- The Pvsyst model for cell temperature :math:`T_{C}` is given by .. math:: :label: pvsyst T_{C} = T_{a} + \frac{\alpha E (1 - \eta_{m})}{U_{c} + U_{v} \times WS} Inputs to the model are plane-of-array irradiance :math:`E` (W/m2), ambient air temperature :math:`T_{a}` (C) and wind speed :math:`WS` (m/s). Model output is cell temperature :math:`T_{C}`. Model parameters depend both on the module construction and its mounting. Parameters are provided in [1]_ for open (freestanding) and close (insulated) mounting configurations, , and are coded for convenience in :data:`~pvlib.temperature.TEMPERATURE_MODEL_PARAMETERS`. The heat loss factors provided represent the combined effect of convection, radiation and conduction, and their values are experimentally determined. +--------------+---------------+---------------+ | Mounting | :math:`U_{c}` | :math:`U_{v}` | +==============+===============+===============+ | freestanding | 29.0 | 0.0 | +--------------+---------------+---------------+ | insulated | 15.0 | 0.0 | +--------------+---------------+---------------+ References ---------- .. [1] "PVsyst 6 Help", Files.pvsyst.com, 2018. [Online]. Available: http://files.pvsyst.com/help/index.html. [Accessed: 10- Dec- 2018]. .. [2] Faiman, D. (2008). "Assessing the outdoor operating temperature of photovoltaic modules." Progress in Photovoltaics 16(4): 307-315. Examples -------- >>> from pvlib.temperature import pvsyst_cell, TEMPERATURE_MODEL_PARAMETERS >>> params = TEMPERATURE_MODEL_PARAMETERS['pvsyst']['freestanding'] >>> pvsyst_cell(1000, 10, **params) 37.93103448275862 """ total_loss_factor = u_c + u_v * wind_speed heat_input = poa_global * alpha_absorption * (1 - eta_m) temp_difference = heat_input / total_loss_factor return temp_air + temp_difference
35,222
def isneginf(x, out=None): """Returns a bool array, where True if input element is negative infinity. Args: x (cupy.ndarray): Input array. Returns: cupy.ndarray: Boolean array of same shape as ``x``. Examples -------- >>> cupy.isneginf(0) False >>> cupy.isneginf([4, -4, numpy.inf, -numpy.inf]) [False, False, False, True] .. seealso:: :func:`numpy.isneginf` """ is_inf = cupy.isinf(x) try: signbit = cupy.signbit(x) except TypeError as e: dtype = cupy.asanyarray(x).dtype raise TypeError(f'This operation is not supported for {dtype} values ' 'as it would be uncertain.') from e else: return cupy.logical_and(is_inf, signbit, out=None)
def isneginf(x, out=None): """Returns a bool array, where True if input element is negative infinity. Args: x (cupy.ndarray): Input array. Returns: cupy.ndarray: Boolean array of same shape as ``x``. Examples -------- >>> cupy.isneginf(0) False >>> cupy.isneginf([4, -4, numpy.inf, -numpy.inf]) [False, False, False, True] .. seealso:: :func:`numpy.isneginf` """ is_inf = cupy.isinf(x) try: signbit = cupy.signbit(x) except TypeError as e: dtype = cupy.asanyarray(x).dtype raise TypeError(f'This operation is not supported for {dtype} values ' 'as it would be uncertain.') from e return cupy.logical_and(is_inf, signbit, out)
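A host-side illustration of the same isinf/signbit composition, written with NumPy so it runs without a GPU; the CuPy version above applies the same logic to device arrays.

import numpy as np

x = np.array([4.0, -4.0, np.inf, -np.inf])
neg_inf_mask = np.logical_and(np.isinf(x), np.signbit(x))
assert neg_inf_mask.tolist() == [False, False, False, True]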
55,766
def _select_points_from_drag(layer, modify_selection: bool, n_display: int): """Select points on a Points layer after a drag event. Parameters ---------- layer : napari.layers.Points The points layer to select points on. modify_selection : bool Set to true if the selection should modify the current selected data in layer.selected_data. n_display : int The number of dimensions current being displayed """ if len(layer._view_data) > 0: # if there is data in view, find the points in the drag box if n_display == 2: selection = points_in_box( layer._drag_box, layer._view_data, layer._view_size ) else: selection = _points_in_box_3d( layer._drag_box, layer._view_data, layer._view_size, layer._drag_normal, layer._drag_up, ) # If shift combine drag selection with existing selected ones if modify_selection: new_selected = layer._indices_view[selection] target = set(layer.selected_data).symmetric_difference( set(new_selected) ) layer.selected_data = list(target) else: layer.selected_data = layer._indices_view[selection] else: layer.selected_data = set()
def _select_points_from_drag(layer, modify_selection: bool, n_display: int):
    """Select points on a Points layer after a drag event.

    Parameters
    ----------
    layer : napari.layers.Points
        The points layer to select points on.
    modify_selection : bool
        Set to true if the selection should modify the current selected data
        in layer.selected_data.
    n_display : int
        The number of dimensions currently being displayed
    """
    if len(layer._view_data) == 0:
        # if there is no data in view, nothing can be selected
        layer.selected_data = set()
        return

    if n_display == 2:
        selection = points_in_box(
            layer._drag_box, layer._view_data, layer._view_size
        )
    else:
        selection = _points_in_box_3d(
            layer._drag_box,
            layer._view_data,
            layer._view_size,
            layer._drag_normal,
            layer._drag_up,
        )
    # If shift combine drag selection with existing selected ones
    if modify_selection:
        new_selected = layer._indices_view[selection]
        target = set(layer.selected_data).symmetric_difference(
            set(new_selected)
        )
        layer.selected_data = list(target)
    else:
        layer.selected_data = layer._indices_view[selection]
43,871
def param_shift( tape, argnum=None, shift=np.pi / 2, gradient_recipes=None, fallback_fn=finite_diff, f0=None ): r"""Generate the parameter-shift tapes and postprocessing methods required to compute the gradient of an gate parameter with respect to an expectation value. Args: tape (.QuantumTape): quantum tape to differentiate argnum (int or list[int] or None): Trainable parameter indices to differentiate with respect to. If not provided, the derivative with respect to all trainable indices are returned. shift (float): The shift value to use for the two-term parameter-shift formula. Only valid if the operation in question supports the two-term parameter-shift rule (that is, it has two distinct eigenvalues) and ``gradient_recipe`` is ``None``. gradient_recipes (tuple(list[list[float]] or None)): List of gradient recipes for the parameter-shift method. One gradient recipe must be provided per trainable parameter. This is a tuple with one nested list per parameter. For parameter :math:`\phi_k`, the nested list contains elements of the form :math:`[c_i, a_i, s_i]` where :math:`i` is the index of the term, resulting in a gradient recipe of .. math:: \frac{\partial}{\partial\phi_k}f = \sum_{i} c_i f(a_i \phi_k + s_i). If ``None``, the default gradient recipe containing the two terms :math:`[c_0, a_0, s_0]=[1/2, 1, \pi/2]` and :math:`[c_1, a_1, s_1]=[-1/2, 1, -\pi/2]` is assumed for every parameter. fallback_fn (None or Callable): a fallback grdient function to use for any parameters that do not support the parameter-shift rule. f0 (tensor_like[float] or None): Output of the evaluated input tape. If provided, and the gradient recipe contains an unshifted term, this value is used, saving a quantum evaluation. Returns: tuple[list[QuantumTape], function]: A tuple containing a list of generated tapes, in addition to a post-processing function to be applied to the evaluated tapes. For a variational evolution :math:`U(\mathbf{p}) \vert 0\rangle` with :math:`N` parameters :math:`\mathbf{p}`, consider the expectation value of an observable :math:`O`: .. math:: f(\mathbf{p}) = \langle \hat{O} \rangle(\mathbf{p}) = \langle 0 \vert U(\mathbf{p})^\dagger \hat{O} U(\mathbf{p}) \vert 0\rangle. The gradient of this expectation value can be calculated using :math:`2N` expectation values using the parameter-shift rule: .. math:: \frac{\partial f}{\partial \mathbf{p}} = \frac{1}{2\sin s} \left[ f(\mathbf{p} + s) - f(\mathbf{p} -s) \right]. **Gradients of variances** For a variational evolution :math:`U(\mathbf{p}) \vert 0\rangle` with :math:`N` parameters :math:`\mathbf{p}`, consider the variance of an observable :math:`O`: .. math:: g(\mathbf{p})=\langle \hat{O}^2 \rangle (\mathbf{p}) - [\langle \hat{O} \rangle(\mathbf{p})]^2. We can relate this directly to the parameter-shift rule by noting that .. math:: \frac{\partial g}{\partial \mathbf{p}}= \frac{\partial}{\partial \mathbf{p}} \langle \hat{O}^2 \rangle (\mathbf{p}) - 2 f(\mathbf{p}) \frac{\partial f}{\partial \mathbf{p}}. This results in :math:`4N + 1` evaluations. In the case where :math:`O` is involutory (:math:`\hat{O}^2 = I`), the first term in the above expression vanishes, and we are simply left with .. math:: \frac{\partial g}{\partial \mathbf{p}} = - 2 f(\mathbf{p}) \frac{\partial f}{\partial \mathbf{p}}, allowing us to compute the gradient using :math:`2N + 1` evaluations. **Example** >>> with qml.tape.QuantumTape() as tape: ... qml.RX(params[0], wires=0) ... qml.RY(params[1], wires=0) ... qml.RX(params[2], wires=0) ... qml.expval(qml.PauliZ(0)) ... 
qml.var(qml.PauliZ(0)) >>> tape.trainable_params = {0, 1, 2} >>> gradient_tapes, fn = qml.gradients.param_shift(tape) >>> res = dev.batch_execute(gradient_tapes) >>> fn(res) [[-0.38751721 -0.18884787 -0.38355704] [ 0.69916862 0.34072424 0.69202359]] """ # perform gradient method validation if any(m.return_type is qml.operation.State for m in tape.measurements): raise ValueError("Does not support circuits that return the state") _gradient_analysis(tape) gradient_tapes = [] # TODO: replace the JacobianTape._grad_method_validation # functionality before deprecation. method = "analytic" if fallback_fn is None else "best" diff_methods = tape._grad_method_validation(method) if not tape.trainable_params or all(g == "0" for g in diff_methods): # Either all parameters have grad method 0, or there are no trainable # parameters. return gradient_tapes, lambda x: np.zeros([tape.output_dim, len(tape.trainable_params)]) # TODO: replace the JacobianTape._choose_params_with_methods # functionality before deprecation. method_map = dict(tape._choose_params_with_methods(diff_methods, argnum)) # If there are unsupported operations, call the callback gradient function unsupported_params = {idx for idx, g in method_map.items() if g == "F"} if unsupported_params: g_tapes, fallback_proc_fn = fallback_fn(tape, argnum=unsupported_params) gradient_tapes.extend(g_tapes) fallback_len = len(g_tapes) # remove finite difference parameters from the method map method_map = {t_idx: dm for t_idx, dm in method_map.items() if dm != "F"} # Generate parameter-shift gradient tapes argnum = [i for i, dm in method_map.items() if dm == "A"] if gradient_recipes is None: gradient_recipes = [None] * len(argnum) if any(m.return_type is qml.operation.Variance for m in tape.measurements): g_tapes, fn = var_param_shift(tape, argnum, shift, gradient_recipes, f0) else: g_tapes, fn = expval_param_shift(tape, argnum, shift, gradient_recipes, f0) gradient_tapes.extend(g_tapes) if unsupported_params: # If there are unsupported parameters, we must process # the quantum results separately, once for the fallback # function and once for the parameter-shift rule, and recombine. def processing_fn(results): unsupported_grads = fallback_proc_fn(results[:fallback_len]) supported_grads = fn(results[fallback_len:]) return unsupported_grads + supported_grads else: processing_fn = fn return gradient_tapes, processing_fn
def param_shift( tape, argnum=None, shift=np.pi / 2, gradient_recipes=None, fallback_fn=finite_diff, f0=None ): r"""Generate the parameter-shift tapes and postprocessing methods required to compute the gradient of an gate parameter with respect to an expectation value. Args: tape (.QuantumTape): quantum tape to differentiate argnum (int or list[int] or None): Trainable parameter indices to differentiate with respect to. If not provided, the derivative with respect to all trainable indices are returned. shift (float): The shift value to use for the two-term parameter-shift formula. Only valid if the operation in question supports the two-term parameter-shift rule (that is, it has two distinct eigenvalues) and ``gradient_recipe`` is ``None``. gradient_recipes (tuple(list[list[float]] or None)): List of gradient recipes for the parameter-shift method. One gradient recipe must be provided per trainable parameter. This is a tuple with one nested list per parameter. For parameter :math:`\phi_k`, the nested list contains elements of the form :math:`[c_i, a_i, s_i]` where :math:`i` is the index of the term, resulting in a gradient recipe of .. math:: \frac{\partial}{\partial\phi_k}f = \sum_{i} c_i f(a_i \phi_k + s_i). If ``None``, the default gradient recipe containing the two terms :math:`[c_0, a_0, s_0]=[1/2, 1, \pi/2]` and :math:`[c_1, a_1, s_1]=[-1/2, 1, -\pi/2]` is assumed for every parameter. fallback_fn (None or Callable): a fallback grdient function to use for any parameters that do not support the parameter-shift rule. f0 (tensor_like[float] or None): Output of the evaluated input tape. If provided, and the gradient recipe contains an unshifted term, this value is used, saving a quantum evaluation. Returns: tuple[list[QuantumTape], function]: A tuple containing a list of generated tapes, in addition to a post-processing function to be applied to the evaluated tapes. For a variational evolution :math:`U(\mathbf{p}) \vert 0\rangle` with :math:`N` parameters :math:`\mathbf{p}`, consider the expectation value of an observable :math:`O`: .. math:: f(\mathbf{p}) = \langle \hat{O} \rangle(\mathbf{p}) = \langle 0 \vert U(\mathbf{p})^\dagger \hat{O} U(\mathbf{p}) \vert 0\rangle. The gradient of this expectation value can be calculated using :math:`2N` expectation values using the parameter-shift rule: .. math:: \frac{\partial f}{\partial \mathbf{p}} = \frac{1}{2\sin s} \left[ f(\mathbf{p} + s) - f(\mathbf{p} -s) \right]. **Gradients of variances** For a variational evolution :math:`U(\mathbf{p}) \vert 0\rangle` with :math:`N` parameters :math:`\mathbf{p}`, consider the variance of an observable :math:`O`: .. math:: g(\mathbf{p})=\langle \hat{O}^2 \rangle (\mathbf{p}) - [\langle \hat{O} \rangle(\mathbf{p})]^2. We can relate this directly to the parameter-shift rule by noting that .. math:: \frac{\partial g}{\partial \mathbf{p}}= \frac{\partial}{\partial \mathbf{p}} \langle \hat{O}^2 \rangle (\mathbf{p}) - 2 f(\mathbf{p}) \frac{\partial f}{\partial \mathbf{p}}. This results in :math:`4N + 1` evaluations. In the case where :math:`O` is involutory (:math:`\hat{O}^2 = I`), the first term in the above expression vanishes, and we are simply left with .. math:: \frac{\partial g}{\partial \mathbf{p}} = - 2 f(\mathbf{p}) \frac{\partial f}{\partial \mathbf{p}}, allowing us to compute the gradient using :math:`2N + 1` evaluations. **Example** >>> with qml.tape.QuantumTape() as tape: ... qml.RX(params[0], wires=0) ... qml.RY(params[1], wires=0) ... qml.RX(params[2], wires=0) ... qml.expval(qml.PauliZ(0)) ... 
qml.var(qml.PauliZ(0)) >>> tape.trainable_params = {0, 1, 2} >>> gradient_tapes, fn = qml.gradients.param_shift(tape) >>> res = dev.batch_execute(gradient_tapes) >>> fn(res) [[-0.38751721 -0.18884787 -0.38355704] [ 0.69916862 0.34072424 0.69202359]] """ # perform gradient method validation if any(m.return_type is qml.operation.State for m in tape.measurements): raise ValueError("Does not support circuits that return the state") _gradient_analysis(tape) gradient_tapes = [] # TODO: replace the JacobianTape._grad_method_validation # functionality before deprecation. method = "analytic" if fallback_fn is None else "best" diff_methods = tape._grad_method_validation(method) if not tape.trainable_params or all(g == "0" for g in diff_methods): # Either all parameters have grad method 0, or there are no trainable # parameters. return gradient_tapes, lambda x: np.zeros([tape.output_dim, len(tape.trainable_params)]) # TODO: replace the JacobianTape._choose_params_with_methods # functionality before deprecation. method_map = dict(tape._choose_params_with_methods(diff_methods, argnum)) # If there are unsupported operations, call the callback gradient function unsupported_params = {idx for idx, g in method_map.items() if g == "F"} if unsupported_params: g_tapes, fallback_proc_fn = fallback_fn(tape, argnum=unsupported_params) gradient_tapes.extend(g_tapes) fallback_len = len(g_tapes) # remove finite difference parameters from the method map method_map = {t_idx: dm for t_idx, dm in method_map.items() if dm != "F"} # Generate parameter-shift gradient tapes argnum = [i for i, dm in method_map.items() if dm == "A"] if gradient_recipes is None: gradient_recipes = [None] * len(argnum) if any(m.return_type is qml.operation.Variance for m in tape.measurements): g_tapes, fn = var_param_shift(tape, argnum, shift, gradient_recipes, f0) else: g_tapes, fn = expval_param_shift(tape, argnum, shift, gradient_recipes, f0) gradient_tapes.extend(g_tapes) if unsupported_params: # If there are unsupported parameters, we must process # the quantum results separately, once for the fallback # function and once for the parameter-shift rule, and recombine. def processing_fn(results): unsupported_grads = fallback_proc_fn(results[:fallback_len]) supported_grads = fn(results[fallback_len:]) return unsupported_grads + supported_grads else: processing_fn = fn return gradient_tapes, processing_fn
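A numeric check of the two-term shift rule quoted in the docstring, using f(theta) = sin(theta) as a stand-in for an expectation value with unit eigenvalue spacing; this illustrates only the formula, not the tape machinery.

import numpy as np

theta, s = 0.37, np.pi / 2
shift_rule = (np.sin(theta + s) - np.sin(theta - s)) / (2 * np.sin(s))
assert np.isclose(shift_rule, np.cos(theta))  # exact for a single-frequency function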
59,201
def fallback_resources(spec): package_directory = pathlib.Path(spec.origin).parent try: archive_path = spec.loader.archive rel_path = package_directory.relative_to(archive_path) return zipfile.Path(archive_path, str(rel_path) + '/') except Exception: pass return package_directory
def fallback_resources(spec): package_directory = pathlib.Path(spec.origin).parent try: archive_path = spec.loader.archive rel_path = package_directory.relative_to(archive_path) return zipfile.Path(archive_path, str(rel_path) + '/') except (ValueError, OSError): pass return package_directory
45,945
def get_hanning_kernel2d(kernel_size: Tuple[int, int], device=torch.device('cpu'), dtype=torch.float) -> torch.Tensor:
    r"""Returns 2d Hanning kernel, used in signal processing and KCF tracker

    Args:
        kernel_size: It should be positive.

    Returns:
        2D tensor with Hanning filter coefficients.
        .. math::  w(n) = 0.5 - 0.5cos\\left(\\frac{2\\pi{n}}{M-1}\\right)

    Shape:
        - Output: :math:`(\text{kernel_size[0], kernel_size[1]})`
    """
    if kernel_size[0] <= 2 or kernel_size[1] <= 2:
        raise TypeError(f"ksize must be a tuple of positive integers > 2. Got {kernel_size}")
    ky: torch.Tensor = get_hanning_kernel1d(kernel_size[0], device, dtype)[None].T
    kx: torch.Tensor = get_hanning_kernel1d(kernel_size[1], device, dtype)[None]
    kernel2d = ky @ kx
    return kernel2d
def get_hanning_kernel2d(kernel_size: Tuple[int, int], device=torch.device('cpu'), dtype=torch.float) -> torch.Tensor:
    r"""Returns 2d Hanning kernel, used in signal processing and KCF tracker

    Args:
        kernel_size: The size of the kernel for the filter. It should be positive.

    Returns:
        2D tensor with Hanning filter coefficients.
        .. math::  w(n) = 0.5 - 0.5cos\\left(\\frac{2\\pi{n}}{M-1}\\right)

    Shape:
        - Output: :math:`(\text{kernel_size[0], kernel_size[1]})`
    """
    if kernel_size[0] <= 2 or kernel_size[1] <= 2:
        raise TypeError(f"ksize must be a tuple of positive integers > 2. Got {kernel_size}")
    ky: torch.Tensor = get_hanning_kernel1d(kernel_size[0], device, dtype)[None].T
    kx: torch.Tensor = get_hanning_kernel1d(kernel_size[1], device, dtype)[None]
    kernel2d = ky @ kx
    return kernel2d
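An equivalent way to build a 2D Hann window as an outer product of 1D windows, using torch.hann_window(periodic=False), which matches the w(n) formula quoted in the docstring; get_hanning_kernel1d itself is not shown here, so this is only an assumed-equivalent construction.

import torch

ky = torch.hann_window(5, periodic=False)
kx = torch.hann_window(7, periodic=False)
kernel2d = torch.outer(ky, kx)
assert kernel2d.shape == (5, 7)
assert torch.isclose(kernel2d.max(), torch.tensor(1.0))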
28,115
def _trim_dollar(value): """Trime dollar character if present.""" return value[1:] if value.startswith("$") else value
def _trim_dollar(value): """Trim dollar character if present.""" return value[1:] if value.startswith("$") else value
34,955
def derive_similarity_tag(dag, log_base=1.618): """Derive the tag for similarity check from one computational DAG. The DAGs with the same tag are considered as similar tasks. Parameters ---------- dag: ComputeDAG The input computational DAG log_base: float = 1.618 The base of log to normalize FLOPS Returns ------- tag: str The tag of this computational DAG. """ ret = "" for op in dag.ops: tag = op.attrs.get("ansor_task_scheduler_tag", None) if tag: ret += op.attrs["ansor_task_scheduler_tag"] + "_" if ret != "": ret += "%d" % int(math.log(dag.flop_ct + 1, log_base)) return ret
def derive_similarity_tag(dag, log_base=1.618): """Derive the tag for similarity check from one computational DAG. The DAGs with the same tag are considered as similar tasks. Parameters ---------- dag: ComputeDAG The input computational DAG log_base: float = 1.618 The base of log to normalize FLOPS Returns ------- tag: str The tag of this computational DAG. """ ret = "" for op in dag.ops: tag = op.attrs.get("ansor_task_scheduler_tag", None) if tag: ret += op.attrs["ansor_task_scheduler_tag"] + "_" if ret: ret += "%d" % int(math.log(dag.flop_ct + 1, log_base)) return ret
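The FLOP-count suffix above simply buckets flop_ct on a log base-1.618 scale, so DAGs whose FLOP counts are within roughly a golden-ratio factor of each other share a suffix. A minimal sketch of only that numeric part (the helper name and FLOP values are made up):

import math


def flop_bucket(flop_ct, log_base=1.618):
    return int(math.log(flop_ct + 1, log_base))


assert flop_bucket(800_000) == flop_bucket(1_000_000)      # same bucket
assert flop_bucket(1_000_000) != flop_bucket(10_000_000)   # different bucket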
1,261
def _get_parser(): """Return command-line argument parser.""" p = argparse.ArgumentParser(description=__doc__) p.add_argument("infile", help="Neuroimaging volume to compute statistics on.") p.add_argument("-V", "--Volume", action="store_true", required=False, help="Compute mask volume of a given mask image.") p.add_argument("--units", default="mm3", required=False, help="Preferred output units of {mm3, vox}. Defaults to mm3") return p
def _get_parser(): """Return command-line argument parser.""" p = argparse.ArgumentParser(description=__doc__) p.add_argument("infile", help="Neuroimaging volume to compute statistics on.") p.add_argument("-V", "--Volume", action="store_true", required=False, help="Compute mask volume of a given mask image.") p.add_argument("--units", default="mm3", required=False, choices=("mm3", "vox"), help="Preferred output units") return p
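A quick sanity check of the parser above; the input filename is made up and _get_parser is assumed to be importable from the module it lives in.

parser = _get_parser()
args = parser.parse_args(["mask.nii.gz", "--Volume", "--units", "vox"])
assert args.Volume is True
assert args.units == "vox"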
28,028
def add_arguments_to_parser(parser): """ Add the subcommand's arguments to the given argparse.ArgumentParser. """ subcommands = parser.add_subparsers(title='available actions') # Create handlers for individual subcommands. runs = subcommands.add_parser( 'runs', formatter_class=argparse.ArgumentDefaultsHelpFormatter, description="List the analysis runs available on the server.", help="List the available analysis runs.") __register_runs(runs) runs.set_defaults(func=cmd_line_client.handle_list_runs) __add_common_arguments(runs, output_formats=DEFAULT_OUTPUT_FORMATS) run_histories = subcommands.add_parser( 'history', formatter_class=argparse.ArgumentDefaultsHelpFormatter, description="Show run history for some analysis runs.", help="Show run history of multiple runs.") __register_run_histories(run_histories) run_histories.set_defaults(func=cmd_line_client.handle_list_run_histories) __add_common_arguments(run_histories, output_formats=DEFAULT_OUTPUT_FORMATS) results = subcommands.add_parser( 'results', formatter_class=arg.RawDescriptionDefaultHelpFormatter, description="Show the individual analysis reports' summary.", help="List analysis result (finding) summary for a given run.", epilog='''Example scenario: List analysis results ------------------------------------------------ Get analysis results for a run: CodeChecker cmd results my_run Get analysis results for multiple runs: CodeChecker cmd results my_run1 my_run2 Get analysis results by using regex: CodeChecker cmd results "my_run*" Get analysis results for a run and filter the analysis results: CodeChecker cmd results my_run --severity critical high medium \\ --file "/home/username/my_project/*" CodeChecker cmd results my_run --review-status confirmed unreviewed \\ --component my_component_name''') __register_results(results) results.set_defaults(func=cmd_line_client.handle_list_results) __add_common_arguments(results, output_formats=DEFAULT_OUTPUT_FORMATS) diff = subcommands.add_parser( 'diff', formatter_class=arg.RawDescriptionDefaultHelpFormatter, description="Compare two analysis runs to show the results that " "differ between the two.", help="Compare two analysis runs and show the difference.", epilog=''' envionment variables: CC_REPO_DIR Root directory of the sources, i.e. the directory where the repository was cloned. Use it when generating gerrit output. CC_REPORT_URL URL where the report can be found. Use it when generating gerrit output. CC_CHANGED_FILES Path of changed files json from Gerrit. Use it when generating gerrit output. 
Example scenario: Compare multiple analysis runs ------------------------------------------------ Compare two runs and show results that didn't exist in the 'run1' but appear in the 'run2' run: CodeChecker cmd diff -b run1 -n run2 --new Compare a remote run with a local report directory and show results that didn't exist in the remote run 'run1' but appear in the local report directory: CodeChecker cmd diff -b run1 -n /my_report_dir --new Compare two runs and show results that exist in both runs and filter results by multiple severity values: CodeChecker cmd diff -b run1 -n run2 --unresolved --severity high medium''' ) __register_diff(diff) diff_output_formats = DEFAULT_OUTPUT_FORMATS + ["html", "gerrit", "codeclimate"] output_help_msg = "R|The output format(s) to use in showing the data.\n" \ "- html: multiple html files will be generated in the " \ "export directory.\n" \ "- gerrit: a 'gerrit_review.json' file will be " \ "generated in the export directory.\n" \ "- codeclimate: a 'codeclimate_issues.json' file will " \ "be generated in the export directory.\n" \ "For the output formats (json, gerrit, codeclimate) " \ "if an export directory is set the output files will " \ "be generated if not the results are printed to the " \ "stdout but only if one format was selected." __add_common_arguments(diff, output_formats=diff_output_formats, output_help_message=output_help_msg, allow_multiple_outputs=True) sum_p = subcommands.add_parser( 'sum', formatter_class=arg.RawDescriptionDefaultHelpFormatter, description="Show checker statistics for some analysis runs.", help="Show statistics of checkers.", epilog='''Example scenario: Get checker statistics ------------------------------------------------ Get statistics for a run: CodeChecker cmd sum -n my_run Get statistics for all runs filtered by multiple checker names: CodeChecker cmd sum --all --checker-name "core.*" "deadcode.*" Get statistics for all runs and only for severity 'high': CodeChecker cmd sum --all --severity "high"''') __register_sum(sum_p) sum_p.set_defaults(func=cmd_line_client.handle_list_result_types) __add_common_arguments(sum_p, output_formats=DEFAULT_OUTPUT_FORMATS) token = subcommands.add_parser( 'token', formatter_class=argparse.ArgumentDefaultsHelpFormatter, description="Access subcommands related to configuring personal " "access tokens managed by a CodeChecker server. Please " "see the individual subcommands for details.", help="Access subcommands related to configuring personal access " "tokens managed by a CodeChecker server.") __register_token(token) del_p = subcommands.add_parser( 'del', formatter_class=arg.RawDescriptionDefaultHelpFormatter, description=""" Remove analysis runs from the server based on some criteria. !!! WARNING !!! When a run is deleted, ALL associated information (reports, files, run histories) is PERMANENTLY LOST! Please be careful with this command because it can not be undone. NOTE! 
You can't remove a snapshot of run (a run history), you can remove only full runs.""", help="Delete analysis runs.") __register_delete(del_p) del_p.set_defaults(func=cmd_line_client.handle_remove_run_results) __add_common_arguments(del_p) update_p = subcommands.add_parser( 'update', formatter_class=argparse.ArgumentDefaultsHelpFormatter, description="Update the name of an analysis run.", help="Update an analysis run.") __register_update(update_p) update_p.set_defaults(func=cmd_line_client.handle_update_run) __add_common_arguments(update_p) suppress = subcommands.add_parser( 'suppress', formatter_class=argparse.ArgumentDefaultsHelpFormatter, description="Imports suppressions from a suppress file to a " "CodeChecker server.", help="Manage and import suppressions of a CodeChecker server.") __register_suppress(suppress) suppress.set_defaults(func=cmd_line_client.handle_suppress) __add_common_arguments(suppress) products = subcommands.add_parser( 'products', formatter_class=argparse.ArgumentDefaultsHelpFormatter, description="CodeChecker organises its databases into products. " "Each product has an individually configured database " "which stores the analysis results. These subcommands " "are used to manage the products configured by the " "server. Please see the individual subcommands for " "details.", epilog="Most of these commands require authentication and " "appropriate access rights. Please see 'CodeChecker cmd " "login' to authenticate.", help="Access subcommands related to configuring the products managed " "by a CodeChecker server.") __register_products(products) __add_common_arguments(products, needs_product_url=None) components = subcommands.add_parser( 'components', formatter_class=argparse.ArgumentDefaultsHelpFormatter, description="Source components are named collection of directories " "specified as directory filter.", help="Access subcommands related to configuring the source components " "managed by a CodeChecker server.") __register_source_components(components) __add_common_arguments(components) login = subcommands.add_parser( 'login', formatter_class=argparse.ArgumentDefaultsHelpFormatter, description="Certain CodeChecker servers can require elevated " "privileges to access analysis results. In such cases " "it is mandatory to authenticate to the server. This " "action is used to perform an authentication in the " "command-line.", help="Authenticate into CodeChecker servers that require privileges.") __register_login(login) login.set_defaults(func=cmd_line_client.handle_login) __add_common_arguments(login, needs_product_url=False) export = subcommands.add_parser( 'export', formatter_class=argparse.ArgumentDefaultsHelpFormatter, description="Export the comments and review status from " "codechecker server into a json format", help="Export the analysis to a json file " "for a given run" ) __register_export(export) export.set_defaults(func=cmd_line_client.handle_export) __add_common_arguments(export, output_formats=['json'])
def add_arguments_to_parser(parser): """ Add the subcommand's arguments to the given argparse.ArgumentParser. """ subcommands = parser.add_subparsers(title='available actions') # Create handlers for individual subcommands. runs = subcommands.add_parser( 'runs', formatter_class=argparse.ArgumentDefaultsHelpFormatter, description="List the analysis runs available on the server.", help="List the available analysis runs.") __register_runs(runs) runs.set_defaults(func=cmd_line_client.handle_list_runs) __add_common_arguments(runs, output_formats=DEFAULT_OUTPUT_FORMATS) run_histories = subcommands.add_parser( 'history', formatter_class=argparse.ArgumentDefaultsHelpFormatter, description="Show run history for some analysis runs.", help="Show run history of multiple runs.") __register_run_histories(run_histories) run_histories.set_defaults(func=cmd_line_client.handle_list_run_histories) __add_common_arguments(run_histories, output_formats=DEFAULT_OUTPUT_FORMATS) results = subcommands.add_parser( 'results', formatter_class=arg.RawDescriptionDefaultHelpFormatter, description="Show the individual analysis reports' summary.", help="List analysis result (finding) summary for a given run.", epilog='''Example scenario: List analysis results ------------------------------------------------ Get analysis results for a run: CodeChecker cmd results my_run Get analysis results for multiple runs: CodeChecker cmd results my_run1 my_run2 Get analysis results by using regex: CodeChecker cmd results "my_run*" Get analysis results for a run and filter the analysis results: CodeChecker cmd results my_run --severity critical high medium \\ --file "/home/username/my_project/*" CodeChecker cmd results my_run --review-status confirmed unreviewed \\ --component my_component_name''') __register_results(results) results.set_defaults(func=cmd_line_client.handle_list_results) __add_common_arguments(results, output_formats=DEFAULT_OUTPUT_FORMATS) diff = subcommands.add_parser( 'diff', formatter_class=arg.RawDescriptionDefaultHelpFormatter, description="Compare two analysis runs to show the results that " "differ between the two.", help="Compare two analysis runs and show the difference.", epilog=''' envionment variables: CC_REPO_DIR Root directory of the sources, i.e. the directory where the repository was cloned. Use it when generating gerrit output. CC_REPORT_URL URL where the report can be found. Use it when generating gerrit output. CC_CHANGED_FILES Path of changed files json from Gerrit. Use it when generating gerrit output. 
Example scenario: Compare multiple analysis runs ------------------------------------------------ Compare two runs and show results that didn't exist in the 'run1' but appear in the 'run2' run: CodeChecker cmd diff -b run1 -n run2 --new Compare a remote run with a local report directory and show results that didn't exist in the remote run 'run1' but appear in the local report directory: CodeChecker cmd diff -b run1 -n /my_report_dir --new Compare two runs and show results that exist in both runs and filter results by multiple severity values: CodeChecker cmd diff -b run1 -n run2 --unresolved --severity high medium''' ) __register_diff(diff) diff_output_formats = DEFAULT_OUTPUT_FORMATS + ["html", "gerrit", "codeclimate"] output_help_msg = "R|The output format(s) to use in showing the data.\n" \ "- html: multiple html files will be generated in the " \ "export directory.\n" \ "- gerrit: a 'gerrit_review.json' file will be " \ "generated in the export directory.\n" \ "- codeclimate: a 'codeclimate_issues.json' file will " \ "be generated in the export directory.\n" \ "For the output formats (json, gerrit, codeclimate) " \ "if an export directory is set the output files will " \ "be generated if not the results are printed to the " \ "stdout but only if one format was selected." __add_common_arguments(diff, output_formats=diff_output_formats, output_help_message=output_help_msg, allow_multiple_outputs=True) sum_p = subcommands.add_parser( 'sum', formatter_class=arg.RawDescriptionDefaultHelpFormatter, description="Show checker statistics for some analysis runs.", help="Show statistics of checkers.", epilog='''Example scenario: Get checker statistics ------------------------------------------------ Get statistics for a run: CodeChecker cmd sum -n my_run Get statistics for all runs filtered by multiple checker names: CodeChecker cmd sum --all --checker-name "core.*" "deadcode.*" Get statistics for all runs and only for severity 'high': CodeChecker cmd sum --all --severity "high"''') __register_sum(sum_p) sum_p.set_defaults(func=cmd_line_client.handle_list_result_types) __add_common_arguments(sum_p, output_formats=DEFAULT_OUTPUT_FORMATS) token = subcommands.add_parser( 'token', formatter_class=argparse.ArgumentDefaultsHelpFormatter, description="Access subcommands related to configuring personal " "access tokens managed by a CodeChecker server. Please " "see the individual subcommands for details.", help="Access subcommands related to configuring personal access " "tokens managed by a CodeChecker server.") __register_token(token) del_p = subcommands.add_parser( 'del', formatter_class=arg.RawDescriptionDefaultHelpFormatter, description=""" Remove analysis runs from the server based on some criteria. !!! WARNING !!! When a run is deleted, ALL associated information (reports, files, run histories) is PERMANENTLY LOST! Please be careful with this command because it can not be undone. NOTE! 
You can't remove a snapshot of run (a run history), you can remove only full runs.""", help="Delete analysis runs.") __register_delete(del_p) del_p.set_defaults(func=cmd_line_client.handle_remove_run_results) __add_common_arguments(del_p) update_p = subcommands.add_parser( 'update', formatter_class=argparse.ArgumentDefaultsHelpFormatter, description="Update the name of an analysis run.", help="Update an analysis run.") __register_update(update_p) update_p.set_defaults(func=cmd_line_client.handle_update_run) __add_common_arguments(update_p) suppress = subcommands.add_parser( 'suppress', formatter_class=argparse.ArgumentDefaultsHelpFormatter, description="Imports suppressions from a suppress file to a " "CodeChecker server.", help="Manage and import suppressions of a CodeChecker server.") __register_suppress(suppress) suppress.set_defaults(func=cmd_line_client.handle_suppress) __add_common_arguments(suppress) products = subcommands.add_parser( 'products', formatter_class=argparse.ArgumentDefaultsHelpFormatter, description="CodeChecker organises its databases into products. " "Each product has an individually configured database " "which stores the analysis results. These subcommands " "are used to manage the products configured by the " "server. Please see the individual subcommands for " "details.", epilog="Most of these commands require authentication and " "appropriate access rights. Please see 'CodeChecker cmd " "login' to authenticate.", help="Access subcommands related to configuring the products managed " "by a CodeChecker server.") __register_products(products) __add_common_arguments(products, needs_product_url=None) components = subcommands.add_parser( 'components', formatter_class=argparse.ArgumentDefaultsHelpFormatter, description="Source components are named collection of directories " "specified as directory filter.", help="Access subcommands related to configuring the source components " "managed by a CodeChecker server.") __register_source_components(components) __add_common_arguments(components) login = subcommands.add_parser( 'login', formatter_class=argparse.ArgumentDefaultsHelpFormatter, description="Certain CodeChecker servers can require elevated " "privileges to access analysis results. In such cases " "it is mandatory to authenticate to the server. This " "action is used to perform an authentication in the " "command-line.", help="Authenticate into CodeChecker servers that require privileges.") __register_login(login) login.set_defaults(func=cmd_line_client.handle_login) __add_common_arguments(login, needs_product_url=False) export = subcommands.add_parser( 'export', formatter_class=argparse.ArgumentDefaultsHelpFormatter, description="Export the comments and review status from " "codechecker server into a json format", help="Export data from a CodeChecker server to a json file." ) __register_export(export) export.set_defaults(func=cmd_line_client.handle_export) __add_common_arguments(export, output_formats=['json'])
17,415
def drop_missing_dims( supplied_dims: Collection, dims: Collection, missing_dims: str ) -> Collection: """Depending on the setting of missing_dims, drop any dimensions from supplied_dims that are not present in dims. Parameters ---------- supplied_dims : dict dims : sequence missing_dims : {"raise", "warn", "ignore"} """ if missing_dims == "raise": supplied_dims_set = set(val for val in supplied_dims if val is not ...) invalid = supplied_dims_set - set(dims) if invalid: raise ValueError( f"dimensions {invalid} do not exist. Expected one or more of {dims}" ) return supplied_dims elif missing_dims == "warn": invalid = set(supplied_dims) - set(dims) if invalid: warnings.warn( f"dimensions {invalid} do not exist. Expected one or more of {dims}" ) return [val for val in supplied_dims if val in dims or val is ...] elif missing_dims == "ignore": return [val for val in supplied_dims if val in dims or val is ...] else: raise ValueError( f"Unrecognised option {missing_dims} for missing_dims argument" )
def drop_missing_dims( supplied_dims: Collection, dims: Collection, missing_dims: str ) -> Collection: """Depending on the setting of missing_dims, drop any dimensions from supplied_dims that are not present in dims. Parameters ---------- supplied_dims : dict dims : sequence missing_dims : {"raise", "warn", "ignore"} """ if missing_dims == "raise": supplied_dims_set = set(val for val in supplied_dims if val is not ...) invalid = supplied_dims_set - set(dims) if invalid: raise ValueError( f"Dimensions {invalid} do not exist. Expected one or more of {dims}" ) return supplied_dims elif missing_dims == "warn": invalid = set(supplied_dims) - set(dims) if invalid: warnings.warn( f"dimensions {invalid} do not exist. Expected one or more of {dims}" ) return [val for val in supplied_dims if val in dims or val is ...] elif missing_dims == "ignore": return [val for val in supplied_dims if val in dims or val is ...] else: raise ValueError( f"Unrecognised option {missing_dims} for missing_dims argument" )
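For illustration, assuming drop_missing_dims as defined above is importable, the three missing_dims modes behave roughly as follows; the dimension names are made up.

import warnings

dims = ("x", "y")

drop_missing_dims(["x", "y"], dims, "ignore")      # -> ["x", "y"]
drop_missing_dims(["x", "z"], dims, "ignore")      # -> ["x"]  ("z" silently dropped)

with warnings.catch_warnings(record=True):
    warnings.simplefilter("always")
    drop_missing_dims(["x", "z"], dims, "warn")    # -> ["x"], plus a UserWarning about "z"

try:
    drop_missing_dims(["x", "z"], dims, "raise")
except ValueError as err:
    print(err)   # Dimensions {'z'} do not exist. Expected one or more of ('x', 'y')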
10,511
def present(module, dest, regexp, literal, line, insertafter, insertbefore, create, backup, backrefs, firstmatch): diff = {'before': '', 'after': '', 'before_header': '%s (content)' % dest, 'after_header': '%s (content)' % dest} b_dest = to_bytes(dest, errors='surrogate_or_strict') if not os.path.exists(b_dest): if not create: module.fail_json(rc=257, msg='Destination %s does not exist !' % dest) b_destpath = os.path.dirname(b_dest) if b_destpath and not os.path.exists(b_destpath) and not module.check_mode: try: os.makedirs(b_destpath) except Exception as e: module.fail_json(msg='Error creating %s Error code: %s Error description: %s' % (b_destpath, e[0], e[1])) b_lines = [] else: with open(b_dest, 'rb') as f: b_lines = f.readlines() if module._diff: diff['before'] = to_native(b''.join(b_lines)) if regexp is not None: bre_m = re.compile(to_bytes(regexp, errors='surrogate_or_strict')) if insertafter not in (None, 'BOF', 'EOF'): bre_ins = re.compile(to_bytes(insertafter, errors='surrogate_or_strict')) elif insertbefore not in (None, 'BOF'): bre_ins = re.compile(to_bytes(insertbefore, errors='surrogate_or_strict')) else: bre_ins = None # index[0] is the line num where regexp has been found # index[1] is the line num where insertafter/insertbefore has been found index = [-1, -1] match = None exact_line_match = False b_line = to_bytes(line, errors='surrogate_or_strict') # The module's doc says # "If regular expressions are passed to both regexp and # insertafter, insertafter is only honored if no match for regexp is found." # Therefore: # 1. regexp or literal was found -> ignore insertafter, replace the founded line # 2. regexp or literal was not found -> insert the line after 'insertafter' or 'insertbefore' line # Given the above: # 1. First check that there is no match for regexp: if regexp is not None: for lineno, b_cur_line in enumerate(b_lines): match_found = bre_m.search(b_cur_line) if match_found: index[0] = lineno match = match_found if firstmatch: break # 2. Second check that there is no match for literal: if literal is not None: for lineno, b_cur_line in enumerate(b_lines): match_found = to_bytes( literal, errors='surrogate_or_strict') in b_cur_line if match_found: index[0] = lineno match = match_found if firstmatch: break # 3. When no match found on the previous step, # parse for searching insertafter/insertbefore: if not match: for lineno, b_cur_line in enumerate(b_lines): if b_line == b_cur_line.rstrip(b'\r\n'): index[0] = lineno exact_line_match = True elif bre_ins is not None and bre_ins.search(b_cur_line): if insertafter: # + 1 for the next line index[1] = lineno + 1 if firstmatch: break if insertbefore: # index[1] for the previous line index[1] = lineno if firstmatch: break msg = '' changed = False b_linesep = to_bytes(os.linesep, errors='surrogate_or_strict') # Exact line or Regexp matched a line in the file if index[0] != -1: if backrefs and match: b_new_line = match.expand(b_line) else: # Don't do backref expansion if not asked. b_new_line = b_line if not b_new_line.endswith(b_linesep): b_new_line += b_linesep # If no regexp or literal was given and no line match is found anywhere in the file, # insert the line appropriately if using insertbefore or insertafter if regexp is None and literal is None and match is None and not exact_line_match: # Insert lines if insertafter and insertafter != 'EOF': # Ensure there is a line separator after the found string # at the end of the file. 
if b_lines and not b_lines[-1][-1:] in (b'\n', b'\r'): b_lines[-1] = b_lines[-1] + b_linesep # If the line to insert after is at the end of the file # use the appropriate index value. if len(b_lines) == index[1]: if b_lines[index[1] - 1].rstrip(b'\r\n') != b_line: b_lines.append(b_line + b_linesep) msg = 'line added' changed = True elif b_lines[index[1]].rstrip(b'\r\n') != b_line: b_lines.insert(index[1], b_line + b_linesep) msg = 'line added' changed = True elif insertbefore and insertbefore != 'BOF': # If the line to insert before is at the beginning of the file # use the appropriate index value. if index[1] <= 0: if b_lines[index[1]].rstrip(b'\r\n') != b_line: b_lines.insert(index[1], b_line + b_linesep) msg = 'line added' changed = True elif b_lines[index[1] - 1].rstrip(b'\r\n') != b_line: b_lines.insert(index[1], b_line + b_linesep) msg = 'line added' changed = True elif b_lines[index[0]] != b_new_line: b_lines[index[0]] = b_new_line msg = 'line replaced' changed = True elif backrefs: # Do absolutely nothing, since it's not safe generating the line # without the regexp matching to populate the backrefs. pass # Add it to the beginning of the file elif insertbefore == 'BOF' or insertafter == 'BOF': b_lines.insert(0, b_line + b_linesep) msg = 'line added' changed = True # Add it to the end of the file if requested or # if insertafter/insertbefore didn't match anything # (so default behaviour is to add at the end) elif insertafter == 'EOF' or index[1] == -1: # If the file is not empty then ensure there's a newline before the added line if b_lines and not b_lines[-1][-1:] in (b'\n', b'\r'): b_lines.append(b_linesep) b_lines.append(b_line + b_linesep) msg = 'line added' changed = True elif insertafter and index[1] != -1: # Don't insert the line if it already matches at the index. # If the line to insert after is at the end of the file use the appropriate index value. if len(b_lines) == index[1]: if b_lines[index[1] - 1].rstrip(b'\r\n') != b_line: b_lines.append(b_line + b_linesep) msg = 'line added' changed = True elif b_line != b_lines[index[1]].rstrip(b'\n\r'): b_lines.insert(index[1], b_line + b_linesep) msg = 'line added' changed = True # insert matched, but not the regexp or literal else: b_lines.insert(index[1], b_line + b_linesep) msg = 'line added' changed = True if module._diff: diff['after'] = to_native(b''.join(b_lines)) backupdest = "" if changed and not module.check_mode: if backup and os.path.exists(b_dest): backupdest = module.backup_local(dest) write_changes(module, b_lines, dest) if module.check_mode and not os.path.exists(b_dest): module.exit_json(changed=changed, msg=msg, backup=backupdest, diff=diff) attr_diff = {} msg, changed = check_file_attrs(module, changed, msg, attr_diff) attr_diff['before_header'] = '%s (file attributes)' % dest attr_diff['after_header'] = '%s (file attributes)' % dest difflist = [diff, attr_diff] module.exit_json(changed=changed, msg=msg, backup=backupdest, diff=difflist)
def present(module, dest, regexp, literal, line, insertafter, insertbefore, create, backup, backrefs, firstmatch): diff = {'before': '', 'after': '', 'before_header': '%s (content)' % dest, 'after_header': '%s (content)' % dest} b_dest = to_bytes(dest, errors='surrogate_or_strict') if not os.path.exists(b_dest): if not create: module.fail_json(rc=257, msg='Destination %s does not exist !' % dest) b_destpath = os.path.dirname(b_dest) if b_destpath and not os.path.exists(b_destpath) and not module.check_mode: try: os.makedirs(b_destpath) except Exception as e: module.fail_json(msg='Error creating %s Error code: %s Error description: %s' % (b_destpath, e[0], e[1])) b_lines = [] else: with open(b_dest, 'rb') as f: b_lines = f.readlines() if module._diff: diff['before'] = to_native(b''.join(b_lines)) if regexp is not None: bre_m = re.compile(to_bytes(regexp, errors='surrogate_or_strict')) if insertafter not in (None, 'BOF', 'EOF'): bre_ins = re.compile(to_bytes(insertafter, errors='surrogate_or_strict')) elif insertbefore not in (None, 'BOF'): bre_ins = re.compile(to_bytes(insertbefore, errors='surrogate_or_strict')) else: bre_ins = None # index[0] is the line num where regexp has been found # index[1] is the line num where insertafter/insertbefore has been found index = [-1, -1] match = None exact_line_match = False b_line = to_bytes(line, errors='surrogate_or_strict') # The module's doc says # "If regular expressions are passed to both regexp and # insertafter, insertafter is only honored if no match for regexp is found." # Therefore: # 1. regexp or literal was found -> ignore insertafter, replace the founded line # 2. regexp or literal was not found -> insert the line after 'insertafter' or 'insertbefore' line # Given the above: # 1. First check that there is no match for regexp: if regexp is not None: for lineno, b_cur_line in enumerate(b_lines): match_found = bre_m.search(b_cur_line) if match_found: index[0] = lineno match = match_found if firstmatch: break # 2. Second check that there is no match for literal: if literal is not None: for lineno, b_cur_line in enumerate(b_lines): match_found = to_bytes( literal, errors='surrogate_or_strict') in b_cur_line if match_found: index[0] = lineno match = match_found if firstmatch: break # 3. When no match found on the previous step, # parse for searching insertafter/insertbefore: if not match: for lineno, b_cur_line in enumerate(b_lines): if b_line == b_cur_line.rstrip(b'\r\n'): index[0] = lineno exact_line_match = True elif bre_ins is not None and bre_ins.search(b_cur_line): if insertafter: # + 1 for the next line index[1] = lineno + 1 if firstmatch: break if insertbefore: # index[1] for the previous line index[1] = lineno if firstmatch: break msg = '' changed = False b_linesep = to_bytes(os.linesep, errors='surrogate_or_strict') # Exact line or Regexp matched a line in the file if index[0] != -1: if backrefs and match: b_new_line = match.expand(b_line) else: # Don't do backref expansion if not asked. b_new_line = b_line if not b_new_line.endswith(b_linesep): b_new_line += b_linesep # If no regexp or literal was given and no line match is found anywhere in the file, # insert the line appropriately if using insertbefore or insertafter if (regexp, literal, match) == (None, None, None) and not exact_line_match: # Insert lines if insertafter and insertafter != 'EOF': # Ensure there is a line separator after the found string # at the end of the file. 
if b_lines and not b_lines[-1][-1:] in (b'\n', b'\r'): b_lines[-1] = b_lines[-1] + b_linesep # If the line to insert after is at the end of the file # use the appropriate index value. if len(b_lines) == index[1]: if b_lines[index[1] - 1].rstrip(b'\r\n') != b_line: b_lines.append(b_line + b_linesep) msg = 'line added' changed = True elif b_lines[index[1]].rstrip(b'\r\n') != b_line: b_lines.insert(index[1], b_line + b_linesep) msg = 'line added' changed = True elif insertbefore and insertbefore != 'BOF': # If the line to insert before is at the beginning of the file # use the appropriate index value. if index[1] <= 0: if b_lines[index[1]].rstrip(b'\r\n') != b_line: b_lines.insert(index[1], b_line + b_linesep) msg = 'line added' changed = True elif b_lines[index[1] - 1].rstrip(b'\r\n') != b_line: b_lines.insert(index[1], b_line + b_linesep) msg = 'line added' changed = True elif b_lines[index[0]] != b_new_line: b_lines[index[0]] = b_new_line msg = 'line replaced' changed = True elif backrefs: # Do absolutely nothing, since it's not safe generating the line # without the regexp matching to populate the backrefs. pass # Add it to the beginning of the file elif insertbefore == 'BOF' or insertafter == 'BOF': b_lines.insert(0, b_line + b_linesep) msg = 'line added' changed = True # Add it to the end of the file if requested or # if insertafter/insertbefore didn't match anything # (so default behaviour is to add at the end) elif insertafter == 'EOF' or index[1] == -1: # If the file is not empty then ensure there's a newline before the added line if b_lines and not b_lines[-1][-1:] in (b'\n', b'\r'): b_lines.append(b_linesep) b_lines.append(b_line + b_linesep) msg = 'line added' changed = True elif insertafter and index[1] != -1: # Don't insert the line if it already matches at the index. # If the line to insert after is at the end of the file use the appropriate index value. if len(b_lines) == index[1]: if b_lines[index[1] - 1].rstrip(b'\r\n') != b_line: b_lines.append(b_line + b_linesep) msg = 'line added' changed = True elif b_line != b_lines[index[1]].rstrip(b'\n\r'): b_lines.insert(index[1], b_line + b_linesep) msg = 'line added' changed = True # insert matched, but not the regexp or literal else: b_lines.insert(index[1], b_line + b_linesep) msg = 'line added' changed = True if module._diff: diff['after'] = to_native(b''.join(b_lines)) backupdest = "" if changed and not module.check_mode: if backup and os.path.exists(b_dest): backupdest = module.backup_local(dest) write_changes(module, b_lines, dest) if module.check_mode and not os.path.exists(b_dest): module.exit_json(changed=changed, msg=msg, backup=backupdest, diff=diff) attr_diff = {} msg, changed = check_file_attrs(module, changed, msg, attr_diff) attr_diff['before_header'] = '%s (file attributes)' % dest attr_diff['after_header'] = '%s (file attributes)' % dest difflist = [diff, attr_diff] module.exit_json(changed=changed, msg=msg, backup=backupdest, diff=difflist)
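The functional difference between the two versions above appears to be only in the guard for the no-regexp/no-literal/no-match branch: the chained "is None" checks become a single tuple comparison against (None, None, None). A quick check that the two guards agree for ordinary values:

for regexp, literal, match in [(None, None, None), ("^foo", None, None)]:
    chained = regexp is None and literal is None and match is None
    as_tuple = (regexp, literal, match) == (None, None, None)
    assert chained == as_tuple   # both guards select the same branch

The tuple form compares with == rather than identity, which only matters for unusual objects that define equality against None; for the regexp, literal and match values handled here the two guards are interchangeable.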
49,566
def compression_matrix(data, q, n_power_iter=0, seed=None, compute=False): """Randomly sample matrix to find most active subspace This compression matrix returned by this algorithm can be used to compute both the QR decomposition and the Singular Value Decomposition. Parameters ---------- data: Array q: int Size of the desired subspace (the actual size will be bigger, because of oversampling, see ``da.linalg.compression_level``) n_power_iter: int number of power iterations, useful when the singular values of the input matrix decay very slowly. compute : bool Whether or not to compute data at each use. Recomputing the input while performing several passes reduces memory pressure, but means that we have to compute the input multiple times. This is a good choice if the data is larger than memory and cheap to recreate. References ---------- N. Halko, P. G. Martinsson, and J. A. Tropp. Finding structure with randomness: Probabilistic algorithms for constructing approximate matrix decompositions. SIAM Rev., Survey and Review section, Vol. 53, num. 2, pp. 217-288, June 2011 https://arxiv.org/abs/0909.4061 """ m, n = data.shape comp_level = compression_level(min(m, n), q) if isinstance(seed, RandomState): state = seed else: state = RandomState(seed) if (data.dtype).itemsize <= 4: datatype = np.float32 else: datatype = np.float64 omega = state.standard_normal( size=(n, comp_level), chunks=(data.chunks[1], (comp_level,)) ).astype(datatype, copy=False) mat_h = data.dot(omega) for j in range(n_power_iter): if compute: mat_h = mat_h.persist() wait(mat_h) tmp = data.T.dot(mat_h) if compute: tmp = tmp.persist() wait(tmp) mat_h = data.dot(tmp) q, _ = tsqr(mat_h) return q.T
def compression_matrix(data, q, n_power_iter=0, seed=None, compute=False): """Randomly sample matrix to find most active subspace This compression matrix returned by this algorithm can be used to compute both the QR decomposition and the Singular Value Decomposition. Parameters ---------- data: Array q: int Size of the desired subspace (the actual size will be bigger, because of oversampling, see ``da.linalg.compression_level``) n_power_iter: int number of power iterations, useful when the singular values of the input matrix decay very slowly. compute : bool Whether or not to compute data at each use. Recomputing the input while performing several passes reduces memory pressure, but means that we have to compute the input multiple times. This is a good choice if the data is larger than memory and cheap to recreate. References ---------- N. Halko, P. G. Martinsson, and J. A. Tropp. Finding structure with randomness: Probabilistic algorithms for constructing approximate matrix decompositions. SIAM Rev., Survey and Review section, Vol. 53, num. 2, pp. 217-288, June 2011 https://arxiv.org/abs/0909.4061 """ m, n = data.shape comp_level = compression_level(min(m, n), q) if isinstance(seed, RandomState): state = seed else: state = RandomState(seed) datatype = data.dtype if datatype not in {np.float32, np.complex64, np.complex128}: datatype = np.float64 omega = state.standard_normal( size=(n, comp_level), chunks=(data.chunks[1], (comp_level,)) ).astype(datatype, copy=False) mat_h = data.dot(omega) for j in range(n_power_iter): if compute: mat_h = mat_h.persist() wait(mat_h) tmp = data.T.dot(mat_h) if compute: tmp = tmp.persist() wait(tmp) mat_h = data.dot(tmp) q, _ = tsqr(mat_h) return q.T
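For readers who want the algorithm without the dask machinery, here is a dense NumPy-only sketch of the same randomized range finder. The oversample parameter is a hypothetical stand-in for compression_level, and chunking, persist and wait are omitted; this is an illustration under those assumptions, not the dask implementation.

import numpy as np

def compression_matrix_dense(data, q, n_power_iter=0, oversample=10, seed=None):
    # Dense stand-in for the dask version above.
    m, n = data.shape
    comp_level = min(m, n, q + oversample)        # crude stand-in for compression_level()
    rng = np.random.default_rng(seed)
    omega = rng.standard_normal((n, comp_level)).astype(data.dtype, copy=False)
    mat_h = data @ omega                          # sample the range of data
    for _ in range(n_power_iter):                 # optional power iterations
        mat_h = data @ (data.T @ mat_h)
    q_mat, _ = np.linalg.qr(mat_h)
    return q_mat.T

a = np.random.default_rng(0).standard_normal((200, 50))
comp = compression_matrix_dense(a, q=10, n_power_iter=2)
print(comp.shape)   # (20, 200); comp @ a is the compressed matrix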
445
def _add_id_row_if_necessary(output_table, source_value): # DET requires an "id" field to exist to use SQL export. # if it doesn't insert one at the beginning of the table id_rows = [row for row in output_table.rows if row.field == ID_FIELD] if not id_rows: output_table.rows.insert(0, DETRow( source_field=source_value, field=ID_FIELD, ))
def _add_id_row_if_necessary(output_table, source_value): # DET requires an "id" field to exist to use SQL export. # Insert one at the beginning of the table if it doesn't exist. id_rows = [row for row in output_table.rows if row.field == ID_FIELD] if not id_rows: output_table.rows.insert(0, DETRow( source_field=source_value, field=ID_FIELD, ))
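A self-contained sketch of the same guard-then-insert logic; ID_FIELD, DETRow and OutputTable below are minimal stand-ins inferred from the snippet, not the real project classes, and the field values are made up.

from dataclasses import dataclass
from typing import List

ID_FIELD = "id"   # assumption: stands in for the real constant

@dataclass
class DETRow:                 # minimal stand-in for the real DETRow
    source_field: str
    field: str

@dataclass
class OutputTable:            # hypothetical container exposing .rows
    rows: List[DETRow]

def add_id_row_if_necessary(output_table, source_value):
    # Same behaviour as the helper above, expressed with the stand-ins.
    if not any(row.field == ID_FIELD for row in output_table.rows):
        output_table.rows.insert(0, DETRow(source_field=source_value, field=ID_FIELD))

table = OutputTable(rows=[DETRow("props.name", "name")])
add_id_row_if_necessary(table, "_id")
assert table.rows[0] == DETRow("_id", "id")   # id row inserted at the front

add_id_row_if_necessary(table, "_id")         # second call is a no-op
assert len(table.rows) == 2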
21,249
def handle_soft_bounce( message_id: str, bounceSubType: str, recipient_emails: List[str] ) -> None: """Handle a soft bounce notification received from SNS :param message_id: The unique message id assigned by Amazon SES :param bounceSubType: The subtype of the bounce, as determined by Amazon SES :param recipient_emails: a list of email addresses one per recipient to whom the bounce notification pertains :return: None """ back_off_events = ["Undetermined", "General", "MailboxFull"] small_only_events = ["MessageTooLarge", "AttachmentRejected"] MAX_RETRY_COUNTER = 5 INITIAL_HOURS = 2 for email in recipient_emails: if bounceSubType in back_off_events: # Handle events that must trigger a backoff event backoff_event = BackoffEvent.objects.filter( email_address=email, ) if not backoff_event.exists(): # Create a backoff event for an email address if not exists # Initialize retry_counter and next_retry_date next_retry_date = now() + timedelta(hours=INITIAL_HOURS) BackoffEvent.objects.create( email_address=email, retry_counter=0, next_retry_date=next_retry_date, ) else: # If a previous backoff event exists retry_counter = backoff_event[0].retry_counter next_retry_date = backoff_event[0].next_retry_date # Check if waiting period expired if next_retry_date <= now(): if retry_counter >= MAX_RETRY_COUNTER: # Check if backoff event has reached # max number of retries, if so ban email address # Only ban email address if not previously banned EmailFlag.objects.get_or_create( email_address=email, flag_type="ban", defaults={ "flag": "max_retry_reached", "reason": bounceSubType, }, ) else: # If max number of retries has not been reached, # update backoff event, update retry_counter new_retry_counter = retry_counter + 1 # Update new_next_retry_date exponentially new_next_retry_date = now() + timedelta( hours=pow(INITIAL_HOURS, new_retry_counter + 1) ) BackoffEvent.objects.filter( email_address=email, ).update( retry_counter=new_retry_counter, next_retry_date=new_next_retry_date, ) elif bounceSubType in small_only_events: # Handle events that must trigger a small_email_only event # Create a small_email_only flag for email address EmailFlag.objects.get_or_create( email_address=email, flag_type="flag", flag="small_email_only", defaults={"reason": bounceSubType}, ) else: # Handle other unexpected bounceSubType events, log a warning logging.warning( f"Unexpected {bounceSubType} soft bounce for {email}" )
def handle_soft_bounce( message_id: str, bounceSubType: str, recipient_emails: List[str] ) -> None: """Handle a soft bounce notification received from SNS :param message_id: The unique message id assigned by Amazon SES :param bounceSubType: The subtype of the bounce, as determined by Amazon SES :param recipient_emails: a list of email addresses one per recipient to whom the bounce notification pertains :return: None """ back_off_events = ["Undetermined", "General", "MailboxFull"] small_only_events = ["MessageTooLarge", "AttachmentRejected"] MAX_RETRY_COUNTER = 5 INITIAL_HOURS = 2 for email in recipient_emails: if bounceSubType in back_off_events: # Handle events that must trigger a backoff event backoff_event = BackoffEvent.objects.filter( email_address=email, ) if not backoff_event.exists(): # Create a backoff event for an email address if not exists # Initialize retry_counter and next_retry_date next_retry_date = now() + timedelta(hours=INITIAL_HOURS) BackoffEvent.objects.create( email_address=email, retry_counter=0, next_retry_date=next_retry_date, ) else: # If a previous backoff event exists retry_counter = backoff_event[0].retry_counter next_retry_date = backoff_event[0].next_retry_date # Check if waiting period expired if now() >= next_retry_date: if retry_counter >= MAX_RETRY_COUNTER: # Check if backoff event has reached # max number of retries, if so ban email address # Only ban email address if not previously banned EmailFlag.objects.get_or_create( email_address=email, flag_type="ban", defaults={ "flag": "max_retry_reached", "reason": bounceSubType, }, ) else: # If max number of retries has not been reached, # update backoff event, update retry_counter new_retry_counter = retry_counter + 1 # Update new_next_retry_date exponentially new_next_retry_date = now() + timedelta( hours=pow(INITIAL_HOURS, new_retry_counter + 1) ) BackoffEvent.objects.filter( email_address=email, ).update( retry_counter=new_retry_counter, next_retry_date=new_next_retry_date, ) elif bounceSubType in small_only_events: # Handle events that must trigger a small_email_only event # Create a small_email_only flag for email address EmailFlag.objects.get_or_create( email_address=email, flag_type="flag", flag="small_email_only", defaults={"reason": bounceSubType}, ) else: # Handle other unexpected bounceSubType events, log a warning logging.warning( f"Unexpected {bounceSubType} soft bounce for {email}" )
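The change between the two versions is the direction of the expiry check (now() >= next_retry_date), which reads as "the waiting period has passed". For reference, the backoff schedule implied by INITIAL_HOURS = 2 and MAX_RETRY_COUNTER = 5 works out as sketched below.

INITIAL_HOURS = 2
MAX_RETRY_COUNTER = 5

# The first bounce creates the backoff event with a 2 hour wait; each later
# bounce that arrives after the wait has expired bumps retry_counter and
# recomputes the delay as INITIAL_HOURS ** (new_retry_counter + 1).
waits = [INITIAL_HOURS] + [
    pow(INITIAL_HOURS, retry_counter + 2) for retry_counter in range(MAX_RETRY_COUNTER)
]
print(waits)   # [2, 4, 8, 16, 32, 64] hours; the next bounce after that bans the address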
39,618
def parse_args(**kwargs: Any): kwargs['bind_addresses'] = kwargs.pop('bind_address') if kwargs['echo_runtime_info']: warnings.warn( "The `--echo-runtime-info` option is deprecated, use " "`--emit-server-status` instead.", DeprecationWarning, ) if kwargs['bootstrap']: warnings.warn( "Option `--bootstrap` is deprecated, use `--bootstrap-only`", DeprecationWarning, ) kwargs['bootstrap_only'] = True kwargs.pop('bootstrap', False) if kwargs['default_database_user']: if kwargs['default_database_user'] == 'edgedb': warnings.warn( "Option `--default-database-user` is deprecated." " Role `edgedb` is always created and" " no role named after unix user is created any more.", DeprecationWarning, ) else: warnings.warn( "Option `--default-database-user` is deprecated." " Please create the role explicitly.", DeprecationWarning, ) if kwargs['default_database']: if kwargs['default_database'] == 'edgedb': warnings.warn( "Option `--default-database` is deprecated." " Database `edgedb` is always created and" " no database named after unix user is created any more.", DeprecationWarning, ) else: warnings.warn( "Option `--default-database` is deprecated." " Please create the database explicitly.", DeprecationWarning, ) if kwargs['auto_shutdown']: warnings.warn( "The `--auto-shutdown` option is deprecated, use " "`--auto-shutdown-after` instead.", DeprecationWarning, ) if kwargs['auto_shutdown_after'] < 0: kwargs['auto_shutdown_after'] = 0 del kwargs['auto_shutdown'] if kwargs['postgres_dsn']: warnings.warn( "The `--postgres-dsn` option is deprecated, use " "`--backend-dsn` instead.", DeprecationWarning, ) if not kwargs['backend_dsn']: kwargs['backend_dsn'] = kwargs['postgres_dsn'] del kwargs['postgres_dsn'] if os.environ.get('EDGEDB_SERVER_ALLOW_INSECURE_BINARY_CLIENTS') == "1": if kwargs['binary_endpoint_security'] == "tls": abort( "The value of deprecated " "EDGEDB_SERVER_ALLOW_INSECURE_BINARY_CLIENTS environment " "variable disagrees with --binary-endpoint-security" ) else: if kwargs['binary_endpoint_security'] == "default": warnings.warn( "EDGEDB_SERVER_ALLOW_INSECURE_BINARY_CLIENTS is " "deprecated. Use EDGEDB_SERVER_BINARY_ENDPOINT_SECURITY " "instead.", DeprecationWarning, ) kwargs['binary_endpoint_security'] = 'optional' if os.environ.get('EDGEDB_SERVER_ALLOW_INSECURE_HTTP_CLIENTS') == "1": if kwargs['http_endpoint_security'] == "tls": abort( "The value of deprecated " "EDGEDB_SERVER_ALLOW_INSECURE_HTTP_CLIENTS environment " "variable disagrees with --http-endpoint-security" ) else: if kwargs['http_endpoint_security'] == "default": warnings.warn( "EDGEDB_SERVER_ALLOW_INSECURE_BINARY_CLIENTS is " "deprecated. 
Use EDGEDB_SERVER_BINARY_ENDPOINT_SECURITY " "instead.", DeprecationWarning, ) kwargs['http_endpoint_security'] = 'optional' if kwargs['security'] == 'insecure_dev_mode': if kwargs['http_endpoint_security'] == 'default': kwargs['http_endpoint_security'] = 'optional' if not kwargs['default_auth_method']: kwargs['default_auth_method'] = 'Trust' if not (kwargs['tls_cert_file'] or kwargs['tls_key_file']): kwargs['generate_self_signed_cert'] = True elif not kwargs['default_auth_method']: kwargs['default_auth_method'] = 'SCRAM' if kwargs['security'] == 'default': kwargs['security'] = 'strict' if kwargs['binary_endpoint_security'] == 'default': kwargs['binary_endpoint_security'] = 'tls' if kwargs['http_endpoint_security'] == 'default': kwargs['http_endpoint_security'] = 'tls' kwargs['security'] = ServerSecurityMode(kwargs['security']) kwargs['binary_endpoint_security'] = ServerEndpointSecurityMode( kwargs['binary_endpoint_security']) kwargs['http_endpoint_security'] = ServerEndpointSecurityMode( kwargs['http_endpoint_security']) if kwargs['temp_dir']: if kwargs['data_dir']: abort('--temp-dir is incompatible with --data-dir/-D') if kwargs['runstate_dir']: abort('--temp-dir is incompatible with --runstate-dir') if kwargs['backend_dsn']: abort('--temp-dir is incompatible with --backend-dsn') kwargs['data_dir'] = kwargs['runstate_dir'] = pathlib.Path( tempfile.mkdtemp()) else: if not kwargs['data_dir']: if kwargs['backend_dsn']: pass elif devmode.is_in_dev_mode(): data_dir = devmode.get_dev_mode_data_dir() if not data_dir.parent.exists(): data_dir.parent.mkdir(exist_ok=True, parents=True) kwargs["data_dir"] = data_dir else: abort('Please specify the instance data directory ' 'using the -D argument or the address of a remote ' 'backend cluster using the --backend-dsn argument') elif kwargs['backend_dsn']: abort('The -D and --backend-dsn options are mutually exclusive.') if kwargs['tls_cert_file'] or kwargs['tls_key_file']: if tls_cert_file := kwargs['tls_cert_file']: if kwargs['generate_self_signed_cert']: abort("--tls-cert-file and --generate-self-signed-cert are " "mutually exclusive.") tls_cert_file = tls_cert_file.resolve() if not tls_cert_file.exists(): abort(f"File doesn't exist: --tls-cert-file={tls_cert_file}") kwargs['tls_cert_file'] = tls_cert_file elif kwargs['data_dir'] and ( tls_cert_file := kwargs['data_dir'] / TLS_CERT_FILE_NAME ).exists(): kwargs['tls_cert_file'] = tls_cert_file else: abort("Cannot find --tls-cert-file, but --tls-key-file is set") if tls_key_file := kwargs['tls_key_file']: if kwargs['generate_self_signed_cert']: abort("--tls-key-file and --generate-self-signed-cert are " "mutually exclusive.") tls_key_file = tls_key_file.resolve() if not tls_key_file.exists(): abort(f"File doesn't exist: --tls-key-file={tls_key_file}") kwargs['tls_key_file'] = tls_key_file elif kwargs['data_dir'] and ( tls_key_file := kwargs['data_dir'] / TLS_KEY_FILE_NAME ).exists(): kwargs['tls_key_file'] = tls_key_file else: if devmode.is_in_dev_mode(): kwargs['generate_self_signed_cert'] = True if data_dir := kwargs['data_dir']: if (tls_cert_file := data_dir / TLS_CERT_FILE_NAME).exists(): kwargs['tls_cert_file'] = tls_cert_file kwargs['generate_self_signed_cert'] = False if (tls_key_file := data_dir / TLS_KEY_FILE_NAME).exists(): kwargs['tls_key_file'] = tls_key_file if ( not kwargs['generate_self_signed_cert'] and not kwargs['tls_cert_file'] and not kwargs['bootstrap_only'] ): abort('Please specify a TLS certificate with --tls-cert-file.') if kwargs['log_level']: kwargs['log_level'] = 
kwargs['log_level'].lower()[0] bootstrap_script_text: Optional[str] if kwargs['bootstrap_script']: with open(kwargs['bootstrap_script']) as f: bootstrap_script_text = f.read() elif kwargs['bootstrap_command']: bootstrap_script_text = kwargs['bootstrap_command'] else: bootstrap_script_text = None if bootstrap_script_text is None: startup_script = None else: startup_script = StartupScript( text=bootstrap_script_text, database=( kwargs['default_database'] or defines.EDGEDB_SUPERUSER_DB ), user=( kwargs['default_database_user'] or defines.EDGEDB_SUPERUSER ), ) status_sinks = [] if status_sink_addrs := kwargs['emit_server_status']: for status_sink_addr in status_sink_addrs: if status_sink_addr.startswith('file://'): status_sink = _status_sink_file( status_sink_addr[len('file://'):]) elif status_sink_addr.startswith('fd://'): try: fileno = int(status_sink_addr[len('fd://'):]) except ValueError: abort( f'invalid file descriptor number in ' f'--emit-server-status: ' f'{status_sink_addr[len("fd://")]!r}' ) status_sink = _status_sink_fd(fileno) elif m := re.match(r'(^\w+)://', status_sink_addr): abort( f'unsupported destination scheme in --emit-server-status: ' f'{m.group(1)}' ) else: # Assume it's a file. status_sink = _status_sink_file(status_sink_addr) status_sinks.append(status_sink) return ServerConfig( startup_script=startup_script, status_sinks=status_sinks, **kwargs, )
def parse_args(**kwargs: Any): kwargs['bind_addresses'] = kwargs.pop('bind_address') if kwargs['echo_runtime_info']: warnings.warn( "The `--echo-runtime-info` option is deprecated, use " "`--emit-server-status` instead.", DeprecationWarning, ) if kwargs['bootstrap']: warnings.warn( "Option `--bootstrap` is deprecated, use `--bootstrap-only`", DeprecationWarning, ) kwargs['bootstrap_only'] = True kwargs.pop('bootstrap', False) if kwargs['default_database_user']: if kwargs['default_database_user'] == 'edgedb': warnings.warn( "Option `--default-database-user` is deprecated." " Role `edgedb` is always created and" " no role named after unix user is created any more.", DeprecationWarning, ) else: warnings.warn( "Option `--default-database-user` is deprecated." " Please create the role explicitly.", DeprecationWarning, ) if kwargs['default_database']: if kwargs['default_database'] == 'edgedb': warnings.warn( "Option `--default-database` is deprecated." " Database `edgedb` is always created and" " no database named after unix user is created any more.", DeprecationWarning, ) else: warnings.warn( "Option `--default-database` is deprecated." " Please create the database explicitly.", DeprecationWarning, ) if kwargs['auto_shutdown']: warnings.warn( "The `--auto-shutdown` option is deprecated, use " "`--auto-shutdown-after` instead.", DeprecationWarning, ) if kwargs['auto_shutdown_after'] < 0: kwargs['auto_shutdown_after'] = 0 del kwargs['auto_shutdown'] if kwargs['postgres_dsn']: warnings.warn( "The `--postgres-dsn` option is deprecated, use " "`--backend-dsn` instead.", DeprecationWarning, ) if not kwargs['backend_dsn']: kwargs['backend_dsn'] = kwargs['postgres_dsn'] del kwargs['postgres_dsn'] if os.environ.get('EDGEDB_SERVER_ALLOW_INSECURE_BINARY_CLIENTS') == "1": if kwargs['binary_endpoint_security'] == "tls": abort( "The value of deprecated " "EDGEDB_SERVER_ALLOW_INSECURE_BINARY_CLIENTS environment " "variable disagrees with --binary-endpoint-security" ) else: if kwargs['binary_endpoint_security'] == "default": warnings.warn( "EDGEDB_SERVER_ALLOW_INSECURE_BINARY_CLIENTS is " "deprecated. Use EDGEDB_SERVER_BINARY_ENDPOINT_SECURITY " "instead.", DeprecationWarning, ) kwargs['binary_endpoint_security'] = 'optional' if os.environ.get('EDGEDB_SERVER_ALLOW_INSECURE_HTTP_CLIENTS') == "1": if kwargs['http_endpoint_security'] == "tls": abort( "The value of deprecated " "EDGEDB_SERVER_ALLOW_INSECURE_HTTP_CLIENTS environment " "variable disagrees with --http-endpoint-security" ) else: if kwargs['http_endpoint_security'] == "default": warnings.warn( "EDGEDB_SERVER_ALLOW_INSECURE_BINARY_CLIENTS is " "deprecated. 
Use EDGEDB_SERVER_BINARY_ENDPOINT_SECURITY " "instead.", DeprecationWarning, ) kwargs['http_endpoint_security'] = 'optional' if kwargs['security'] == 'insecure_dev_mode': if kwargs['http_endpoint_security'] == 'default': kwargs['http_endpoint_security'] = 'optional' if not kwargs['default_auth_method']: kwargs['default_auth_method'] = 'Trust' if not (kwargs['tls_cert_file'] or kwargs['tls_key_file']): kwargs['generate_self_signed_cert'] = True elif not kwargs['default_auth_method']: kwargs['default_auth_method'] = 'SCRAM' if kwargs['security'] == 'default': kwargs['security'] = 'strict' if kwargs['binary_endpoint_security'] == 'default': kwargs['binary_endpoint_security'] = 'tls' if kwargs['http_endpoint_security'] == 'default': kwargs['http_endpoint_security'] = 'tls' kwargs['security'] = ServerSecurityMode(kwargs['security']) kwargs['binary_endpoint_security'] = ServerEndpointSecurityMode( kwargs['binary_endpoint_security']) kwargs['http_endpoint_security'] = ServerEndpointSecurityMode( kwargs['http_endpoint_security']) if kwargs['temp_dir']: if kwargs['data_dir']: abort('--temp-dir is incompatible with --data-dir/-D') if kwargs['runstate_dir']: abort('--temp-dir is incompatible with --runstate-dir') if kwargs['backend_dsn']: abort('--temp-dir is incompatible with --backend-dsn') kwargs['data_dir'] = kwargs['runstate_dir'] = pathlib.Path( tempfile.mkdtemp()) else: if not kwargs['data_dir']: if kwargs['backend_dsn']: pass elif devmode.is_in_dev_mode(): data_dir = devmode.get_dev_mode_data_dir() if not data_dir.parent.exists(): data_dir.parent.mkdir(exist_ok=True, parents=True) kwargs["data_dir"] = data_dir else: abort('Please specify the instance data directory ' 'using the -D argument or the address of a remote ' 'backend cluster using the --backend-dsn argument') elif kwargs['backend_dsn']: abort('The -D and --backend-dsn options are mutually exclusive.') if kwargs['tls_cert_file'] or kwargs['tls_key_file']: if tls_cert_file := kwargs['tls_cert_file']: if kwargs['generate_self_signed_cert']: abort("--tls-cert-file and --generate-self-signed-cert are " "mutually exclusive.") tls_cert_file = tls_cert_file.resolve() if not tls_cert_file.exists(): abort(f"File doesn't exist: --tls-cert-file={tls_cert_file}") kwargs['tls_cert_file'] = tls_cert_file elif kwargs['data_dir'] and ( tls_cert_file := kwargs['data_dir'] / TLS_CERT_FILE_NAME ).exists(): kwargs['tls_cert_file'] = tls_cert_file else: abort("Cannot find --tls-cert-file, but --tls-key-file is set") if tls_key_file := kwargs['tls_key_file']: if kwargs['generate_self_signed_cert']: abort("--tls-key-file and --generate-self-signed-cert are " "mutually exclusive.") tls_key_file = tls_key_file.resolve() if not tls_key_file.exists(): abort(f"File doesn't exist: --tls-key-file={tls_key_file}") kwargs['tls_key_file'] = tls_key_file elif kwargs['data_dir'] and ( tls_key_file := kwargs['data_dir'] / TLS_KEY_FILE_NAME ).exists(): kwargs['tls_key_file'] = tls_key_file else: if devmode.is_in_dev_mode(): kwargs['generate_self_signed_cert'] = True if data_dir := kwargs['data_dir']: if (tls_cert_file := data_dir / TLS_CERT_FILE_NAME).exists(): kwargs['tls_cert_file'] = tls_cert_file kwargs['generate_self_signed_cert'] = False if (tls_key_file := data_dir / TLS_KEY_FILE_NAME).exists(): kwargs['tls_key_file'] = tls_key_file if ( not kwargs['generate_self_signed_cert'] and not kwargs['tls_cert_file'] and not kwargs['bootstrap_only'] ): abort('Please specify a TLS certificate with --tls-cert-file.') if kwargs['log_level']: kwargs['log_level'] = 
kwargs['log_level'].lower()[0] bootstrap_script_text: Optional[str] if kwargs['bootstrap_script']: with open(kwargs['bootstrap_script']) as f: bootstrap_script_text = f.read() elif kwargs['bootstrap_command']: bootstrap_script_text = kwargs['bootstrap_command'] else: bootstrap_script_text = None if bootstrap_script_text is None: startup_script = None else: startup_script = StartupScript( text=bootstrap_script_text, database=( kwargs['default_database'] or defines.EDGEDB_SUPERUSER_DB ), user=( kwargs['default_database_user'] or defines.EDGEDB_SUPERUSER ), ) status_sinks = [] if status_sink_addrs := kwargs['emit_server_status']: for status_sink_addr in status_sink_addrs: if status_sink_addr.startswith('file://'): status_sink = _status_sink_file( status_sink_addr[len('file://'):]) elif status_sink_addr.startswith('fd://'): try: fileno = int(status_sink_addr[len('fd://'):]) except ValueError: abort( f'invalid file descriptor number in ' f'--emit-server-status: ' f'{status_sink_addr[len("fd://"):]!r}' ) status_sink = _status_sink_fd(fileno) elif m := re.match(r'(^\w+)://', status_sink_addr): abort( f'unsupported destination scheme in --emit-server-status: ' f'{m.group(1)}' ) else: # Assume it's a file. status_sink = _status_sink_file(status_sink_addr) status_sinks.append(status_sink) return ServerConfig( startup_script=startup_script, status_sinks=status_sinks, **kwargs, )
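The substantive fix in the modified version appears to be the missing slice colon in the fd:// error message: the original indexed a single character of the address while the fix slices out the whole value. A two-line illustration with a made-up address:

status_sink_addr = "fd://not-a-number"

print(status_sink_addr[len("fd://")])    # 'n'              (original: one character)
print(status_sink_addr[len("fd://"):])   # 'not-a-number'   (fixed: the full value)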
4,164
def _read_nihon_events(fname, orig_time): fname = _ensure_path(fname) annotations = None log_fname = fname.with_suffix('.LOG') if not log_fname.exists(): warn('No LOG file exists. Annotations will not be read') return annotations logger.info('Found LOG file, reading events.') with open(log_fname, 'r') as fid: version = np.fromfile(fid, '|S16', 1).astype('U16')[0] if version not in _valid_headers: raise ValueError( 'Not a valid Nihon Kohden LOG file ({})'.format(version)) fid.seek(0x91) n_logblocks = np.fromfile(fid, np.uint8, 1)[0] all_onsets = [] all_descriptions = [] for t_block in range(n_logblocks): fid.seek(0x92 + t_block * 20) t_blk_address = np.fromfile(fid, np.uint32, 1)[0] fid.seek(t_blk_address + 0x12) n_logs = np.fromfile(fid, np.uint8, 1)[0] fid.seek(t_blk_address + 0x14) t_logs = np.fromfile(fid, '|S45', n_logs).astype('U45') for t_log in t_logs: t_desc = t_log[:20].strip('\x00') t_onset = datetime.strptime(t_log[20:26], '%H%M%S') t_onset = (t_onset.hour * 3600 + t_onset.minute * 60 + t_onset.second) all_onsets.append(t_onset) all_descriptions.append(t_desc) annotations = Annotations(all_onsets, 0.0, all_descriptions, orig_time) return annotations
def _read_nihon_annotations(fname, orig_time): fname = _ensure_path(fname) annotations = None log_fname = fname.with_suffix('.LOG') if not log_fname.exists(): warn('No LOG file exists. Annotations will not be read') return annotations logger.info('Found LOG file, reading events.') with open(log_fname, 'r') as fid: version = np.fromfile(fid, '|S16', 1).astype('U16')[0] if version not in _valid_headers: raise ValueError( 'Not a valid Nihon Kohden LOG file ({})'.format(version)) fid.seek(0x91) n_logblocks = np.fromfile(fid, np.uint8, 1)[0] all_onsets = [] all_descriptions = [] for t_block in range(n_logblocks): fid.seek(0x92 + t_block * 20) t_blk_address = np.fromfile(fid, np.uint32, 1)[0] fid.seek(t_blk_address + 0x12) n_logs = np.fromfile(fid, np.uint8, 1)[0] fid.seek(t_blk_address + 0x14) t_logs = np.fromfile(fid, '|S45', n_logs).astype('U45') for t_log in t_logs: t_desc = t_log[:20].strip('\x00') t_onset = datetime.strptime(t_log[20:26], '%H%M%S') t_onset = (t_onset.hour * 3600 + t_onset.minute * 60 + t_onset.second) all_onsets.append(t_onset) all_descriptions.append(t_desc) annotations = Annotations(all_onsets, 0.0, all_descriptions, orig_time) return annotations
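The rename aside, both versions turn the six-digit HHMMSS stamp stored in each log entry into seconds after midnight. A standalone illustration with a made-up stamp:

from datetime import datetime

stamp = "134502"                          # hypothetical HHMMSS field from a log entry
t_onset = datetime.strptime(stamp, "%H%M%S")
seconds = t_onset.hour * 3600 + t_onset.minute * 60 + t_onset.second
print(seconds)                            # 49502 seconds after midnight (13:45:02)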
30,098
def test_save_signatures_to_location_1_zip(runtmp): # save to sigfile.gz sig2 = utils.get_test_data('2.fa.sig') ss2 = sourmash.load_one_signature(sig2, ksize=31) sig47 = utils.get_test_data('47.fa.sig') ss47 = sourmash.load_one_signature(sig47, ksize=31) outloc = runtmp.output('foo.zip') with sourmash_args.SaveSignaturesToLocation(outloc) as save_sig: print(save_sig) save_sig.add(ss2) save_sig.add(ss47) # can we open as a .zip file? with zipfile.ZipFile(outloc, "r") as zf: assert list(zf.infolist()) saved = list(sourmash.load_file_as_signatures(outloc)) assert ss2 in saved assert ss47 in saved assert len(saved) == 2
def test_save_signatures_to_location_1_zip(runtmp): # save to sigfile.zip sig2 = utils.get_test_data('2.fa.sig') ss2 = sourmash.load_one_signature(sig2, ksize=31) sig47 = utils.get_test_data('47.fa.sig') ss47 = sourmash.load_one_signature(sig47, ksize=31) outloc = runtmp.output('foo.zip') with sourmash_args.SaveSignaturesToLocation(outloc) as save_sig: print(save_sig) save_sig.add(ss2) save_sig.add(ss47) # can we open as a .zip file? with zipfile.ZipFile(outloc, "r") as zf: assert list(zf.infolist()) saved = list(sourmash.load_file_as_signatures(outloc)) assert ss2 in saved assert ss47 in saved assert len(saved) == 2
28,502
def test_secured_server( container: TrackedContainer, http_client: requests.Session ) -> None: """Notebook server should eventually request user login.""" container.run_detached(ports={"8888/tcp": None}) host_port = container.get_host_port("8888/tcp") resp = http_client.get("http://localhost:" + host_port) resp.raise_for_status() assert "login_submit" in resp.text, "User login not requested"
def test_secured_server( container: TrackedContainer, http_client: requests.Session ) -> None: """Notebook server should eventually request user login.""" container.run_detached(ports={"8888/tcp": None}) host_port = container.get_host_port("8888/tcp") resp = http_client.get(f"http://localhost:{host_port}") resp.raise_for_status() assert "login_submit" in resp.text, "User login not requested"
27,727
def test_testdir_create_file_of_type(testdir): data = "hello world" one = testdir.makepyfile(data) two = testdir.maketxtfile(data) three = testdir.makeconftest(data) four = testdir.makeini(data) assert {one, two, three, four} == testdir.created_files
def test_testdir_create_file_of_type(testdir): data = "hello world" one = testdir.makepyfile(data) two = testdir.maketxtfile(data) three = testdir.makeconftest(data) four = testdir.makeini(data) assert testdir.created_files == {one, two, three, four}