Dataset schema (column: type, observed value-length range or class count):
repository_name: string, lengths 7 to 107
function_path: string, lengths 4 to 190
function_identifier: string, lengths 1 to 236
language: string, 1 class
function: string, lengths 9 to 647k
docstring: string, lengths 5 to 488k
function_url: string, lengths 71 to 285
context: string, lengths 0 to 2.51M
license: string, 5 classes
pyomeca/bioptim
examples/torque_driven_ocp/track_markers_with_torque_actuators.py
main
python
def main():
    ocp = prepare_ocp("models/cube.bioMod", n_shooting=30, final_time=2, actuator_type=2)
    sol = ocp.solve(Solver.IPOPT(show_online_optim=True))
    sol.animate()
Prepares and solves an OCP with torque actuators, then animates it
https://github.com/pyomeca/bioptim/blob/66c907eb63061e9bcfdc1ec5f16bd1a84d154e59/examples/torque_driven_ocp/track_markers_with_torque_actuators.py#L124-L135
import biorbd_casadi as biorbd from bioptim import ( Node, OptimalControlProgram, DynamicsList, DynamicsFcn, ObjectiveList, ObjectiveFcn, ConstraintList, ConstraintFcn, BoundsList, QAndQDotBounds, InitialGuessList, OdeSolver, Solver, ) def prepare_ocp( biorbd_model_path: str, n_shooting: int, final_time: float, actuator_type: int = None, ode_solver: OdeSolver = OdeSolver.RK4(), ) -> OptimalControlProgram: biorbd_model = biorbd.Model(biorbd_model_path) if actuator_type and actuator_type == 1: tau_min, tau_max, tau_init = -1, 1, 0 else: tau_min, tau_max, tau_init = -100, 100, 0 objective_functions = ObjectiveList() objective_functions.add(ObjectiveFcn.Lagrange.MINIMIZE_CONTROL, key="tau", weight=100) dynamics = DynamicsList() if actuator_type: if actuator_type == 1: dynamics.add(DynamicsFcn.TORQUE_ACTIVATIONS_DRIVEN) elif actuator_type == 2: dynamics.add(DynamicsFcn.TORQUE_DRIVEN) else: raise ValueError("actuator_type is 1 (torque activations) or 2 (torque max constraints)") else: expand = False if isinstance(ode_solver, OdeSolver.IRK) else True dynamics.add(DynamicsFcn.TORQUE_DRIVEN, expand=expand) constraints = ConstraintList() constraints.add(ConstraintFcn.SUPERIMPOSE_MARKERS, node=Node.START, first_marker="m0", second_marker="m1") constraints.add(ConstraintFcn.SUPERIMPOSE_MARKERS, node=Node.END, first_marker="m0", second_marker="m2") if actuator_type == 2: constraints.add(ConstraintFcn.TORQUE_MAX_FROM_Q_AND_QDOT, node=Node.ALL_SHOOTING, min_torque=7.5) x_bounds = BoundsList() x_bounds.add(bounds=QAndQDotBounds(biorbd_model)) x_bounds[0][3:6, [0, -1]] = 0 x_bounds[0][2, [0, -1]] = [0, 1.57] x_init = InitialGuessList() x_init.add([0] * (biorbd_model.nbQ() + biorbd_model.nbQdot())) u_bounds = BoundsList() u_bounds.add([tau_min] * biorbd_model.nbGeneralizedTorque(), [tau_max] * biorbd_model.nbGeneralizedTorque()) u_init = InitialGuessList() u_init.add([tau_init] * biorbd_model.nbGeneralizedTorque()) return OptimalControlProgram( biorbd_model, dynamics, n_shooting, final_time, x_init, u_init, x_bounds, u_bounds, objective_functions, constraints, ode_solver=ode_solver, )
MIT License
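For context on how the prepare_ocp helper above is meant to be driven, here is a minimal usage sketch (not part of the repository). It assumes the same "models/cube.bioMod" model file as the original main() and simply switches to actuator_type=1 (torque activations) with the online plotter disabled.

# Illustrative sketch: reuse prepare_ocp from the example above with
# torque-activation dynamics instead of torque-driven ones.
from bioptim import Solver

ocp = prepare_ocp(
    "models/cube.bioMod",   # same model file as in main()
    n_shooting=30,
    final_time=2,
    actuator_type=1,        # 1 -> TORQUE_ACTIVATIONS_DRIVEN, 2 -> TORQUE_DRIVEN + torque-max constraint
)
sol = ocp.solve(Solver.IPOPT(show_online_optim=False))  # headless solve
sol.animate()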
pesser/edflow
edflow/eval/pipeline.py
apply_callbacks
python
def apply_callbacks(callbacks, root, in_data, out_data, config, callback_kwargs={}):
    outputs = {}
    for name, cb in callbacks.items():
        print(
            "Running callback '{}' on dataset '{}'".format(name, type(in_data).__name__)
        )
        kwargs = callback_kwargs.get(name, {})
        outputs[name] = cb(root, in_data, out_data, config, **kwargs)
    return outputs
Runs all given callbacks on the datasets ``in_data`` and ``out_data``.

Parameters
----------
callbacks : dict(name: Callable)
    Dict of all callbacks to apply. All callbacks must accept at least the
    signature ``callback(root, data_in, data_out, config)``. If supplied via
    the config, additional keyword arguments are passed to the callback.
    These are expected under the keypath ``eval_pipeline/callback_kwargs``.
in_data : DatasetMixin
    Dataset used to generate the content in ``out_data``.
out_data : DatasetMixin
    Generated data. Example i is expected to be generated using ``in_data[i]``.
config : dict
    edflow config dictionary.
callback_kwargs : dict
    Keyword arguments for the callbacks.

Returns
-------
outputs : dict(name: callback output)
    All results generated by the callbacks at the corresponding key.
https://github.com/pesser/edflow/blob/317cb1b61bf810a68004788d08418a5352653264/edflow/eval/pipeline.py#L803-L838
""" def __init__(self, config, root, model, **kwargs): self.model = model self.hooks += [EvalHook(self.dataset, callbacks={'cool_cb': my_callback}, config=config, # Must be specified for edeval step_getter=self.get_global_step)] def eval_op(self, inputs): return {'generated': self.model(inputs)} self.step_ops(self): return self.eval_op Next you run your evaluation on your data using your favourite edflow command. .. code-block:: bash edflow -n myexperiment -e the_config.yaml -p path_to_project This will create a new evaluation folder inside your project's eval directory. Inside this folder everything returned by your step ops is stored. In the case above this would mean your outputs would be stored as ``generated:index.something``. But you don't need to concern yourself with that, as the outputs can now be loaded using the :class:`EvalDataFolder`. All you need to do is pass the EvalDataFolder the root folder in which the data has been saved, which is the folder where you can find the ``model_outputs.csv``. Now you have all the generated data easily usable at hand. The indices of the data in the EvalDataFolder correspond to the indices of the data in the dataset, which was used to create the model outputs. So you can directly compare inputs, targets etc, with the outputs of your model! If you specified a callback, this all happens automatically. Each callback receives at least 4 parameters: The ``root``, where the data lives, the two datasets ``data_in``, which was fed into the model and ``data_out``, which was generated by the model, and the ``config``. You can specify additional keyword arguments by defining them in the config under ``eval_pipeline/callback_kwargs``. Should you want to run evaluations on the generated data after it has been generated, you can run the ``edeval`` command while specifying the path to the model outputs csv and the callbacks you want to run. .. code-block:: bash edeval -c path/to/model_outputs.csv -cb name1:callback1 name2:callback2 The callbacks must be supplied using ``name:callback`` pairs. Names must be unique as ``edeval`` will construct a dictionary from these inputs. If at some point you need to specify new parameters in your config or change existing ones, you can do so exactly like you would when running the ``edflow`` command. Simply pass the parameters you want to add/change via the commandline like this: .. code-block:: bash edeval -c path/to/model_outputs.csv -cb name1:callback1 --key1 val1 --key/path/2 val2 .. warning:: Changing config parameters from the commandline adds some dangers to the eval worklow: E.g. you can change parameters which determine the construction of the generating dataset, which potentially breaks the mapping between inputs and outputs. 
""" import os, shutil import numpy as np import yaml from PIL import Image from edflow.data.util import adjust_support from edflow.util import walk, retrieve, pop_keypath, set_value from edflow.data.dataset import DatasetMixin from edflow.data.believers.meta import MetaDataset from edflow.project_manager import ProjectManager as P from edflow.hooks.hook import Hook from edflow.custom_logging import get_logger LOADABLE_EXTS = ["png", "npy"] class EvalHook(Hook): def __init__( self, datasets, sub_dir_keys=[], labels_key=None, callbacks={}, config=None, step_getter=None, keypath="step_ops", clean_after_callbacks=False, ): self.logger = get_logger(self) config_cbs = retrieve(config, "eval_pipeline/callbacks", default={}) callbacks.update(config_cbs) self.cb_names = list(callbacks.keys()) self.cb_paths = list(callbacks.values()) self.cbacks = load_callbacks(callbacks) self.logger.info("{}".format(self.cbacks)) self.sdks = sub_dir_keys self.labels_key = labels_key if self.labels_key is None: self.labels_key = os.path.join(keypath, "labels") self.datasets = datasets self.data_in = self.datasets["validation"] self.config = config self.gs = step_getter self.keypath = keypath self.clean_after_callbacks = clean_after_callbacks def before_epoch(self, epoch): self.root = os.path.join(P.latest_eval, str(self.gs())) self.save_root = os.path.join(self.root, "model_outputs") os.makedirs(self.root, exist_ok=True) os.makedirs(self.save_root, exist_ok=True) os.makedirs(os.path.join(self.save_root, "labels"), exist_ok=True) self.label_arrs = None def before_step(self, step, fetches, feeds, batch): self.idxs = np.array(batch["index_"], dtype=int) def after_step(self, step, last_results): label_vals = pop_keypath(last_results, self.labels_key, default={}) idxs = self.idxs path_dicts = save_output( root=self.save_root, example=last_results, index=idxs, sub_dir_keys=self.sdks, keypath=self.keypath, ) for idx in idxs: for key, path in path_dicts[idx].items(): if key not in label_vals: label_vals[key] = [] label_vals[key] += [path] for key in list(path_dicts[idxs[0]].keys()): label_vals[key] = np.array(label_vals[key]) if self.label_arrs is None: self.label_arrs = {} for k in label_vals.keys(): example = label_vals[k][0] ex_shape = list(np.shape(example)) shape = [len(self.data_in)] + ex_shape s = "x".join([str(s) for s in shape]) dtype = d = example.dtype k_ = k.replace("/", "--") savepath = os.path.join( self.save_root, "labels", "{}-*-{}-*-{}.npy".format(k_, s, d) ) memmap = np.memmap(savepath, shape=tuple(shape), mode="w+", dtype=dtype) self.label_arrs[k] = memmap for k in label_vals.keys(): for i, idx in enumerate(idxs): self.label_arrs[k][idx] = label_vals[k][i] def at_exception(self, *args, **kwargs): self.exception_occured = True if hasattr(self, "root"): self.save_meta() self.logger.info("Warning: Evaluation data is incomplete!") def after_epoch(self, epoch): self.save_meta() data_out = MetaDataset(self.save_root) data_out.expand = True data_out.append_labels = True cb_kwargs = retrieve(self.config, "eval_pipeline/callback_kwargs", default={}) results = dict() for n, cb in self.cbacks.items(): self.logger.info( "Running callback '{}' on dataset '{}'".format( n, type(self.data_in).__name__ ) ) kwargs = cb_kwargs.get(n, {}) results[n] = cb(self.root, self.data_in, data_out, self.config, **kwargs) if self.clean_after_callbacks: if not os.path.split(self.save_root)[1] == "model_outputs": raise ValueError( "Expected a 'model_outputs' directory " "to clean but found {}.".format(self.save_root) ) 
self.logger.info("Cleaning up evaluation data at {}".format(self.save_root)) shutil.rmtree(self.save_root, ignore_errors=True) return results def save_meta(self): if not hasattr(self, "exception_occured"): had_exception = "" else: had_exception = f" .. warning ::\n\n An exception occured during creation.\n\n" description = f" # Model Outputs\n{had_exception}" meta_path = add_meta_data(self.save_root, self.config, description) cb_names = self.cb_names cb_paths = self.cb_paths if cb_names: cbs = " ".join("{}:{}".format(k, v) for k, v in zip(cb_names, cb_paths)) else: cbs = "<name>:<your callback>" self.logger.info("MODEL_OUTPUT_ROOT {}".format(self.save_root)) self.logger.info( "All data has been produced. You can now also run all" + " callbacks using the following command:\n" + f"edeval -m {self.save_root} -c {cbs}" ) self.logger.info( "To directly reuse the data simply use the following command:\n" + "from edflow.data.believers.meta import MetaDataset\n" + f'M = MetaDataset("{os.path.abspath(self.save_root)}")\n' ) class TemplateEvalHook(EvalHook): def __init__(self, *args, **kwargs): cb_handler = kwargs.pop("callback_handler", None) super().__init__(*args, **kwargs) self.cb_handler = cb_handler def before_epoch(self, *args, **kwargs): self._active = True super().before_epoch(*args, **kwargs) def before_step(self, *args, **kwargs): if self._active: super().before_step(*args, **kwargs) def after_step(self, step, last_results): tmp = object() if retrieve(last_results, self.keypath, default=tmp) in [None, tmp]: self._active = False if self._active: super().after_step(step, last_results) def after_epoch(self, *args, **kwargs): if self._active: cb_results = super().after_epoch(*args, **kwargs) if self.cb_handler is not None: results = dict() set_value(results, self.keypath, cb_results) paths = [self.keypath + "/" + cb for cb in self.cb_names] self.cb_handler(results=results, paths=paths) self._active = False def at_exception(self, *args, **kwargs): if self._active: super().at_exception(*args, **kwargs) def save_output(root, example, index, sub_dir_keys=[], keypath="step_ops"): example = retrieve(example, keypath) sub_dirs = [""] * len(index) for subk in sub_dir_keys: sub_vals = _delget(example, subk) for i, sub_val in enumerate(sub_vals): name = "{}:{}".format(subk, sub_val) name = name.replace("/", "--") sub_dirs[i] = os.path.join(sub_dirs[i], name) roots = [os.path.join(root, sub_dir) for sub_dir in sub_dirs] for r in roots: os.makedirs(r, exist_ok=True) roots += [root] path_dicts = {} for i, [idx, root] in enumerate(zip(index, roots)): path_dict = {} for n, e in example.items(): savename = "{}_{:0>6d}.{{}}".format(n, idx) path = os.path.join(root, savename) savedir = os.path.split(path)[0] os.makedirs(savedir, exist_ok=True) path, inferred_loader = save_example(path, e[i]) path_dict[f"{n}:{inferred_loader}"] = path path_dicts[idx] = path_dict return path_dicts def add_meta_data(eval_root, metadata, description=None): meta_string = yaml.dump(metadata) meta_path = os.path.join(eval_root, "meta.yaml") with open(meta_path, "w+") as meta_file: if description is None: description = "Created with the `EvalHook`" meta_file.write(f"description: |\n{description}") meta_file.write(meta_string) return meta_path def _delget(d, k): v = d[k] del d[k] return v def save_example(savepath, datum): saver, ending = determine_saver(datum) loader_name = determine_loader(ending) savepath = savepath.format(ending) saver(savepath, datum) return savepath, loader_name def determine_saver(py_obj): if isinstance(py_obj, 
np.ndarray): if isimage(py_obj): return image_saver, "png" else: return np_saver, "npy" elif isinstance(py_obj, str): return txt_saver, "txt" else: raise NotImplementedError( "There currently is not saver heuristic " + "for {}".format(type(py_obj)) ) def determine_loader(ext): if ext == "png": return "image" elif ext == "npy": return "np" else: raise ValueError("Cannot load file with extension `{}`".format(ext)) def decompose_name(name): try: splits = name.split("_") rest = splits[-1] datum_name = "_".join(splits[:-1]) index, ending = rest.split(".") return int(index), datum_name, ending except Exception as e: print("Faulty name:", name) raise e def is_loadable(filename): if "." in filename: name, ext = filename.split(".") if ext not in LOADABLE_EXTS: return False elif name.count("_") != 1: return False else: return True else: return False def isimage(np_arr): shape = np_arr.shape return len(shape) == 3 and shape[-1] in [1, 3, 4] def image_saver(savepath, image): im_adjust = adjust_support(image, "0->255", clip=True) modes = {1: "L", 3: "RGB", 4: "RGBA"} mode = modes[im_adjust.shape[-1]] if mode == "L": im_adjust = np.squeeze(im_adjust, -1) im = Image.fromarray(im_adjust, mode) im.save(savepath) def np_saver(savepath, np_arr): np.save(savepath, np_arr) def standalone_eval_meta_dset( path_to_meta_dir, callbacks, additional_kwargs={}, other_config=None ): from edflow.util import get_obj_from_str from edflow.config import update_config import yaml if other_config is not None: with open(other_config, "r") as f: other_config = yaml.full_load(f) else: other_config = {} out_data = MetaDataset(path_to_meta_dir) out_data.expand = True out_data.append_labels = True config = out_data.meta if not "datasets" in config: config["datasets"] = {"train": config["dataset"]} if "validation_dataset" in config: config["datasets"]["validation"] = config["validation_dataset"] datasets = dict( (split, get_obj_from_str(config["datasets"][split])) for split in config["datasets"] ) in_data = datasets["validation"](config=config) in_data.expand = True update_config(config, additional_kwargs) config.update(other_config) config_callbacks, callback_kwargs = config2cbdict(config) callbacks.update(config_callbacks) callbacks = load_callbacks(callbacks) root = os.path.dirname(path_to_meta_dir) outputs = apply_callbacks( callbacks, root, in_data, out_data, config, callback_kwargs ) return outputs def load_callbacks(callbacks): import importlib import sys sys.path.append(os.getcwd()) loaded_callbacks = dict() for name, cb in callbacks.items(): if isinstance(cb, str): module = ".".join(cb.split(".")[:-1]) module = importlib.import_module(module) cb = getattr(module, cb.split(".")[-1]) loaded_callbacks[name] = cb else: loaded_callbacks[name] = cb return loaded_callbacks
MIT License
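A minimal sketch of how apply_callbacks above can be driven outside the EvalHook; the callback name, the threshold keyword, and the two dataset variables are placeholders, not edflow API.

def my_metric(root, data_in, data_out, config, threshold=0.5):
    # placeholder callback: anything returned here is collected under its name
    return {"n_examples": len(data_out), "threshold": threshold}

results = apply_callbacks(
    callbacks={"my_metric": my_metric},
    root="project/eval/000123",          # folder containing the model outputs (placeholder)
    in_data=validation_dataset,          # DatasetMixin the outputs were generated from (placeholder)
    out_data=model_output_dataset,       # e.g. a MetaDataset over the eval folder (placeholder)
    config=config,                       # edflow config dict (placeholder)
    callback_kwargs={"my_metric": {"threshold": 0.9}},
)
print(results["my_metric"])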
jithurjacob/windows-10-toast-notifications
win10toast/__init__.py
ToastNotifier.on_destroy
python
def on_destroy(self, hwnd, msg, wparam, lparam):
    nid = (self.hwnd, 0)
    Shell_NotifyIcon(NIM_DELETE, nid)
    PostQuitMessage(0)
    return None
Clean up after the notification has ended. :hwnd: :msg: :wparam: :lparam:
https://github.com/jithurjacob/windows-10-toast-notifications/blob/9d52b73f1af6c60162cf09b99269c4f7b13cdb00/win10toast/__init__.py#L144-L156
from __future__ import absolute_import from __future__ import print_function from __future__ import unicode_literals __all__ = ['ToastNotifier'] import logging import threading from os import path from time import sleep from pkg_resources import Requirement from pkg_resources import resource_filename from win32api import GetModuleHandle from win32api import PostQuitMessage from win32con import CW_USEDEFAULT from win32con import IDI_APPLICATION from win32con import IMAGE_ICON from win32con import LR_DEFAULTSIZE from win32con import LR_LOADFROMFILE from win32con import WM_DESTROY from win32con import WM_USER from win32con import WS_OVERLAPPED from win32con import WS_SYSMENU from win32gui import CreateWindow from win32gui import DestroyWindow from win32gui import LoadIcon from win32gui import LoadImage from win32gui import NIF_ICON from win32gui import NIF_INFO from win32gui import NIF_MESSAGE from win32gui import NIF_TIP from win32gui import NIM_ADD from win32gui import NIM_DELETE from win32gui import NIM_MODIFY from win32gui import RegisterClass from win32gui import UnregisterClass from win32gui import Shell_NotifyIcon from win32gui import UpdateWindow from win32gui import WNDCLASS class ToastNotifier(object): def __init__(self): self._thread = None def _show_toast(self, title, msg, icon_path, duration): message_map = {WM_DESTROY: self.on_destroy, } self.wc = WNDCLASS() self.hinst = self.wc.hInstance = GetModuleHandle(None) self.wc.lpszClassName = str("PythonTaskbar") self.wc.lpfnWndProc = message_map try: self.classAtom = RegisterClass(self.wc) except: pass style = WS_OVERLAPPED | WS_SYSMENU self.hwnd = CreateWindow(self.classAtom, "Taskbar", style, 0, 0, CW_USEDEFAULT, CW_USEDEFAULT, 0, 0, self.hinst, None) UpdateWindow(self.hwnd) if icon_path is not None: icon_path = path.realpath(icon_path) else: icon_path = resource_filename(Requirement.parse("win10toast"), "win10toast/data/python.ico") icon_flags = LR_LOADFROMFILE | LR_DEFAULTSIZE try: hicon = LoadImage(self.hinst, icon_path, IMAGE_ICON, 0, 0, icon_flags) except Exception as e: logging.error("Some trouble with the icon ({}): {}" .format(icon_path, e)) hicon = LoadIcon(0, IDI_APPLICATION) flags = NIF_ICON | NIF_MESSAGE | NIF_TIP nid = (self.hwnd, 0, flags, WM_USER + 20, hicon, "Tooltip") Shell_NotifyIcon(NIM_ADD, nid) Shell_NotifyIcon(NIM_MODIFY, (self.hwnd, 0, NIF_INFO, WM_USER + 20, hicon, "Balloon Tooltip", msg, 200, title)) sleep(duration) DestroyWindow(self.hwnd) UnregisterClass(self.wc.lpszClassName, None) return None def show_toast(self, title="Notification", msg="Here comes the message", icon_path=None, duration=5, threaded=False): if not threaded: self._show_toast(title, msg, icon_path, duration) else: if self.notification_active(): return False self._thread = threading.Thread(target=self._show_toast, args=(title, msg, icon_path, duration)) self._thread.start() return True def notification_active(self): if self._thread != None and self._thread.is_alive(): return True return False
MIT License
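A small usage sketch for the ToastNotifier class whose on_destroy handler is shown above, based on the show_toast and notification_active methods from the module context; the title and message text are illustrative.

import time
from win10toast import ToastNotifier

toaster = ToastNotifier()
toaster.show_toast("Build finished",      # illustrative title
                   "All tests passed",    # illustrative message
                   duration=5, threaded=True)
while toaster.notification_active():      # wait for the threaded toast to close
    time.sleep(0.1)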
purestorage-openconnect/py-pure-client
pypureclient/flasharray/FA_2_8/api/snmp_managers_api.py
SNMPManagersApi.api28_snmp_managers_patch_with_http_info
python
def api28_snmp_managers_patch_with_http_info(
    self,
    snmp_manager=None,
    authorization=None,
    x_request_id=None,
    names=None,
    async_req=False,
    _return_http_data_only=False,
    _preload_content=True,
    _request_timeout=None,
):
    if names is not None:
        if not isinstance(names, list):
            names = [names]
    params = {k: v for k, v in six.iteritems(locals()) if v is not None}
    if params.get('filter'):
        params['filter'] = str(params['filter'])
    if params.get('sort'):
        params['sort'] = [str(_x) for _x in params['sort']]
    if snmp_manager is None:
        raise TypeError("Missing the required parameter `snmp_manager` when calling `api28_snmp_managers_patch`")
    collection_formats = {}
    path_params = {}
    query_params = []
    if 'names' in params:
        query_params.append(('names', params['names']))
        collection_formats['names'] = 'csv'
    header_params = {}
    if 'authorization' in params:
        header_params['Authorization'] = params['authorization']
    if 'x_request_id' in params:
        header_params['X-Request-ID'] = params['x_request_id']
    form_params = []
    local_var_files = {}
    body_params = None
    if 'snmp_manager' in params:
        body_params = params['snmp_manager']
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])
    header_params['Content-Type'] = self.api_client.select_header_content_type(
        ['application/json'])
    auth_settings = []
    return self.api_client.call_api(
        '/api/2.8/snmp-managers', 'PATCH',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='SnmpManagerResponse',
        auth_settings=auth_settings,
        async_req=async_req,
        _return_http_data_only=_return_http_data_only,
        _preload_content=_preload_content,
        _request_timeout=_request_timeout,
        collection_formats=collection_formats,
    )
Modify SNMP manager

Modifies the name or the protocol attributes of the specified SNMP manager. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True

>>> thread = api.api28_snmp_managers_patch_with_http_info(snmp_manager, async_req=True)
>>> result = thread.get()

:param SnmpManagerPatch snmp_manager: (required)
:param str authorization: Access token (in JWT format) required to use any API endpoint (except `/oauth2`, `/login`, and `/logout`)
:param str x_request_id: Supplied by client during request or generated by server.
:param list[str] names: Performs the operation on the unique name specified. Enter multiple names in comma-separated format. For example, `name01,name02`.
:param bool async_req: Request runs in separate thread and method returns multiprocessing.pool.ApplyResult.
:param bool _return_http_data_only: Returns only data field.
:param bool _preload_content: Response is converted into objects.
:param int _request_timeout: Total request timeout in seconds. It can also be a tuple of (connection time, read time) timeouts.
:return: SnmpManagerResponse
    If the method is called asynchronously, returns the request thread.
https://github.com/purestorage-openconnect/py-pure-client/blob/2d9fdef0b73321cea9613e7d1eb881b42845099b/pypureclient/flasharray/FA_2_8/api/snmp_managers_api.py#L237-L330
from __future__ import absolute_import import re import six from typing import List, Optional from .. import models class SNMPManagersApi(object): def __init__(self, api_client): self.api_client = api_client def api28_snmp_managers_delete_with_http_info( self, authorization=None, x_request_id=None, names=None, async_req=False, _return_http_data_only=False, _preload_content=True, _request_timeout=None, ): if names is not None: if not isinstance(names, list): names = [names] params = {k: v for k, v in six.iteritems(locals()) if v is not None} if params.get('filter'): params['filter'] = str(params['filter']) if params.get('sort'): params['sort'] = [str(_x) for _x in params['sort']] collection_formats = {} path_params = {} query_params = [] if 'names' in params: query_params.append(('names', params['names'])) collection_formats['names'] = 'csv' header_params = {} if 'authorization' in params: header_params['Authorization'] = params['authorization'] if 'x_request_id' in params: header_params['X-Request-ID'] = params['x_request_id'] form_params = [] local_var_files = {} body_params = None header_params['Accept'] = self.api_client.select_header_accept( ['application/json']) header_params['Content-Type'] = self.api_client.select_header_content_type( ['application/json']) auth_settings = [] return self.api_client.call_api( '/api/2.8/snmp-managers', 'DELETE', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type=None, auth_settings=auth_settings, async_req=async_req, _return_http_data_only=_return_http_data_only, _preload_content=_preload_content, _request_timeout=_request_timeout, collection_formats=collection_formats, ) def api28_snmp_managers_get_with_http_info( self, authorization=None, x_request_id=None, continuation_token=None, filter=None, limit=None, names=None, offset=None, sort=None, total_item_count=None, async_req=False, _return_http_data_only=False, _preload_content=True, _request_timeout=None, ): if names is not None: if not isinstance(names, list): names = [names] if sort is not None: if not isinstance(sort, list): sort = [sort] params = {k: v for k, v in six.iteritems(locals()) if v is not None} if params.get('filter'): params['filter'] = str(params['filter']) if params.get('sort'): params['sort'] = [str(_x) for _x in params['sort']] if 'limit' in params and params['limit'] < 1: raise ValueError("Invalid value for parameter `limit` when calling `api28_snmp_managers_get`, must be a value greater than or equal to `1`") if 'offset' in params and params['offset'] < 0: raise ValueError("Invalid value for parameter `offset` when calling `api28_snmp_managers_get`, must be a value greater than or equal to `0`") collection_formats = {} path_params = {} query_params = [] if 'continuation_token' in params: query_params.append(('continuation_token', params['continuation_token'])) if 'filter' in params: query_params.append(('filter', params['filter'])) if 'limit' in params: query_params.append(('limit', params['limit'])) if 'names' in params: query_params.append(('names', params['names'])) collection_formats['names'] = 'csv' if 'offset' in params: query_params.append(('offset', params['offset'])) if 'sort' in params: query_params.append(('sort', params['sort'])) collection_formats['sort'] = 'csv' if 'total_item_count' in params: query_params.append(('total_item_count', params['total_item_count'])) header_params = {} if 'authorization' in params: header_params['Authorization'] = params['authorization'] if 'x_request_id' in params: 
header_params['X-Request-ID'] = params['x_request_id'] form_params = [] local_var_files = {} body_params = None header_params['Accept'] = self.api_client.select_header_accept( ['application/json']) header_params['Content-Type'] = self.api_client.select_header_content_type( ['application/json']) auth_settings = [] return self.api_client.call_api( '/api/2.8/snmp-managers', 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='SnmpManagerGetResponse', auth_settings=auth_settings, async_req=async_req, _return_http_data_only=_return_http_data_only, _preload_content=_preload_content, _request_timeout=_request_timeout, collection_formats=collection_formats, )
BSD 2-Clause Simplified License
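As a rough sketch of calling the wrapper above: api_client is assumed to be an already-authenticated ApiClient, and snmp_manager_body an SnmpManagerPatch model instance whose construction is omitted because the model's fields are not shown in this excerpt.

# Hedged usage sketch; object construction details are assumptions.
api = SNMPManagersApi(api_client)            # api_client: authenticated ApiClient (assumed)
resp = api.api28_snmp_managers_patch_with_http_info(
    snmp_manager=snmp_manager_body,          # an SnmpManagerPatch instance (construction omitted)
    names=["manager01"],                     # passed as a list, serialized as csv on the wire
    async_req=False,
)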
openforcefield/openff-interchange
openff/interchange/interoperability_tests/test_interoperability_pathways.py
openff_openmm_pmd_gmx
python
def openff_openmm_pmd_gmx(
    topology: Topology,
    forcefield: ForceField,
    box: ArrayQuantity,
    prefix: str,
) -> None:
    topology.box_vectors = box.to(unit.nanometer).magnitude * omm_unit.nanometer
    omm_sys = forcefield.create_openmm_system(topology)
    struct = pmd.openmm.load_topology(
        system=omm_sys,
        topology=topology.to_openmm(),
        xyz=topology.topology_molecules[0].reference_molecule.conformers[0],
    )
    for res in struct.residues:
        res.name = "FOO"
    struct.save(prefix + ".gro")
    struct.save(prefix + ".top")
Pipeline to write GROMACS files from an OpenMM system through ParmEd
https://github.com/openforcefield/openff-interchange/blob/a080e348b62c36c3c6a6b04e8afde64556f3186e/openff/interchange/interoperability_tests/test_interoperability_pathways.py#L18-L40
import tempfile import numpy as np import parmed as pmd import pytest from openff.toolkit.topology import Molecule, Topology from openff.toolkit.typing.engines.smirnoff import ForceField from openff.units import unit from openff.utilities import temporary_cd from openmm import unit as omm_unit from openff.interchange.components.interchange import Interchange from openff.interchange.drivers.gromacs import _get_mdp_file, _run_gmx_energy from openff.interchange.testing.utils import needs_gmx from openff.interchange.types import ArrayQuantity
MIT License
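A sketch of driving openff_openmm_pmd_gmx above end to end; the SMILES string, force field file name, box size, and output prefix are illustrative choices, not values from the test module.

import numpy as np
from openff.toolkit.topology import Molecule, Topology
from openff.toolkit.typing.engines.smirnoff import ForceField
from openff.units import unit

molecule = Molecule.from_smiles("CCO")        # illustrative molecule
molecule.generate_conformers(n_conformers=1)  # the pipeline needs at least one conformer
topology = Topology.from_molecules([molecule])
forcefield = ForceField("openff-1.0.0.offxml")
box = np.eye(3) * 4.0 * unit.nanometer        # 4 nm cubic box as an ArrayQuantity

openff_openmm_pmd_gmx(topology, forcefield, box, prefix="ethanol")  # writes ethanol.gro / ethanol.top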
robotlocomotion/director
src/python/director/segmentationroutines.py
fitDrillBarrel
python
def fitDrillBarrel(drillPoints, forwardDirection, plane_origin, plane_normal):
    if not drillPoints.GetNumberOfPoints():
        return

    vis.updatePolyData(drillPoints, 'drill cluster', parent=getDebugFolder(), visible=False)
    drillBarrelPoints = thresholdPoints(drillPoints, 'dist_to_plane', [0.177, 0.30])

    if not drillBarrelPoints.GetNumberOfPoints():
        return

    linePoint, lineDirection, _ = applyLineFit(drillBarrelPoints, distanceThreshold=0.5)

    if np.dot(lineDirection, forwardDirection) < 0:
        lineDirection = -lineDirection

    vis.updatePolyData(drillBarrelPoints, 'drill barrel points', parent=getDebugFolder(), visible=False)

    pts = vtkNumpy.getNumpyFromVtk(drillBarrelPoints, 'Points')
    dists = np.dot(pts-linePoint, lineDirection)

    p1 = linePoint + lineDirection*np.min(dists)
    p2 = linePoint + lineDirection*np.max(dists)

    p1 = projectPointToPlane(p1, plane_origin, plane_normal)
    p2 = projectPointToPlane(p2, plane_origin, plane_normal)

    d = DebugData()
    d.addSphere(p1, radius=0.01)
    d.addSphere(p2, radius=0.01)
    d.addLine(p1, p2)
    vis.updatePolyData(d.getPolyData(), 'drill debug points', color=[0,1,0], parent=getDebugFolder(), visible=False)

    drillToBasePoint = np.array([-0.07, 0.0, -0.12])

    zaxis = plane_normal
    xaxis = lineDirection
    xaxis /= np.linalg.norm(xaxis)
    yaxis = np.cross(zaxis, xaxis)
    yaxis /= np.linalg.norm(yaxis)
    xaxis = np.cross(yaxis, zaxis)
    xaxis /= np.linalg.norm(xaxis)

    t = getTransformFromAxes(xaxis, yaxis, zaxis)
    t.PreMultiply()
    t.Translate(-drillToBasePoint)
    t.PostMultiply()
    t.Translate(p1)

    return t
Given a point cloud which ONLY contains points from a barrel drill standing upright, the equation of the table plane it is resting on, and the general forward direction of the robot, fit the barrel drill.
https://github.com/robotlocomotion/director/blob/4c3e570a6797ff840c74067c742455daaa113d93/src/python/director/segmentationroutines.py#L301-L360
from director.filterUtils import * import director.visualization as vis from director import objectmodel as om from director.transformUtils import getTransformFromAxes from director import vtkAll as vtk from . import vtkNumpy import numpy as np from .shallowCopy import shallowCopy from .debugVis import DebugData class SegmentationContext(object): def __init__(self, groundHeightProvider, viewProvider): self.groundHeightProvider = groundHeightProvider self.viewProvider = viewProvider def getGroundHeight(self): return self.groundHeightProvider.getGroundHeight() def getViewFrame(self): return self.viewProvider.getViewFrame() def getViewOrigin(self): return self.viewProvider.getViewOrigin() def getViewDirection(self): return self.viewProvider.getViewDirection() ''' These static methods are provided for convenience to initialize a globalally accessible instance of the SegmentationContext. ''' _globalSegmentationContext = None @staticmethod def installGlobalInstance(inst): if SegmentationContext._globalSegmentationContext is not None: raise Exception('Error, a global segmentation context instance is already installed.') SegmentationContext._globalSegmentationContext = inst @staticmethod def getGlobalInstance(): if SegmentationContext._globalSegmentationContext is None: raise Exception('Error, the global segmentation context instance has not been initialized.') return SegmentationContext._globalSegmentationContext @staticmethod def initWithRobot(model): sc = SegmentationContext(RobotModelGroundHeightProvider(model), RobotModelViewProvider(model)) SegmentationContext.installGlobalInstance(sc) @staticmethod def initWithCamera(camera, userGroundHeight): sc = SegmentationContext(UserGroundHeightProvider(userGroundHeight), CameraViewProvider(camera)) SegmentationContext.installGlobalInstance(sc) @staticmethod def initWithUser(userGroundHeight, userViewFrame, viewAxis=0): sc = SegmentationContext(UserGroundHeightProvider(userGroundHeight), UserViewProvider(userViewFrame, viewAxis)) SegmentationContext.installGlobalInstance(sc) class RobotModelGroundHeightProvider(object): def __init__(self, model): self.model = model def getGroundHeight(self): from director.footstepsdriver import FootstepsDriver return FootstepsDriver.getFeetMidPoint(self.model).GetPosition()[2] class RobotModelViewProvider(object): def __init__(self, model): self.model = model def getViewFrame(self): return self.model.getLinkFrame(self.model.getHeadLink()) def getViewOrigin(self): headFrame = self.model.getLinkFrame(self.model.getHeadLink()) return np.array(headFrame.GetPosition()) def getViewDirection(self): headFrame = self.model.getLinkFrame(self.model.getHeadLink()) viewDirection = [1,0,0] headFrame.TransformVector(viewDirection, viewDirection) return np.array(viewDirection) class UserGroundHeightProvider(object): def __init__(self, groundHeight): self.groundHeight = groundHeight def getGroundHeight(): return self.groundHeight class UserViewProvider(object): def __init__(self, viewFrame, viewAxis): self.viewFrame = viewFrame self.viewAxis = viewAxis def getViewFrame(self): return self.viewFrame def getViewOrigin(self): return np.array( self.viewFrame.GetPosition()) def getViewDirection(self): viewDirection = [0.0, 0.0, 0.0] viewDirection[self.viewAxis] = 1.0 self.viewFrame.TransformVector(viewDirection, viewDirection) return np.array(viewDirection) class CameraViewProvider(object): def __init__(self, camera): self.camera = camera def getViewFrame(self): return self.camera.GetViewTransformObject() def getViewOrigin(self): return 
np.array(self.camera.GetViewPosition()) def getViewDirection(self): return np.array(self.camera.GetViewDirection()) def getDebugFolder(): obj = om.findObjectByName('debug') if obj is None: obj = om.getOrCreateContainer('debug', om.getOrCreateContainer('segmentation')) om.collapse(obj) return obj def applyLineFit(dataObj, distanceThreshold=0.02): f = vtk.vtkPCLSACSegmentationLine() f.SetInputData(dataObj) f.SetDistanceThreshold(distanceThreshold) f.Update() origin = np.array(f.GetLineOrigin()) direction = np.array(f.GetLineDirection()) return origin, direction, shallowCopy(f.GetOutput()) def projectPointToPlane(point, origin, normal): projectedPoint = np.zeros(3) vtk.vtkPlane.ProjectPoint(point, origin, normal, projectedPoint) return projectedPoint def intersectLineWithPlane(line_point, line_ray, plane_point, plane_normal ): line_point = np.asarray(line_point) line_ray = np.asarray(line_ray) plane_point = np.asarray(plane_point) plane_normal = np.asarray(plane_normal) denom = np.dot( plane_normal , line_ray ) p0l0 = plane_point - line_point t = np.dot(p0l0, plane_normal) / denom intersection_point = line_point + t*line_ray return intersection_point def labelPointDistanceAlongAxis(polyData, axis, origin=None, resultArrayName='distance_along_axis'): points = vtkNumpy.getNumpyFromVtk(polyData, 'Points') if origin is not None: points = points - origin distanceValues = np.dot(points, axis) if origin is None: distanceValues -= np.nanmin(distanceValues) newData = shallowCopy(polyData) vtkNumpy.addNumpyToVtk(newData, distanceValues, resultArrayName) return newData def applyEuclideanClustering(dataObj, clusterTolerance=0.05, minClusterSize=100, maxClusterSize=1e6): f = vtk.vtkPCLEuclideanClusterExtraction() f.SetInputData(dataObj) f.SetClusterTolerance(clusterTolerance) f.SetMinClusterSize(int(minClusterSize)) f.SetMaxClusterSize(int(maxClusterSize)) f.Update() return shallowCopy(f.GetOutput()) def extractClusters(polyData, clusterInXY=False, **kwargs): if not polyData.GetNumberOfPoints(): return [] if (clusterInXY == True): polyDataXY = vtk.vtkPolyData() polyDataXY.DeepCopy(polyData) points=vtkNumpy.getNumpyFromVtk(polyDataXY , 'Points') points[:,2] = 0.0 polyDataXY = applyEuclideanClustering(polyDataXY, **kwargs) clusterLabels = vtkNumpy.getNumpyFromVtk(polyDataXY, 'cluster_labels') vtkNumpy.addNumpyToVtk(polyData, clusterLabels, 'cluster_labels') else: polyData = applyEuclideanClustering(polyData, **kwargs) clusterLabels = vtkNumpy.getNumpyFromVtk(polyData, 'cluster_labels') clusters = [] for i in range(1, clusterLabels.max() + 1): cluster = thresholdPoints(polyData, 'cluster_labels', [i, i]) clusters.append(cluster) return clusters def applyVoxelGrid(polyData, leafSize=0.01): v = vtk.vtkPCLVoxelGrid() v.SetLeafSize(leafSize, leafSize, leafSize) v.SetInputData(polyData) v.Update() return shallowCopy(v.GetOutput()) def labelOutliers(dataObj, searchRadius=0.03, neighborsInSearchRadius=10): f = vtk.vtkPCLRadiusOutlierRemoval() f.SetInputData(dataObj) f.SetSearchRadius(searchRadius) f.SetNeighborsInSearchRadius(int(neighborsInSearchRadius)) f.Update() return shallowCopy(f.GetOutput()) def sparsifyStereoCloud(polyData): polyData = applyVoxelGrid(polyData, leafSize=0.01) polyData = labelOutliers(polyData) vis.showPolyData(polyData, 'is_outlier', colorByName='is_outlier', visible=False, parent=getDebugFolder()) polyData = thresholdPoints(polyData, 'is_outlier', [0.0, 0.0]) return polyData
BSD 3-Clause New or Revised License
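A hedged sketch of how fitDrillBarrel above might be invoked; the point cloud, table plane, and forward direction are placeholders produced by earlier segmentation steps, and the call requires director's VTK-based stack to be available.

import numpy as np

t = fitDrillBarrel(
    drill_cluster_polydata,                      # vtkPolyData with only drill points and a 'dist_to_plane' array (placeholder)
    forwardDirection=np.array([1.0, 0.0, 0.0]),  # rough robot-forward direction (placeholder)
    plane_origin=np.array([0.0, 0.0, 0.9]),      # table plane from a prior plane fit (placeholder)
    plane_normal=np.array([0.0, 0.0, 1.0]),
)
if t is not None:
    print(t.GetPosition())   # vtkTransform positioned at the fitted drill base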
what-studio/profiling
profiling/profiler.py
Profiler.exclude_code
python
def exclude_code(self, code):
    try:
        self.stats.remove_child(code)
    except KeyError:
        pass
Excludes statistics of the given code.
https://github.com/what-studio/profiling/blob/824ae554b35909ecbb8aead8853429573e70aeee/profiling/profiler.py#L58-L63
from __future__ import absolute_import try: import cPickle as pickle except ImportError: import pickle import time from profiling.stats import RecordingStatistics from profiling.utils import frame_stack, Runnable from profiling.viewer import StatisticsTable, StatisticsViewer __all__ = ['Profiler', 'ProfilerWrapper'] class Profiler(Runnable): table_class = StatisticsTable stats = None base_frame = None base_code = None ignored_frames = () ignored_codes = () def __init__(self, base_frame=None, base_code=None, ignored_frames=(), ignored_codes=()): self.base_frame = base_frame self.base_code = base_code self.ignored_frames = ignored_frames self.ignored_codes = ignored_codes self.stats = RecordingStatistics() def start(self): self._cpu_time_started = time.clock() self._wall_time_started = time.time() self.stats.clear() return super(Profiler, self).start() def frame_stack(self, frame): return frame_stack(frame, self.base_frame, self.base_code, self.ignored_frames, self.ignored_codes)
BSD 3-Clause New or Revised License
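A minimal sketch of exclude_code in use; noisy_helper is a placeholder function, and the surrounding profiling run is elided since only the exclusion call is of interest here.

from profiling.profiler import Profiler

def noisy_helper():
    # placeholder for any function whose statistics you want dropped
    pass

profiler = Profiler()
# ... after a profiling run has recorded statistics ...
profiler.exclude_code(noisy_helper.__code__)  # drops the stats child for this code object, if present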
hyperledger/aries-protocol-test-suite
protocol_tests/connection/__init__.py
DIDDoc.dereference_key
python
def dereference_key(self, key: str) -> str:
    if not self.is_reference(key):
        return key

    key_reference = key
    found_key = next((
        public_key['publicKeyBase58']
        for public_key in self.get('publicKey', [])
        if key_reference == public_key['id']
    ), None)
    if not found_key:
        raise KeyReferenceError(
            'No key found for reference {}'.format(key)
        )
    return found_key
Dereference a key from the publicKey array. If key is not a reference, simply return the key.
https://github.com/hyperledger/aries-protocol-test-suite/blob/f60891693347a2544173def752eea7ee7db4315f/protocol_tests/connection/__init__.py#L113-L133
import json import re import uuid import base64 from collections import namedtuple from voluptuous import Schema, Optional, And, Extra, Match, Any, Exclusive from aries_staticagent import Message, crypto, route from ..schema import MessageSchema, AtLeastOne from .. import BaseHandler, Suite TheirInfo = namedtuple( 'TheirInfo', 'endpoint, recipients, routing_keys' ) class KeyReferenceError(Exception): class NoSuitableService(Exception): class DIDDoc(dict): EXPECTED_SERVICE_TYPE = 'IndyAgent' EXPECTED_SERVICE_SUFFIX = 'indy' PUBLIC_KEY_VALIDATOR = Schema({ "id": str, "type": "Ed25519VerificationKey2018", "controller": str, "publicKeyBase58": str }) VALIDATOR = Schema({ "@context": "https://w3id.org/did/v1", "id": str, "publicKey": [PUBLIC_KEY_VALIDATOR], Optional("authentication"): [ { "type": "Ed25519SignatureAuthentication2018", "publicKey": str }, PUBLIC_KEY_VALIDATOR, str ], "service": And( AtLeastOne( { 'id': Match('.*;{}$'.format(EXPECTED_SERVICE_SUFFIX)), 'type': EXPECTED_SERVICE_TYPE, 'priority': int, 'recipientKeys': [str], Optional('routingKeys'): [str], 'serviceEndpoint': str }, msg='DID Communication service missing' ), [ { "id": str, "type": str, "serviceEndpoint": str, Extra: object } ], ) }) def validate(self): self.update(DIDDoc.VALIDATOR(self)) @classmethod def make(cls, my_did, my_vk, endpoint): return cls({ "@context": "https://w3id.org/did/v1", "id": my_did, "publicKey": [{ "id": my_did + "#keys-1", "type": "Ed25519VerificationKey2018", "controller": my_did, "publicKeyBase58": my_vk }], "service": [{ "id": my_did + ";indy", "type": "IndyAgent", "recipientKeys": [my_vk], "routingKeys": [], "serviceEndpoint": endpoint, }], }) @classmethod def is_reference(cls, key: str): return '#' in key
Apache License 2.0
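Usage sketch tying dereference_key to the DIDDoc.make helper shown in the context; the DID, verkey, and endpoint strings are made-up placeholders.

doc = DIDDoc.make(
    my_did="did:example:1234abcd",                         # placeholder DID
    my_vk="3Dn1SJNPaCXcvvJvSbsFWP2xaCjMom3can8CQNhWrTRx",  # placeholder base58 verkey
    endpoint="http://example.com/endpoint",
)
# make() registers the key under "<did>#keys-1", so dereferencing that id
# returns the base58 key, while a bare key is passed through unchanged.
assert doc.dereference_key("did:example:1234abcd#keys-1") == doc["publicKey"][0]["publicKeyBase58"]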
openstack-archive/deb-python-proliantutils
proliantutils/ilo/ribcl.py
RIBCLOperations.set_pending_boot_mode
python
def set_pending_boot_mode(self, value):
    dic = {'value': value}
    data = self._execute_command(
        'SET_PENDING_BOOT_MODE', 'SERVER_INFO', 'write', dic)
    return data
Configures the pending boot mode of the system to the specified value.
https://github.com/openstack-archive/deb-python-proliantutils/blob/b9229a0ab3e7c7af0b9e59968a5c6c7fea53bd88/proliantutils/ilo/ribcl.py#L480-L485
import copy import os import re import xml.etree.ElementTree as etree from oslo_utils import strutils import requests from requests.packages import urllib3 from requests.packages.urllib3 import exceptions as urllib3_exceptions import six from proliantutils import exception from proliantutils.ilo import common from proliantutils.ilo import firmware_controller from proliantutils.ilo import operations from proliantutils import log POWER_STATE = { 'ON': 'Yes', 'OFF': 'No', } BOOT_MODE_CMDS = [ 'GET_CURRENT_BOOT_MODE', 'GET_PENDING_BOOT_MODE', 'GET_SUPPORTED_BOOT_MODE', 'SET_PENDING_BOOT_MODE' ] LOG = log.get_logger(__name__) class MaskedRequestData(object): def __init__(self, request_data): self.request_data = request_data def __str__(self): request_data_copy = copy.deepcopy(self.request_data) xml_data = request_data_copy.get('data') if xml_data: xml_data = re.sub(r'USER_LOGIN="(.*?)"', r'USER_LOGIN="*****"', xml_data) xml_data = re.sub(r'PASSWORD="(.*?)"', r'PASSWORD="*****"', xml_data) request_data_copy['data'] = xml_data return str(request_data_copy) class RIBCLOperations(operations.IloOperations): def __init__(self, host, login, password, timeout=60, port=443, cacert=None): self.host = host self.login = login self.password = password self.timeout = timeout self.port = port self.cacert = cacert if self.cacert is None: urllib3.disable_warnings(urllib3_exceptions.InsecureRequestWarning) def init_model_based_tags(self, model): self.model = model if 'G7' in self.model: self.MEMORY_SIZE_TAG = "MEMORY_SIZE" self.MEMORY_SIZE_NOT_PRESENT_TAG = "Not Installed" self.NIC_INFORMATION_TAG = "NIC_INFOMATION" else: self.MEMORY_SIZE_TAG = "TOTAL_MEMORY_SIZE" self.MEMORY_SIZE_NOT_PRESENT_TAG = "N/A" self.NIC_INFORMATION_TAG = "NIC_INFORMATION" def _request_ilo(self, root, extra_headers=None): if self.port: urlstr = 'https://%s:%d/ribcl' % (self.host, self.port) else: urlstr = 'https://%s/ribcl' % (self.host) xml = self._serialize_xml(root) headers = {"Content-length": str(len(xml))} if extra_headers: headers.update(extra_headers) kwargs = {'headers': headers, 'data': xml} if self.cacert is not None: kwargs['verify'] = self.cacert else: kwargs['verify'] = False try: LOG.debug(self._("POST %(url)s with request data: " "%(request_data)s"), {'url': urlstr, 'request_data': MaskedRequestData(kwargs)}) response = requests.post(urlstr, **kwargs) response.raise_for_status() except Exception as e: LOG.debug(self._("Unable to connect to iLO. 
%s"), e) raise exception.IloConnectionError(e) return response.text def _create_dynamic_xml(self, cmdname, tag_name, mode, subelements=None): root = etree.Element('RIBCL', VERSION="2.0") login = etree.SubElement( root, 'LOGIN', USER_LOGIN=self.login, PASSWORD=self.password) tagname = etree.SubElement(login, tag_name, MODE=mode) subelements = subelements or {} etree.SubElement(tagname, cmdname) if six.PY2: root_iterator = root.getiterator(cmdname) else: root_iterator = root.iter(cmdname) for cmd in root_iterator: for key, value in subelements.items(): cmd.set(key, value) return root def _serialize_xml(self, root): if hasattr(etree, 'tostringlist'): if six.PY3: xml_content_list = [ x.decode("utf-8") for x in etree.tostringlist(root)] else: xml_content_list = etree.tostringlist(root) xml = '\r\n'.join(xml_content_list) + '\r\n' else: if six.PY3: xml_content = etree.tostring(root).decode("utf-8") else: xml_content = etree.tostring(root) xml = xml_content + '\r\n' return xml def _parse_output(self, xml_response): count = 0 xml_dict = {} resp_message = None xml_start_pos = [] for m in re.finditer(r"\<\?xml", xml_response): xml_start_pos.append(m.start()) while count < len(xml_start_pos): if (count == len(xml_start_pos) - 1): result = xml_response[xml_start_pos[count]:] else: start = xml_start_pos[count] end = xml_start_pos[count + 1] result = xml_response[start:end] result = result.strip() message = etree.fromstring(result) resp = self._validate_message(message) if hasattr(resp, 'tag'): xml_dict = self._elementtree_to_dict(resp) elif resp is not None: resp_message = resp count = count + 1 if xml_dict: return xml_dict elif resp_message is not None: return resp_message def _elementtree_to_dict(self, element): node = {} text = getattr(element, 'text') if text is not None: text = text.strip() if len(text) != 0: node['text'] = text node.update(element.items()) child_nodes = {} for child in element: child_nodes.setdefault(child.tag, []).append( self._elementtree_to_dict(child)) for key, value in child_nodes.items(): if len(value) == 1: child_nodes[key] = value[0] node.update(child_nodes.items()) return node def _validate_message(self, message): if message.tag != 'RIBCL': status = -1 raise exception.IloClientInternalError(message, status) for child in message: if child.tag != 'RESPONSE': return message status = int(child.get('STATUS'), 16) msg = child.get('MESSAGE') if status == 0 and msg != 'No error': return msg if status != 0: if 'syntax error' in msg or 'Feature not supported' in msg: for cmd in BOOT_MODE_CMDS: if cmd in msg: platform = self.get_product_name() msg = ("%(cmd)s is not supported on %(platform)s" % {'cmd': cmd, 'platform': platform}) LOG.debug(self._("Got invalid response with " "message: '%(message)s'"), {'message': msg}) raise (exception.IloCommandNotSupportedError (msg, status)) else: LOG.debug(self._("Got invalid response with " "message: '%(message)s'"), {'message': msg}) raise exception.IloClientInternalError(msg, status) if (status in exception.IloLoginFailError.statuses or msg in exception.IloLoginFailError.messages): LOG.debug(self._("Got invalid response with " "message: '%(message)s'"), {'message': msg}) raise exception.IloLoginFailError(msg, status) LOG.debug(self._("Got invalid response with " "message: '%(message)s'"), {'message': msg}) raise exception.IloError(msg, status) def _execute_command(self, create_command, tag_info, mode, dic={}): xml = self._create_dynamic_xml( create_command, tag_info, mode, dic) d = self._request_ilo(xml) data = self._parse_output(d) 
LOG.debug(self._("Received response data: %s"), data) return data def get_all_licenses(self): data = self._execute_command('GET_ALL_LICENSES', 'RIB_INFO', 'read') d = {} for key, val in data['GET_ALL_LICENSES']['LICENSE'].items(): if isinstance(val, dict): d[key] = data['GET_ALL_LICENSES']['LICENSE'][key]['VALUE'] return d def get_product_name(self): data = self._execute_command( 'GET_PRODUCT_NAME', 'SERVER_INFO', 'read') return data['GET_PRODUCT_NAME']['PRODUCT_NAME']['VALUE'] def get_host_power_status(self): data = self._execute_command( 'GET_HOST_POWER_STATUS', 'SERVER_INFO', 'read') return data['GET_HOST_POWER']['HOST_POWER'] def get_one_time_boot(self): data = self._execute_command( 'GET_ONE_TIME_BOOT', 'SERVER_INFO', 'read') return data['ONE_TIME_BOOT']['BOOT_TYPE']['VALUE'] def get_vm_status(self, device='FLOPPY'): dic = {'DEVICE': device.upper()} data = self._execute_command( 'GET_VM_STATUS', 'RIB_INFO', 'read', dic) return data['GET_VM_STATUS'] def reset_server(self): data = self._execute_command('RESET_SERVER', 'SERVER_INFO', 'write') return data def press_pwr_btn(self): data = self._execute_command('PRESS_PWR_BTN', 'SERVER_INFO', 'write') return data def hold_pwr_btn(self): dic = {'TOGGLE': 'NO'} data = self._execute_command( 'HOLD_PWR_BTN', 'SERVER_INFO', 'write', dic) return data def set_host_power(self, power): if power.upper() in POWER_STATE: dic = {'HOST_POWER': POWER_STATE[power.upper()]} data = self._execute_command( 'SET_HOST_POWER', 'SERVER_INFO', 'write', dic) return data else: raise exception.IloInvalidInputError( "Invalid input. The expected input is ON or OFF.") def set_one_time_boot(self, value): dic = {'value': value} data = self._execute_command( 'SET_ONE_TIME_BOOT', 'SERVER_INFO', 'write', dic) return data def insert_virtual_media(self, url, device='FLOPPY'): dic = { 'DEVICE': device.upper(), 'IMAGE_URL': url, } data = self._execute_command( 'INSERT_VIRTUAL_MEDIA', 'RIB_INFO', 'write', dic) return data def eject_virtual_media(self, device='FLOPPY'): vm_status = self.get_vm_status(device=device) if vm_status['IMAGE_INSERTED'] == 'NO': return dic = {'DEVICE': device.upper()} self._execute_command( 'EJECT_VIRTUAL_MEDIA', 'RIB_INFO', 'write', dic) def set_vm_status(self, device='FLOPPY', boot_option='BOOT_ONCE', write_protect='YES'): dic = {'DEVICE': device.upper()} xml = self._create_dynamic_xml( 'SET_VM_STATUS', 'RIB_INFO', 'write', dic) if six.PY2: child_iterator = xml.getiterator() else: child_iterator = xml.iter() for child in child_iterator: if child.tag == 'SET_VM_STATUS': etree.SubElement(child, 'VM_BOOT_OPTION', VALUE=boot_option.upper()) etree.SubElement(child, 'VM_WRITE_PROTECT', VALUE=write_protect.upper()) d = self._request_ilo(xml) data = self._parse_output(d) return data def get_current_boot_mode(self): data = self._execute_command( 'GET_CURRENT_BOOT_MODE', 'SERVER_INFO', 'read') return data['GET_CURRENT_BOOT_MODE']['BOOT_MODE']['VALUE'] def get_pending_boot_mode(self): data = self._execute_command( 'GET_PENDING_BOOT_MODE', 'SERVER_INFO', 'read') return data['GET_PENDING_BOOT_MODE']['BOOT_MODE']['VALUE'] def get_supported_boot_mode(self): data = self._execute_command( 'GET_SUPPORTED_BOOT_MODE', 'SERVER_INFO', 'read') return data['GET_SUPPORTED_BOOT_MODE']['SUPPORTED_BOOT_MODE']['VALUE']
Apache License 2.0
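A short sketch of set_pending_boot_mode above; the host and credentials are placeholders, and 'UEFI' is used as a representative value (the set of accepted values can be checked via GET_SUPPORTED_BOOT_MODE).

ilo = RIBCLOperations('10.0.0.10', 'Administrator', 'password')  # placeholder host/credentials
print(ilo.get_supported_boot_mode())   # which modes the platform accepts
ilo.set_pending_boot_mode('UEFI')      # representative value; takes effect on the next reset
print(ilo.get_pending_boot_mode())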
rsmusllp/king-phisher
king_phisher/client/web_cloner.py
WebPageCloner.wait
python
def wait(self):
    while not self.load_started:
        gui_utilities.gtk_sync()
    while self.webview.get_property('is-loading') or len(self.__web_resources):
        gui_utilities.gtk_sync()
    self.webview.destroy()
    return not self.load_failed
Wait for the cloning operation to complete and return whether the operation was successful or not. :return: True if the operation was successful. :rtype: bool
https://github.com/rsmusllp/king-phisher/blob/6acbbd856f849d407cc904c075441e0cf13c25cf/king_phisher/client/web_cloner.py#L251-L264
import codecs import collections import logging import os import re import string import sys import urllib from king_phisher.client import gui_utilities import requests if sys.version_info[0] < 3: import urlparse urllib.parse = urlparse else: import urllib.parse try: from gi.repository import WebKit2 has_webkit2 = True except ImportError: has_webkit2 = False class ClonedResourceDetails(collections.namedtuple('ClonedResourceDetails', ('resource', 'mime_type', 'size', 'file_name'))): pass class WebPageCloner(object): def __init__(self, target_url, dest_dir): if not has_webkit2: raise RuntimeError('cloning requires WebKit2GTK+') self.target_url = urllib.parse.urlparse(target_url) dest_dir = os.path.abspath(dest_dir) if not os.path.exists(dest_dir): os.mkdir(dest_dir) self.dest_dir = os.path.abspath(os.path.normpath(dest_dir)) self.logger = logging.getLogger('KingPhisher.Client.WebPageScraper') self.cloned_resources = collections.OrderedDict() self.load_started = False self.load_failed_event = None self.__web_resources = [] self.webview = WebKit2.WebView() web_context = self.webview.get_context() web_context.set_cache_model(WebKit2.CacheModel.DOCUMENT_VIEWER) web_context.set_tls_errors_policy(WebKit2.TLSErrorsPolicy.IGNORE) self.webview.connect('decide-policy', self.signal_decide_policy) self.webview.connect('load-changed', self.signal_load_changed) self.webview.connect('load-failed', self.signal_load_failed) self.webview.connect('resource-load-started', self.signal_resource_load_started) self.webview.load_uri(self.target_url_str) def _webkit_empty_resource_bug_workaround(self, url, expected_len): try: response = requests.get(url, timeout=10) except requests.exceptions.RequestException: self.logger.warning('failed to request the empty resource with python') return '' if response.status_code < 200 or response.status_code > 299: self.logger.warning("requested the empty resource with python, but received status: {0} ({1})".format(response.status_code, response.reason)) return '' data = response.content if len(data) != expected_len: self.logger.warning('requested the empty resource with python, but the length appears invalid') return data @property def load_failed(self): return self.load_failed_event != None @property def target_url_str(self): return urllib.parse.urlunparse(self.target_url) def copy_resource_data(self, resource, data): resource_url_str = resource.get_property('uri') resource_url = urllib.parse.urlparse(resource_url_str) resource_path = os.path.split(resource_url.path)[0].lstrip('/') resource_path = urllib.parse.unquote(resource_path) directory = self.dest_dir for part in resource_path.split('/'): directory = os.path.join(directory, part) if not os.path.exists(directory): os.mkdir(directory) mime_type = None charset = 'utf-8' response = resource.get_response() if response and hasattr(response, 'get_http_headers'): mime_type = response.get_http_headers().get('content-type') if mime_type and ';' in mime_type: mime_type, charset = mime_type.split(';', 1) charset = charset.strip() if charset.startswith('charset='): charset = charset[8:].strip() resource_path = urllib.parse.unquote(resource_url.path) if resource_path.endswith('/'): resource_path += 'index.html' resource_path = resource_path.lstrip('/') resource_path = os.path.join(self.dest_dir, resource_path) if mime_type == 'text/html': data = self.patch_html(data, charset) with open(resource_path, 'wb') as file_h: file_h.write(data) crd = ClonedResourceDetails(urllib.parse.unquote(resource_url.path), mime_type, len(data), 
resource_path) self.cloned_resources[resource_url.path] = crd self.logger.debug("wrote {0:,} bytes to {1}".format(crd.size, resource_path)) def patch_html(self, data, encoding='utf-8'): try: codec = codecs.lookup(encoding) except LookupError as error: self.logger.warning('failed to decode data from web response, ' + error.args[0]) return data try: data = codec.decode(data)[0] except Exception as error: self.logger.error("failed to decode data from web response ({0}) using encoding {1}".format(error.__class__.__name__, encoding)) return data match = re.search(r'</head>', data, flags=re.IGNORECASE) if not match: return codec.encode(data)[0] end_head = match.start(0) patched = '' patched += data[:end_head] patched += '<script src="/kp.js" type="text/javascript"></script>' ws_cursor = end_head - 1 while ws_cursor > 0 and data[ws_cursor] in string.whitespace: ws_cursor -= 1 patched += data[ws_cursor + 1:end_head] patched += data[end_head:] return codec.encode(patched)[0] def resource_is_on_target(self, resource): resource_url = urllib.parse.urlparse(resource.get_property('uri')) if resource_url.netloc.lower() != self.target_url.netloc.lower(): return False if resource_url.scheme.lower() != self.target_url.scheme.lower(): return False rport = resource_url.port or (443 if resource_url.scheme == 'https' else 80) tport = self.target_url.port or (443 if self.target_url.scheme == 'https' else 80) if rport != tport: return False return True def stop_cloning(self): if self.webview.get_property('is-loading'): self.webview.stop_loading()
BSD 3-Clause New or Revised License
blurstudio/cross3d
cross3d/studiomax/studiomaxscenematerial.py
StudiomaxSceneMaterial._nativeSubmaterials
python
def _nativeSubmaterials(self):
    mtl = self._nativePointer
    if mxs.classof(mtl) == mxs.MultiMaterial:
        return [mtl[i] for i in range(mtl.numsubs)]
    else:
        get_submtl = mxs.getSubMtl
        return [get_submtl(mtl, i+1) for i in range(mxs.getNumSubMtls(mtl))]
The native submaterials of this material.
https://github.com/blurstudio/cross3d/blob/277968d1227de740fc87ef61005c75034420eadf/cross3d/studiomax/studiomaxscenematerial.py#L22-L32
import os.path from Py3dsMax import mxs from cross3d.constants import MaterialType, MapType from cross3d.abstract.abstractscenematerial import AbstractSceneMaterial from cross3d import Scene class StudiomaxSceneMaterial(AbstractSceneMaterial):
MIT License
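A minimal usage sketch for _nativeSubmaterials above. How the material object is obtained from a live 3ds Max scene is not shown in this record, so that part is a placeholder.

# `material` is assumed to be an existing StudiomaxSceneMaterial; looking it
# up from the scene is outside the scope of this record.
material = some_existing_studiomax_material
for native_mtl in material._nativeSubmaterials():
    # A MultiMaterial yields its numbered slots; other materials yield
    # whatever mxs.getSubMtl exposes (possibly None for empty slots).
    if native_mtl is not None:
        print(native_mtl)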
geertj/python-ad
lib/ad/protocol/asn1.py
Encoder.write
python
def write(self, value, nr=None, typ=None, cls=None):
    if self.m_stack is None:
        raise Error, 'Encoder not initialized. Call start() first.'
    if nr is None:
        if isinstance(value, int) or isinstance(value, long):
            nr = Integer
        elif isinstance(value, str) or isinstance(value, unicode):
            nr = OctetString
        elif value is None:
            nr = Null
    if typ is None:
        typ = TypePrimitive
    if cls is None:
        cls = ClassUniversal
    value = self._encode_value(nr, value)
    self._emit_tag(nr, typ, cls)
    self._emit_length(len(value))
    self._emit(value)
Write a primitive data value.
https://github.com/geertj/python-ad/blob/3089eae072bd2e871c11251961ec35a09b83dd38/lib/ad/protocol/asn1.py#L64-L82
Boolean = 0x01 Integer = 0x02 OctetString = 0x04 Null = 0x05 ObjectIdentifier = 0x06 Enumerated = 0x0a Sequence = 0x10 Set = 0x11 TypeConstructed = 0x20 TypePrimitive = 0x00 ClassUniversal = 0x00 ClassApplication = 0x40 ClassContext = 0x80 ClassPrivate = 0xc0 import re class Error(Exception): class Encoder(object): def __init__(self): self.m_stack = None def start(self): self.m_stack = [[]] def enter(self, nr, cls=None): if self.m_stack is None: raise Error, 'Encoder not initialized. Call start() first.' if cls is None: cls = ClassUniversal self._emit_tag(nr, TypeConstructed, cls) self.m_stack.append([]) def leave(self): if self.m_stack is None: raise Error, 'Encoder not initialized. Call start() first.' if len(self.m_stack) == 1: raise Error, 'Tag stack is empty.' value = ''.join(self.m_stack[-1]) del self.m_stack[-1] self._emit_length(len(value)) self._emit(value)
MIT License
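A short sketch of how Encoder.write above combines with the start()/enter()/leave() methods from the surrounding context to build an encoded structure. The final output() accessor is an assumption; it is not visible in this record.

enc = Encoder()
enc.start()
enc.enter(Sequence)      # open a constructed SEQUENCE
enc.write(42)            # type is guessed: int -> Integer
enc.write('hello')       # str -> OctetString
enc.leave()              # close the SEQUENCE and emit its length
encoded = enc.output()   # assumption: an accessor for the encoded bytes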
michael-wzhu/daguan_competition_2021_codes
src/Megatron-LM/tasks/data_utils.py
build_tokens_types_paddings_from_text
python
def build_tokens_types_paddings_from_text(text_a, text_b, tokenizer, max_seq_length):
    text_a_ids = tokenizer.tokenize(text_a)
    text_b_ids = None
    if text_b is not None:
        text_b_ids = tokenizer.tokenize(text_b)
    return build_tokens_types_paddings_from_ids(text_a_ids, text_b_ids, max_seq_length,
                                                tokenizer.cls, tokenizer.sep, tokenizer.pad)
Build token types and paddings, trim if needed, and pad if needed.
https://github.com/michael-wzhu/daguan_competition_2021_codes/blob/ab61f7e3ed11802759baf05b2b309448e38cacba/src/Megatron-LM/tasks/data_utils.py#L48-L59
import re import numpy as np def clean_text(text): text = text.replace("\n", " ") text = re.sub(r'\s+', ' ', text) for _ in range(3): text = text.replace(' . ', '. ') return text def build_sample(ids, types, paddings, label, unique_id): ids_np = np.array(ids, dtype=np.int64) types_np = np.array(types, dtype=np.int64) paddings_np = np.array(paddings, dtype=np.int64) sample = ({'text': ids_np, 'types': types_np, 'padding_mask': paddings_np, 'label': int(label), 'uid': int(unique_id)}) return sample
Apache License 2.0
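A hedged sketch of calling build_tokens_types_paddings_from_text above. The tokenizer is a stand-in: the function only needs tokenize() plus cls/sep/pad attributes, and the exact structure of the return value is determined by build_tokens_types_paddings_from_ids, which is not shown in this record.

# `tokenizer` is assumed to be a Megatron-LM style tokenizer exposing
# tokenize(), .cls, .sep and .pad — exactly the attributes used above.
text_a = clean_text("The quick brown fox. Jumps over the lazy dog.")
sample_inputs = build_tokens_types_paddings_from_text(
    text_a, None, tokenizer, max_seq_length=128)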
stmsolutions/boobsnail
excel4lib/macro/excel4_macro.py
Excel4Macro._obfuscate_variable_values
python
def _obfuscate_variable_values(self):
    if not self.obfuscator:
        return
    for f in (self.ordered_calls):
        if not f._obfuscate:
            continue
        if issubclass(type(f), Excel4Variable):
            obfuscated = self.obfuscator.obfuscate_variable_value(f)
            for o in obfuscated:
                self.worksheet.add_above(o, f)
Obfuscates variable values
https://github.com/stmsolutions/boobsnail/blob/c0c2067d7271ca76ee721998d28e8c3c81a48397/excel4lib/macro/excel4_macro.py#L439-L457
from excel4lib.sheet import * from .excel4_instruction import * from .excel4_formula import * from .excel4_variable import * from .excel4_value import * from .excel4_argument import * from .excel4_register_formula import * from excel4lib.config import * from .excel4_argument import * class Excel4Macro(object): def __init__(self, name, obfuscator = None, analysis = None, routines=None): self.name = name self.worksheet = Worksheet(name) self.obfuscator = obfuscator self.analysis = analysis self.routines = routines if self.obfuscator: self.obfuscator.set_macro(self) if self.analysis: self.analysis.set_macro(self) if self.routines: self.routines.set_macro(self) self.ordered_calls = [] self.obfuscated_formulas = [] self.automatic_obfuscate = Excel4Config.obfuscator.automatic_obfuscation self.config = Excel4Config self.trigger_x = 1 self.trigger_y = 1 def set_cords(self, x=None, y=None): if not x: x = self.worksheet._curr_x if not y: y = self.worksheet._curr_y self.worksheet.set_current_cords(x,y) def set_trigger_cords(self, x, y): self.trigger_x = x self.trigger_y = y def to_csv(self): self.obfuscate_all() return self.worksheet.to_csv(Excel4Config.csv_separator) def to_csv_file(self, filename=None): if not filename: filename = self.name + ".csv" write_file(filename, self.to_csv()) def _reserve_cells(self, x, y, amount): formulas = [] if self.worksheet.is_reserved(x, y, amount): return formulas for i in range(0, amount): formulas.append(Excel4Value(x,y+amount)) self.worksheet.add_cell(formulas[i]) return formulas def random_add_to_worksheet(self, formulas): curr_x, curr_y = self.worksheet.get_current_cords() target_x, target_y = self._gen_random_cords(len(formulas)) for f in formulas: f.x = target_x f.y = target_y self._add_to_worksheet(f) target_y = target_y + 1 self.worksheet.set_current_cords(curr_x, curr_y) def _add_to_worksheet(self, cell): if not issubclass(type(cell), Cell): pass self.worksheet.add_cell(cell) self.ordered_calls.append(cell) def _create_logical_test(self, value1, operator, value2): o = Excel4LogicalTest(value1, operator, value2) return o def _create_argument_object(self, instruction, *args): o = Excel4FormulaArgument(instruction, *args) return o def _create_formula(self, x, y, instruction, *args): o = Excel4Formula(x, y, instruction, *args) return o def _create_go_to(self, x, y, formula): instruction_name = Excel4InstructionName("GOTO") instruction_name.translate = True o = Excel4GoToFormula(x, y, instruction_name, formula) return o def _create_loop(self, x, y, instruction, *args): o = Excel4LoopFormula(x, y, instruction, *args) return o def _create_condition(self, x, y, instruction, *args): o = Excel4ConditionFormula(x, y, instruction, *args) return o def _create_end_loop(self, x, y, instruction, *args): o = Excel4EndLoopFormula(x, y, instruction, *args) return o def _create_value(self, x, y, value): o = Excel4Value(x, y, value) return o def _create_variable(self, x, y, name, value): o = Excel4Variable(x, y, name, value) return o def _create_register(self, x, y, dll_name, exported_function, type_text, function_text=""): if not function_text: function_text = random_string(random.randint(4, 8)) formula = Excel4RegisterFormula(x, y, dll_name, exported_function, type_text, function_text) return formula def obfuscate_all(self): if (not self.obfuscator) or (not self.config.obfuscator.enable): return lang_b = Excel4Translator.language Excel4Translator.language = Excel4Translator.native_language if not self.config.obfuscator.translate: for f in self.ordered_calls: if 
issubclass(type(f), Excel4Value): f.set_language(Excel4Translator.native_language) if self.config.obfuscator.obfuscate_variable_names: self._obfuscate_variable_names() if self.config.obfuscator.obfuscate_registered_functions: self._obfuscate_function_names() if self.config.obfuscator.spread_cells: self._spread_cells() if self.config.obfuscator.generate_noise: self._generate_noise() if self.config.obfuscator.obfuscate_variable_values: self._obfuscate_variable_values() if self.config.obfuscator.obfuscate_formulas: self._obfuscate_formulas() Excel4Translator.language = lang_b def _spread_cells(self): if not self.obfuscator: return self.obfuscator._spread_formulas(self.trigger_x, self.trigger_y) def _generate_noise(self): if not self.obfuscator: return self.obfuscator._generate_noise() def _obfuscate_function_names(self): if not self.obfuscator: return for f in (self.ordered_calls): if not f._obfuscate: continue if issubclass(type(f), Excel4RegisterFormula): self.obfuscator.obfuscate_function_name(f) def _obfuscate_variable_names(self): if not self.obfuscator: return for f in (self.ordered_calls): if not f._obfuscate: continue if issubclass(type(f), Excel4Variable): self.obfuscator.obfuscate_variable_name(f)
MIT License
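The method above is private; the context shows it being reached through obfuscate_all() when the obfuscate_variable_values flag is enabled. A small sketch, with the obfuscator object left as an assumption since its concrete class is not part of this record:

# `some_obfuscator` is assumed to implement the interface Excel4Macro expects
# (set_macro, obfuscate_variable_value, ...).
macro = Excel4Macro("payload", obfuscator=some_obfuscator)
# ... add formulas and variables to the macro here ...
csv_text = macro.to_csv()   # to_csv() calls obfuscate_all(), which in turn
                            # runs _obfuscate_variable_values()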
automl/auto-sklearn
autosklearn/metalearning/metalearning/kNearestDatasets/kND.py
KNearestDatasets.fit
python
def fit(self, metafeatures, runs):
    assert isinstance(metafeatures, pd.DataFrame)
    assert metafeatures.values.dtype in (np.float32, np.float64)
    assert np.isfinite(metafeatures.values).all()
    assert isinstance(runs, pd.DataFrame)
    assert runs.shape[1] == metafeatures.shape[0], (runs.shape[1], metafeatures.shape[0])

    self.metafeatures = metafeatures
    self.runs = runs
    self.num_datasets = runs.shape[1]

    self.scaler.fit(self.metafeatures)

    best_configuration_per_dataset = {}
    for dataset_name in runs:
        if not np.isfinite(runs[dataset_name]).any():
            best_configuration_per_dataset[dataset_name] = None
        else:
            configuration_idx = runs[dataset_name].index[
                np.nanargmin(runs[dataset_name].values)]
            best_configuration_per_dataset[dataset_name] = configuration_idx
    self.best_configuration_per_dataset = best_configuration_per_dataset

    if callable(self.metric):
        self._metric = self.metric
        self._p = 0
    elif self.metric.lower() == "l1":
        self._metric = "minkowski"
        self._p = 1
    elif self.metric.lower() == "l2":
        self._metric = "minkowski"
        self._p = 2
    else:
        raise ValueError(self.metric)

    self._nearest_neighbors = NearestNeighbors(
        n_neighbors=self.num_datasets, radius=None, algorithm="brute",
        leaf_size=30, metric=self._metric, p=self._p,
        metric_params=self.metric_params)
Fit the Nearest Neighbor model.

Parameters
----------
metafeatures : pandas.DataFrame
    A pandas dataframe. Each row represents a dataset, each column a
    metafeature.
runs : dict
    Dictionary containing a list of runs for each dataset.
https://github.com/automl/auto-sklearn/blob/1fa4482dbd6be2e97cba1b62db0404c27479978c/autosklearn/metalearning/metalearning/kNearestDatasets/kND.py#L25-L77
import numpy as np import pandas as pd from sklearn.neighbors import NearestNeighbors from sklearn.preprocessing import MinMaxScaler import sklearn.utils class KNearestDatasets(object): def __init__(self, logger, metric='l1', random_state=None, metric_params=None): self.logger = logger self.metric = metric self.model = None self.metric_params = metric_params self.metafeatures = None self.runs = None self.best_configuration_per_dataset = None self.random_state = sklearn.utils.check_random_state(random_state) self.scaler = MinMaxScaler() if self.metric_params is None: self.metric_params = {}
BSD 3-Clause New or Revised License
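A toy, hedged example of fitting KNearestDatasets above: rows of metafeatures are datasets, and runs has one column per dataset with one row per configuration holding that configuration's loss.

import logging
import numpy as np
import pandas as pd

metafeatures = pd.DataFrame(np.random.rand(3, 4).astype(np.float64),
                            index=['d1', 'd2', 'd3'])
runs = pd.DataFrame(np.random.rand(5, 3), columns=['d1', 'd2', 'd3'])

knd = KNearestDatasets(logger=logging.getLogger('metalearning'), metric='l1')
knd.fit(metafeatures, runs)
print(knd.best_configuration_per_dataset)   # best run index per dataset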
datera/targetcli
targetcli/cli.py
Cli._complete_one_option
python
def _complete_one_option(self, text, line, begidx, endidx, options):
    prev_options = line.split()[1:]
    if text:
        prev_options = prev_options[:-1]
    return ["%s " % name for name in options
            if name.startswith(text)
            if not prev_options]
Helper to autocomplete a single option out of options.
https://github.com/datera/targetcli/blob/a1251821e1f6effeccb196a7a70544ab44fe2108/targetcli/cli.py#L78-L88
import pyparsing as pp import sys, tty, cmd, termios, readline, traceback import rtslib.config, rtslib.config_tree from targetcli.cli_logger import logger as log from rtslib.config import ConfigError class CliError(Exception): pass class Cli(cmd.Cmd): intro = '' log_levels = {'debug': 10, 'info': 20, 'warning': 30, 'error': 40, 'critical': 50} def __init__(self, interactive, history_path): cmd.Cmd.__init__(self) self.debug_level = 'off' self.last_traceback = None self.interactive = interactive self.do_save_history = self.interactive if self.interactive: self.load_history() readline.set_completer_delims(' \t\n`~!@#$%^&*()=+[{]}\\|;\'",<>/?') def do_EOF(self, options): sys.stdout.write("exit\n") return self.do_exit(options) def _complete_options(self, text, line, begidx, endidx, options): prev_options = line.split()[1:] if text: prev_options = prev_options[:-1] return ["%s " % name for name in options if name.startswith(text) if name.strip() not in prev_options]
Apache License 2.0
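A hedged sketch of what _complete_one_option above returns when driven directly; in normal use cmd.Cmd reaches it through the complete_* machinery. The constructor arguments follow the __init__ signature visible in the context.

cli = Cli(interactive=False, history_path=None)   # non-interactive: no readline/history setup
# Completing "loglevel d": only options matching "d" are offered, and only
# when no option has been fully typed yet.
print(cli._complete_one_option('d', 'loglevel d', 9, 10,
                               ['debug', 'info', 'warning']))
# -> ['debug ']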
googleapis/gapic-generator-python
tests/integration/goldens/logging/samples/generated_samples/logging_generated_logging_v2_metrics_service_v2_delete_log_metric_sync.py
sample_delete_log_metric
python
def sample_delete_log_metric():
    client = logging_v2.MetricsServiceV2Client()

    request = logging_v2.DeleteLogMetricRequest(
        metric_name="projects/{project}/metrics/{metric}",
    )

    response = client.delete_log_metric(request=request)
Snippet for delete_log_metric
https://github.com/googleapis/gapic-generator-python/blob/582fed9c43bd8c1a3c5a9a7705fa2e39b729b910/tests/integration/goldens/logging/samples/generated_samples/logging_generated_logging_v2_metrics_service_v2_delete_log_metric_sync.py#L30-L42
from google.cloud import logging_v2
Apache License 2.0
alexal1/insomniac
insomniac/db_models.py
InstagramProfile.log_change_profile_info_action
python
def log_change_profile_info_action(self, session_id, phase, profile_pic_url, name, description,
                                   task_id='', execution_id='', timestamp=None):
    with db.connection_context():
        session = SessionInfo.get(SessionInfo.id == session_id)
        action = InsomniacAction.create(actor_profile=self,
                                        type=ChangeProfileInfoAction.__name__,
                                        task_id=task_id,
                                        execution_id=execution_id,
                                        session=session,
                                        phase=phase,
                                        timestamp=(timestamp if timestamp is not None else datetime.now()))
        ChangeProfileInfoAction.create(action=action,
                                       profile_pic_url=profile_pic_url,
                                       name=name,
                                       description=description)
Create InsomniacAction record
Create ChangeProfileInfoAction record
https://github.com/alexal1/insomniac/blob/1f819481cea13034848fcfdaad0e32d1a83846c5/insomniac/db_models.py#L274-L291
import uuid from collections import defaultdict from typing import Optional from peewee import * from playhouse.migrate import SqliteMigrator, migrate from insomniac.utils import * from insomniac.globals import executable_name DATABASE_NAME = f'{executable_name}.db' DATABASE_VERSION = 4 db = SqliteDatabase(DATABASE_NAME, autoconnect=False) class InsomniacModel(Model): class Meta: database = db class InstagramProfile(InsomniacModel): name = TextField(unique=True) class Meta: db_table = 'instagram_profiles' def start_session(self, app_id, app_version, args, profile_status, followers_count, following_count) -> uuid.UUID: with db.connection_context(): profile_info = InstagramProfileInfo.create(profile=self, status=profile_status, followers=followers_count, following=following_count) session = SessionInfo.create(app_id=app_id, app_version=app_version, args=args, profile_info=profile_info) return session.id def end_session(self, session_id): with db.connection_context(): session_info = SessionInfo.get(SessionInfo.id == session_id) session_info.end = datetime.now() session_info.save() def add_session(self, app_id, app_version, args, profile_status, followers_count, following_count, start, end): with db.connection_context(): profile_info = InstagramProfileInfo.create(profile=self, status=profile_status, followers=followers_count, following=following_count) SessionInfo.create(app_id=app_id, app_version=app_version, args=args, profile_info=profile_info, start=start, end=end) def update_profile_info(self, profile_status, followers_count, following_count): with db.connection_context(): InstagramProfileInfo.create(profile=self, status=profile_status.value, followers=followers_count, following=following_count) def get_latsest_profile_info(self) -> Optional['InstagramProfileInfo']: with db.connection_context(): query = InstagramProfileInfo.select() .where(InstagramProfileInfo.profile == self) .group_by(InstagramProfileInfo.profile) .having(InstagramProfileInfo.timestamp == fn.MAX(InstagramProfileInfo.timestamp)) for obj in query: return obj return None def log_get_profile_action(self, session_id, phase, username, task_id='', execution_id='', timestamp=None): with db.connection_context(): session = SessionInfo.get(SessionInfo.id == session_id) action = InsomniacAction.create(actor_profile=self, type=GetProfileAction.__name__, task_id=task_id, execution_id=execution_id, session=session, phase=phase, timestamp=(timestamp if timestamp is not None else datetime.now())) GetProfileAction.create(action=action, target_user=username) def log_like_action(self, session_id, phase, username, source_type, source_name, task_id='', execution_id='', timestamp=None): with db.connection_context(): session = SessionInfo.get(SessionInfo.id == session_id) action = InsomniacAction.create(actor_profile=self, type=LikeAction.__name__, task_id=task_id, execution_id=execution_id, session=session, phase=phase, timestamp=(timestamp if timestamp is not None else datetime.now())) LikeAction.create(action=action, target_user=username) if source_type is not None and source_name is not None: InsomniacActionSource.create(action=action, type=source_type, name=source_name) def log_follow_action(self, session_id, phase, username, source_type, source_name, task_id='', execution_id='', timestamp=None): with db.connection_context(): session = SessionInfo.get(SessionInfo.id == session_id) action = InsomniacAction.create(actor_profile=self, type=FollowAction.__name__, task_id=task_id, execution_id=execution_id, session=session, phase=phase, 
timestamp=(timestamp if timestamp is not None else datetime.now())) FollowAction.create(action=action, target_user=username) if source_type is not None and source_name is not None: InsomniacActionSource.create(action=action, type=source_type, name=source_name) def log_story_watch_action(self, session_id, phase, username, source_type, source_name, task_id='', execution_id='', timestamp=None): with db.connection_context(): session = SessionInfo.get(SessionInfo.id == session_id) action = InsomniacAction.create(actor_profile=self, type=StoryWatchAction.__name__, task_id=task_id, execution_id=execution_id, session=session, phase=phase, timestamp=(timestamp if timestamp is not None else datetime.now())) StoryWatchAction.create(action=action, target_user=username) if source_type is not None and source_name is not None: InsomniacActionSource.create(action=action, type=source_type, name=source_name) def log_comment_action(self, session_id, phase, username, comment, source_type, source_name, task_id='', execution_id='', timestamp=None): with db.connection_context(): session = SessionInfo.get(SessionInfo.id == session_id) action = InsomniacAction.create(actor_profile=self, type=CommentAction.__name__, task_id=task_id, execution_id=execution_id, session=session, phase=phase, timestamp=(timestamp if timestamp is not None else datetime.now())) CommentAction.create(action=action, target_user=username, comment=comment) if source_type is not None and source_name is not None: InsomniacActionSource.create(action=action, type=source_type, name=source_name) def log_direct_message_action(self, session_id, phase, username, message, task_id='', execution_id='', timestamp=None): with db.connection_context(): session = SessionInfo.get(SessionInfo.id == session_id) action = InsomniacAction.create(actor_profile=self, type=DirectMessageAction.__name__, task_id=task_id, execution_id=execution_id, session=session, phase=phase, timestamp=(timestamp if timestamp is not None else datetime.now())) DirectMessageAction.create(action=action, target_user=username, message=message) def log_unfollow_action(self, session_id, phase, username, task_id='', execution_id='', timestamp=None): with db.connection_context(): session = SessionInfo.get(SessionInfo.id == session_id) action = InsomniacAction.create(actor_profile=self, type=UnfollowAction.__name__, task_id=task_id, execution_id=execution_id, session=session, phase=phase, timestamp=(timestamp if timestamp is not None else datetime.now())) UnfollowAction.create(action=action, target_user=username) def log_scrape_action(self, session_id, phase, username, source_type, source_name, task_id='', execution_id='', timestamp=None): with db.connection_context(): session = SessionInfo.get(SessionInfo.id == session_id) action = InsomniacAction.create(actor_profile=self, type=ScrapeAction.__name__, task_id=task_id, execution_id=execution_id, session=session, phase=phase, timestamp=(timestamp if timestamp is not None else datetime.now())) ScrapeAction.create(action=action, target_user=username) if source_type is not None and source_name is not None: InsomniacActionSource.create(action=action, type=(source_type if source_type is not None else None), name=source_name) def log_filter_action(self, session_id, phase, username, task_id='', execution_id='', timestamp=None): with db.connection_context(): session = SessionInfo.get(SessionInfo.id == session_id) action = InsomniacAction.create(actor_profile=self, type=FilterAction.__name__, task_id=task_id, execution_id=execution_id, session=session, 
phase=phase, timestamp=(timestamp if timestamp is not None else datetime.now())) FilterAction.create(action=action, target_user=username)
MIT License
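A hedged sketch of logging a profile-info change with the method above. start_session() (shown in the context) supplies the session_id; the phase label and profile values are placeholders.

with db.connection_context():
    profile, _ = InstagramProfile.get_or_create(name="my_account")   # standard peewee get_or_create

session_id = profile.start_session(app_id="com.instagram.android", app_version="200.0",
                                   args="{}", profile_status="valid",
                                   followers_count=100, following_count=50)
profile.log_change_profile_info_action(
    session_id=session_id, phase="task",                 # phase label is a placeholder
    profile_pic_url="https://example.com/pic.jpg",
    name="New Display Name", description="New bio text")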
alliefitter/boto3_type_annotations
boto3_type_annotations_with_docs/boto3_type_annotations/signer/client.py
Client.list_signing_platforms
python
def list_signing_platforms(self, category: str = None, partner: str = None, target: str = None, maxResults: int = None, nextToken: str = None) -> Dict:
    pass
Lists all signing platforms available in AWS Signer that match the request parameters. If additional jobs remain to be listed, AWS Signer returns a ``nextToken`` value. Use this value in subsequent calls to ``ListSigningJobs`` to fetch the remaining values. You can continue calling ``ListSigningJobs`` with your ``maxResults`` parameter and with new values that AWS Signer returns in the ``nextToken`` parameter until all of your signing jobs have been returned. See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/signer-2017-08-25/ListSigningPlatforms>`_ **Request Syntax** :: response = client.list_signing_platforms( category='string', partner='string', target='string', maxResults=123, nextToken='string' ) **Response Syntax** :: { 'platforms': [ { 'platformId': 'string', 'displayName': 'string', 'partner': 'string', 'target': 'string', 'category': 'AWSIoT', 'signingConfiguration': { 'encryptionAlgorithmOptions': { 'allowedValues': [ 'RSA'|'ECDSA', ], 'defaultValue': 'RSA'|'ECDSA' }, 'hashAlgorithmOptions': { 'allowedValues': [ 'SHA1'|'SHA256', ], 'defaultValue': 'SHA1'|'SHA256' } }, 'signingImageFormat': { 'supportedFormats': [ 'JSON', ], 'defaultFormat': 'JSON' }, 'maxSizeInMB': 123 }, ], 'nextToken': 'string' } **Response Structure** - *(dict) --* - **platforms** *(list) --* A list of all platforms that match the request parameters. - *(dict) --* Contains information about the signing configurations and parameters that is used to perform an AWS Signer job. - **platformId** *(string) --* The ID of an AWS Signer platform. - **displayName** *(string) --* The display name of an AWS Signer platform. - **partner** *(string) --* Any partner entities linked to an AWS Signer platform. - **target** *(string) --* The types of targets that can be signed by an AWS Signer platform. - **category** *(string) --* The category of an AWS Signer platform. - **signingConfiguration** *(dict) --* The configuration of an AWS Signer platform. This includes the designated hash algorithm and encryption algorithm of a signing platform. - **encryptionAlgorithmOptions** *(dict) --* The encryption algorithm options that are available for an AWS Signer job. - **allowedValues** *(list) --* The set of accepted encryption algorithms that are allowed in an AWS Signer job. - *(string) --* - **defaultValue** *(string) --* The default encryption algorithm that is used by an AWS Signer job. - **hashAlgorithmOptions** *(dict) --* The hash algorithm options that are available for an AWS Signer job. - **allowedValues** *(list) --* The set of accepted hash algorithms allowed in an AWS Signer job. - *(string) --* - **defaultValue** *(string) --* The default hash algorithm that is used in an AWS Signer job. - **signingImageFormat** *(dict) --* The signing image format that is used by an AWS Signer platform. - **supportedFormats** *(list) --* The supported formats of an AWS Signer signing image. - *(string) --* - **defaultFormat** *(string) --* The default format of an AWS Signer signing image. - **maxSizeInMB** *(integer) --* The maximum size (in MB) of code that can be signed by a AWS Signer platform. - **nextToken** *(string) --* Value for specifying the next set of paginated results to return. :type category: string :param category: The category type of a signing platform. :type partner: string :param partner: Any partner entities connected to a signing platform. :type target: string :param target: The validation template that is used by the target signing platform. 
:type maxResults: integer :param maxResults: The maximum number of results to be returned by this operation. :type nextToken: string :param nextToken: Value for specifying the next set of paginated results to return. After you receive a response with truncated results, use this parameter in a subsequent request. Set it to the value of ``nextToken`` from the response that you just received. :rtype: dict :returns:
https://github.com/alliefitter/boto3_type_annotations/blob/2a88aa562b1aee6e8a6cc30402980884b3707fbb/boto3_type_annotations_with_docs/boto3_type_annotations/signer/client.py#L448-L560
from typing import Optional from botocore.client import BaseClient from typing import Dict from botocore.paginate import Paginator from botocore.waiter import Waiter from typing import Union class Client(BaseClient): def can_paginate(self, operation_name: str = None): pass def cancel_signing_profile(self, profileName: str): pass def describe_signing_job(self, jobId: str) -> Dict: pass def generate_presigned_url(self, ClientMethod: str = None, Params: Dict = None, ExpiresIn: int = None, HttpMethod: str = None): pass def get_paginator(self, operation_name: str = None) -> Paginator: pass def get_signing_platform(self, platformId: str) -> Dict: pass def get_signing_profile(self, profileName: str) -> Dict: pass def get_waiter(self, waiter_name: str = None) -> Waiter: pass def list_signing_jobs(self, status: str = None, platformId: str = None, requestedBy: str = None, maxResults: int = None, nextToken: str = None) -> Dict: pass
MIT License
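The class above is only a type-annotation stub; the real call goes through a normal boto3 client. A hedged example against the AWS Signer API, using only parameters documented in the stub:

import boto3

signer = boto3.client('signer')
response = signer.list_signing_platforms(category='AWSIoT', maxResults=10)
for platform in response.get('platforms', []):
    print(platform['platformId'], platform.get('displayName'))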
altair-viz/altair
altair/vegalite/v4/schema/core.py
load_schema
python
def load_schema():
    return json.loads(pkgutil.get_data(__name__, 'vega-lite-schema.json').decode('utf-8'))
Load the json schema associated with this module's functions
https://github.com/altair-viz/altair/blob/159831869f84298af19b6d231c35204cf1c7b40d/altair/vegalite/v4/schema/core.py#L9-L11
from altair.utils.schemapi import SchemaBase, Undefined, _subclasses

import pkgutil
import json
BSD 3-Clause New or Revised License
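A small sketch of using load_schema above; the top-level keys inspected here ('$schema', 'definitions') are typical of the bundled Vega-Lite schema but are stated as assumptions rather than guarantees.

schema = load_schema()
print(schema.get('$schema'))                        # schema draft identifier
print(sorted(schema.get('definitions', {}))[:5])    # first few definition names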
openaddresses/machine
openaddr/ci/__init__.py
get_touched_pullrequest_files
python
def get_touched_pullrequest_files(pull_request, github_auth, app_logger):
    base_sha = pull_request['base']['sha']
    head_sha = pull_request['head']['sha']
    compare_url = pull_request['head']['repo']['compare_url']
    compare_url = expand_uri(compare_url, dict(head=head_sha, base=base_sha))

    app_logger.debug('Compare URL {}'.format(compare_url))

    compare = get(compare_url, auth=github_auth).json()
    touched = set([file['filename'] for file in compare['files']])
    app_logger.debug(u'Touched files {}'.format(', '.join(touched)))

    return touched
Return a set of files modified between master and payload head.
https://github.com/openaddresses/machine/blob/05db17d8492b3d8f4064f0f5b0ca9c68041c535a/openaddr/ci/__init__.py#L178-L192
import logging; _L = logging.getLogger('openaddr.ci') from .. import jobs, render, util, __version__ from .webcommon import nice_domain from .objects import ( add_job, write_job, read_job, complete_set, update_set_renders, set_run, RunState, get_completed_run, read_completed_set_runs ) from . import objects, work, queuedata from os.path import relpath, splitext, join, basename from datetime import timedelta, datetime from uuid import uuid4 from urllib.parse import urljoin from base64 import b64decode from tempfile import mkdtemp from functools import wraps from shutil import rmtree from time import time, sleep import threading, sys import json, os, re import socket from flask import Flask, request, Response, jsonify, render_template from requests import get, post, ConnectionError from uritemplate import expand as expand_uri from dateutil.tz import tzutc from psycopg2 import connect from pq import PQ import boto import psycopg2.extensions psycopg2.extensions.register_type(psycopg2.extensions.UNICODEARRAY) psycopg2.extensions.register_type(psycopg2.extensions.UNICODE) def load_config(): def truthy(value): return bool(value.lower() in ('yes', 'true')) secrets_string = os.environ.get('WEBHOOK_SECRETS', u'').encode('utf8') webhook_secrets = secrets_string.split(b',') if secrets_string else [] return dict(GAG_GITHUB_STATUS=truthy(os.environ.get('GAG_GITHUB_STATUS', '')), REJECT_NEW_JOBS=truthy(os.environ.get('REJECT_NEW_JOBS', '')), GITHUB_AUTH=(os.environ['GITHUB_TOKEN'], 'x-oauth-basic'), GITHUB_OAUTH_CLIENT_ID=os.environ.get('GITHUB_CLIENT_ID'), GITHUB_OAUTH_SECRET=os.environ.get('GITHUB_SECRET'), GITHUB_OAUTH_CALLBACK=os.environ.get('GITHUB_CALLBACK'), MEMCACHE_SERVER=os.environ.get('MEMCACHE_SERVER'), DATABASE_URL=os.environ['DATABASE_URL'], AWS_S3_BUCKET=os.environ.get('AWS_S3_BUCKET', 'data.openaddresses.io'), WEBHOOK_SECRETS=webhook_secrets) TASK_QUEUE, DONE_QUEUE, DUE_QUEUE, HEARTBEAT_QUEUE = 'tasks', 'finished', 'due', 'heartbeat' DUETASK_DELAY = timedelta(minutes=5) RUN_REUSE_TIMEOUT = timedelta(days=10) WORKER_COOLDOWN = timedelta(seconds=5) GITHUB_RETRY_DELAY = timedelta(seconds=5) HEARTBEAT_INTERVAL = timedelta(minutes=5) RETEST_COMMENT_PAT = re.compile(r'^re-?run this,? 
please\b', re.IGNORECASE|re.MULTILINE) def td2str(td): return '{}s'.format(td.seconds + td.days * 86400) def process_github_payload(queue, request_url, app_logger, github_auth, webhook_name, webhook_payload, gag_status): app_logger.warning("Received event %s: %s", webhook_name, json.dumps(webhook_payload)) if skip_payload(webhook_name, webhook_payload): return True, {'url': None, 'files': [], 'skip': True} owner, repo, commit_sha, status_url, comments_url = get_commit_info(app_logger, webhook_payload, github_auth) if gag_status: status_url = None try: files = process_payload_files(webhook_payload, github_auth, app_logger) except Exception as e: message = 'Could not read source files: {}'.format(e) update_error_status(status_url, message, [], github_auth) _L.error(message, exc_info=True) return True, {'url': None, 'files': [], 'status_url': status_url} if not files: update_empty_status(status_url, github_auth) _L.warning('No files') return True, {'url': None, 'files': [], 'status_url': status_url} filenames = list(files.keys()) job_url_template = urljoin(request_url, u'/jobs/{id}') is_rerun = is_rerun_payload(webhook_payload) try: job_id = create_queued_job(queue, files, job_url_template, commit_sha, is_rerun, owner, repo, status_url, comments_url) job_url = expand_uri(job_url_template, dict(id=job_id)) except Exception as e: update_error_status(status_url, str(e), filenames, github_auth) _L.error('Oops', exc_info=True) return False, dict(error=str(e), files=files, status_url=status_url) else: update_pending_status(status_url, job_url, filenames, github_auth) return True, dict(id=job_id, url=job_url, files=files, status_url=status_url) def get_touched_pushevent_files(payload, app_logger): touched = set() for commit in payload['commits']: for filelist in (commit['added'], commit['modified']): touched.update(filelist) for filename in commit['removed']: if filename in touched: touched.remove(filename) app_logger.debug(u'Touched files {}'.format(', '.join(touched))) return touched def get_touched_pushevent_branch_files(payload, github_auth, app_logger): branch_sha = payload['head_commit']['id'] compare1_url = payload['repository']['compare_url'] compare1_url = expand_uri(compare1_url, dict(base='master', head=branch_sha)) app_logger.debug('Compare URL 1 {}'.format(compare1_url)) compare1 = get(compare1_url, auth=github_auth).json() merge_base_sha = compare1['merge_base_commit']['sha'] if merge_base_sha == branch_sha: return set() compare2_url = payload['repository']['compare_url'] compare2_url = expand_uri(compare2_url, dict(base=merge_base_sha, head=branch_sha)) app_logger.debug('Compare URL 2 {}'.format(compare2_url)) compare2 = get(compare2_url, auth=github_auth).json() touched = set([file['filename'] for file in compare2['files']]) app_logger.debug(u'Touched files {}'.format(', '.join(touched))) return touched
ISC License
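A hedged sketch of calling get_touched_pullrequest_files above from a webhook handler. The payload is assumed to be a GitHub pull_request event body, and the auth tuple mirrors the (token, 'x-oauth-basic') format that load_config() builds.

import logging

logger = logging.getLogger('openaddr.ci')
touched = get_touched_pullrequest_files(payload['pull_request'],
                                        ('my-github-token', 'x-oauth-basic'),
                                        logger)
json_sources = {path for path in touched if path.endswith('.json')}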
rajammanabrolu/worldgeneration
evennia-engine/evennia/evennia/accounts/accounts.py
DefaultAccount.authenticate
python
def authenticate(cls, username, password, ip="", **kwargs):
    errors = []
    if ip:
        ip = str(ip)

    if ip and LOGIN_THROTTLE.check(ip):
        errors.append("Too many login failures; please try again in a few minutes.")
        return None, errors

    banned = cls.is_banned(username=username, ip=ip)
    if banned:
        errors.append(
            "|rYou have been banned and cannot continue from here."
            "\nIf you feel this ban is in error, please email an admin.|x"
        )
        logger.log_sec(f"Authentication Denied (Banned): {username} (IP: {ip}).")
        LOGIN_THROTTLE.update(ip, "Too many sightings of banned artifact.")
        return None, errors

    account = authenticate(username=username, password=password)
    if not account:
        errors.append("Username and/or password is incorrect.")
        logger.log_sec(f"Authentication Failure: {username} (IP: {ip}).")
        if ip:
            LOGIN_THROTTLE.update(ip, "Too many authentication failures.")
        session = kwargs.get("session", None)
        if session:
            account = AccountDB.objects.get_account_from_name(username)
            if account:
                account.at_failed_login(session)
        return None, errors

    logger.log_sec(f"Authentication Success: {account} (IP: {ip}).")
    return account, errors
Checks the given username/password against the database to see if the
credentials are valid.

Note that this simply checks credentials and returns a valid reference
to the user-- it does not log them in!

To finish the job:
After calling this from a Command, associate the account with a Session:
- session.sessionhandler.login(session, account)
...or after calling this from a View, associate it with an HttpRequest:
- django.contrib.auth.login(account, request)

Args:
    username (str): Username of account
    password (str): Password of account
    ip (str, optional): IP address of client

Kwargs:
    session (Session, optional): Session requesting authentication

Returns:
    account (DefaultAccount, None): Account whose credentials were provided
        if not banned.
    errors (list): Error messages of any failures.
https://github.com/rajammanabrolu/worldgeneration/blob/5e97df013399e1a401d0a7ec184c4b9eb3100edd/evennia-engine/evennia/evennia/accounts/accounts.py#L457-L535
import re import time from django.conf import settings from django.contrib.auth import authenticate, password_validation from django.core.exceptions import ImproperlyConfigured, ValidationError from django.utils import timezone from django.utils.module_loading import import_string from evennia.typeclasses.models import TypeclassBase from evennia.accounts.manager import AccountManager from evennia.accounts.models import AccountDB from evennia.objects.models import ObjectDB from evennia.comms.models import ChannelDB from evennia.commands import cmdhandler from evennia.server.models import ServerConfig from evennia.server.throttle import Throttle from evennia.utils import class_from_module, create, logger from evennia.utils.utils import lazy_property, to_str, make_iter, is_iter, variable_from_module from evennia.server.signals import ( SIGNAL_ACCOUNT_POST_CREATE, SIGNAL_OBJECT_POST_PUPPET, SIGNAL_OBJECT_POST_UNPUPPET, ) from evennia.typeclasses.attributes import NickHandler from evennia.scripts.scripthandler import ScriptHandler from evennia.commands.cmdsethandler import CmdSetHandler from evennia.utils.optionhandler import OptionHandler from django.utils.translation import ugettext as _ from collections import defaultdict from random import getrandbits __all__ = ("DefaultAccount",) _SESSIONS = None _AT_SEARCH_RESULT = variable_from_module(*settings.SEARCH_AT_RESULT.rsplit(".", 1)) _MULTISESSION_MODE = settings.MULTISESSION_MODE _MAX_NR_CHARACTERS = settings.MAX_NR_CHARACTERS _CMDSET_ACCOUNT = settings.CMDSET_ACCOUNT _MUDINFO_CHANNEL = None CREATION_THROTTLE = Throttle(limit=2, timeout=10 * 60) LOGIN_THROTTLE = Throttle(limit=5, timeout=5 * 60) class AccountSessionHandler(object): def __init__(self, account): self.account = account def get(self, sessid=None): global _SESSIONS if not _SESSIONS: from evennia.server.sessionhandler import SESSIONS as _SESSIONS if sessid: return make_iter(_SESSIONS.session_from_account(self.account, sessid)) else: return _SESSIONS.sessions_from_account(self.account) def all(self): return self.get() def count(self): return len(self.get()) class DefaultAccount(AccountDB, metaclass=TypeclassBase): objects = AccountManager() @lazy_property def cmdset(self): return CmdSetHandler(self, True) @lazy_property def scripts(self): return ScriptHandler(self) @lazy_property def nicks(self): return NickHandler(self) @lazy_property def sessions(self): return AccountSessionHandler(self) @lazy_property def options(self): return OptionHandler( self, options_dict=settings.OPTIONS_ACCOUNT_DEFAULT, savefunc=self.attributes.add, loadfunc=self.attributes.get, save_kwargs={"category": "option"}, load_kwargs={"category": "option"}, ) @property def characters(self): objs = self.db._playable_characters if None in objs: objs = [x for x in self.db._playable_characters if x] self.db._playable_characters = objs return objs def disconnect_session_from_account(self, session, reason=None): global _SESSIONS if not _SESSIONS: from evennia.server.sessionhandler import SESSIONS as _SESSIONS _SESSIONS.disconnect(session, reason) def puppet_object(self, session, obj): if not obj: raise RuntimeError("Object not found") if not session: raise RuntimeError("Session not found") if self.get_puppet(session) == obj: self.msg("You are already puppeting this object.") return if not obj.access(self, "puppet"): self.msg(f"You don't have permission to puppet '{obj.key}'.") return if obj.account: if obj.account == self: if obj.sessions.count(): if _MULTISESSION_MODE in (1, 3): txt1 = f"Sharing |c{obj.name}|n with 
another of your sessions." txt2 = f"|c{obj.name}|n|G is now shared from another of your sessions.|n" self.msg(txt1, session=session) self.msg(txt2, session=obj.sessions.all()) else: txt1 = f"Taking over |c{obj.name}|n from another of your sessions." txt2 = f"|c{obj.name}|n|R is now acted from another of your sessions.|n" self.msg(txt1, session=session) self.msg(txt2, session=obj.sessions.all()) self.unpuppet_object(obj.sessions.get()) elif obj.account.is_connected: self.msg(f"|c{obj.key}|R is already puppeted by another Account.") return if session.puppet: self.unpuppet_object(session) obj.at_pre_puppet(self, session=session) obj.sessions.add(session) obj.account = self session.puid = obj.id session.puppet = obj obj.scripts.validate() obj.locks.cache_lock_bypass(obj) obj.at_post_puppet() SIGNAL_OBJECT_POST_PUPPET.send(sender=obj, account=self, session=session) def unpuppet_object(self, session): for session in make_iter(session): obj = session.puppet if obj: obj.at_pre_unpuppet() obj.sessions.remove(session) if not obj.sessions.count(): del obj.account obj.at_post_unpuppet(self, session=session) SIGNAL_OBJECT_POST_UNPUPPET.send(sender=obj, session=session, account=self) session.puppet = None session.puid = None def unpuppet_all(self): self.unpuppet_object(self.sessions.all()) def get_puppet(self, session): return session.puppet def get_all_puppets(self): return list(set(session.puppet for session in self.sessions.all() if session.puppet)) def __get_single_puppet(self): puppets = self.get_all_puppets() if _MULTISESSION_MODE in (0, 1): return puppets and puppets[0] or None return puppets character = property(__get_single_puppet) puppet = property(__get_single_puppet) @classmethod def is_banned(cls, **kwargs): ip = kwargs.get("ip", "").strip() username = kwargs.get("username", "").lower().strip() bans = ServerConfig.objects.conf("server_bans") if bans and ( any(tup[0] == username for tup in bans if username) or any(tup[2].match(ip) for tup in bans if ip and tup[2]) ): return True return False @classmethod def get_username_validators( cls, validator_config=getattr(settings, "AUTH_USERNAME_VALIDATORS", []) ): objs = [] for validator in validator_config: try: klass = import_string(validator["NAME"]) except ImportError: msg = ( f"The module in NAME could not be imported: {validator['NAME']}. " "Check your AUTH_USERNAME_VALIDATORS setting." ) raise ImproperlyConfigured(msg) objs.append(klass(**validator.get("OPTIONS", {}))) return objs @classmethod
MIT License
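A hedged sketch of the two-step flow the docstring above describes, as it might look inside a login command; session is the connecting Session supplied by the command framework, and session.address holding the client IP is an assumption.

account, errors = DefaultAccount.authenticate(
    username="gandalf", password="mellon",
    ip=getattr(session, "address", ""), session=session)
if account:
    session.sessionhandler.login(session, account)   # finish the job, per the docstring
else:
    session.msg("\n".join(errors))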
zferentz/maproxy
demos/logging_proxy.py
LoggingSession.on_p2s_done_read
python
def on_p2s_done_read(self, data):
    super(LoggingSession, self).on_p2s_done_read(data)
    print("#%-3d:C<-S (%d bytes):\n%s" % (
        self.connid, len(data), filter(lambda x: x in string.printable, data)))
Override the maproxy.session.Session.on_p2s_done_read(data) function
This function is called by the framework (proxyserver) when we get data
from the server (to the client)
https://github.com/zferentz/maproxy/blob/e3bb3bea8245189e566350c31b8ca85e5e76a836/demos/logging_proxy.py#L83-L94
import tornado.ioloop import maproxy.proxyserver import maproxy.session import string class LoggingSession(maproxy.session.Session): running_counter=0 def __init__(self,*args,**kwargs): super(LoggingSession,self).__init__(*args,**kwargs) def new_connection(self,stream ,address,proxy): LoggingSession.running_counter+=1 self.connid=LoggingSession.running_counter print("#%-3d: New Connection on %s" % (self.connid,address)) super(LoggingSession,self).new_connection(stream,address,proxy) def on_p2s_done_connect(self): print("#%-3d: Server connected" % (self.connid)) super(LoggingSession,self).on_p2s_done_connect() def on_c2p_done_read(self,data): super(LoggingSession,self).on_c2p_done_read(data) print("#%-3d:C->S (%d bytes):\n%s" % (self.connid,len(data),filter(lambda x: x in string.printable, data)) )
Apache License 2.0
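A heavily hedged sketch of wiring LoggingSession into a proxy. The ProxyServer constructor arguments and the session_factory/SessionFactory names are assumptions based on the demo layout; they are not shown in this record.

import tornado.ioloop
import maproxy.proxyserver
import maproxy.session

# Assumption: ProxyServer takes (target_host, target_port) and an optional
# session factory that builds a LoggingSession per incoming connection.
server = maproxy.proxyserver.ProxyServer(
    "www.example.com", 80,
    session_factory=maproxy.session.SessionFactory(LoggingSession))
server.listen(8080)
tornado.ioloop.IOLoop.instance().start()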
google-research/scenic
scenic/model_lib/base_models/segmentation_model.py
SegmentationModel.get_metrics_fn
python
def get_metrics_fn(self, split: Optional[str] = None) -> base_model.MetricFn:
    del split
    return functools.partial(
        semantic_segmentation_metrics_function,
        target_is_onehot=self.dataset_meta_data.get('target_is_onehot', False),
        metrics=_SEMANTIC_SEGMENTATION_METRICS)
Returns a callable metric function for the model.

Args:
    split: The split for which we calculate the metrics. It should be one
        of the ['train', 'validation', 'test'].

Returns:
    A metric function with the following API: ```metrics_fn(logits, batch)```
https://github.com/google-research/scenic/blob/185b77ccc82291f59ea4c744cf288bfda09ee1b9/scenic/model_lib/base_models/segmentation_model.py#L147-L160
import functools from typing import Any, Callable, List, Dict, Optional, Tuple from flax.training import common_utils from immutabledict import immutabledict import jax.numpy as jnp import numpy as np from scenic.model_lib.base_models import base_model from scenic.model_lib.base_models import model_utils GlobalMetricFn = Callable[[List[jnp.ndarray], Dict[str, Any]], Dict[str, float]] def num_pixels(logits: jnp.ndarray, one_hot_targets: jnp.ndarray, weights: Optional[jnp.ndarray] = None) -> float: del logits if weights is None: return np.prod(one_hot_targets.shape[:3]) assert weights.ndim == 3, ( 'For segmentation task, the weights should be a pixel level mask.') return weights.sum() _SEMANTIC_SEGMENTATION_METRICS = immutabledict({ 'accuracy': (model_utils.weighted_correctly_classified, num_pixels), 'loss': (model_utils.weighted_softmax_cross_entropy, lambda *a, **kw: 1.0) }) def semantic_segmentation_metrics_function( logits: jnp.ndarray, batch: base_model.Batch, target_is_onehot: bool = False, metrics: base_model.MetricNormalizerFnDict = _SEMANTIC_SEGMENTATION_METRICS, ) -> Dict[str, Tuple[jnp.ndarray, jnp.ndarray]]: if target_is_onehot: one_hot_targets = batch['label'] else: one_hot_targets = common_utils.onehot(batch['label'], logits.shape[-1]) weights = batch.get('batch_mask') evaluated_metrics = {} for key, val in metrics.items(): evaluated_metrics[key] = model_utils.psum_metric_normalizer( (val[0](logits, one_hot_targets, weights), val[1](logits, one_hot_targets, weights))) return evaluated_metrics class SegmentationModel(base_model.BaseModel):
Apache License 2.0
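A short sketch of using get_metrics_fn above; the model, logits and batch are assumed to come from a Scenic training loop and follow the pixel-level conventions used by the metrics in the context.

metrics_fn = model.get_metrics_fn(split='validation')
# Returns per-metric (value_sum, normalizer) pairs, psum-ed across devices.
metrics = metrics_fn(logits, batch)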
chuckus/chromewhip
chromewhip/protocol/heapprofiler.py
HeapProfiler.stopTrackingHeapObjects
python
def stopTrackingHeapObjects(cls,
                            reportProgress: Optional['bool'] = None,
                            ):
    return (
        cls.build_send_payload("stopTrackingHeapObjects", {
            "reportProgress": reportProgress,
        }),
        None
    )
:param reportProgress: If true 'reportHeapSnapshotProgress' events will be generated
    while snapshot is being taken when the tracking is stopped.
:type reportProgress: bool
https://github.com/chuckus/chromewhip/blob/7249f64f96df3c6ca0859a3da06ce7ddcebbfded/chromewhip/protocol/heapprofiler.py#L214-L227
import logging from typing import Any, Optional, Union from chromewhip.helpers import PayloadMixin, BaseEvent, ChromeTypeBase log = logging.getLogger(__name__) from chromewhip.protocol import runtime as Runtime HeapSnapshotObjectId = str class SamplingHeapProfileNode(ChromeTypeBase): def __init__(self, callFrame: Union['Runtime.CallFrame'], selfSize: Union['float'], id: Union['int'], children: Union['[SamplingHeapProfileNode]'], ): self.callFrame = callFrame self.selfSize = selfSize self.id = id self.children = children class SamplingHeapProfileSample(ChromeTypeBase): def __init__(self, size: Union['float'], nodeId: Union['int'], ordinal: Union['float'], ): self.size = size self.nodeId = nodeId self.ordinal = ordinal class SamplingHeapProfile(ChromeTypeBase): def __init__(self, head: Union['SamplingHeapProfileNode'], samples: Union['[SamplingHeapProfileSample]'], ): self.head = head self.samples = samples class HeapProfiler(PayloadMixin): @classmethod def addInspectedHeapObject(cls, heapObjectId: Union['HeapSnapshotObjectId'], ): return ( cls.build_send_payload("addInspectedHeapObject", { "heapObjectId": heapObjectId, }), None ) @classmethod def collectGarbage(cls): return ( cls.build_send_payload("collectGarbage", { }), None ) @classmethod def disable(cls): return ( cls.build_send_payload("disable", { }), None ) @classmethod def enable(cls): return ( cls.build_send_payload("enable", { }), None ) @classmethod def getHeapObjectId(cls, objectId: Union['Runtime.RemoteObjectId'], ): return ( cls.build_send_payload("getHeapObjectId", { "objectId": objectId, }), cls.convert_payload({ "heapSnapshotObjectId": { "class": HeapSnapshotObjectId, "optional": False }, }) ) @classmethod def getObjectByHeapObjectId(cls, objectId: Union['HeapSnapshotObjectId'], objectGroup: Optional['str'] = None, ): return ( cls.build_send_payload("getObjectByHeapObjectId", { "objectId": objectId, "objectGroup": objectGroup, }), cls.convert_payload({ "result": { "class": Runtime.RemoteObject, "optional": False }, }) ) @classmethod def getSamplingProfile(cls): return ( cls.build_send_payload("getSamplingProfile", { }), cls.convert_payload({ "profile": { "class": SamplingHeapProfile, "optional": False }, }) ) @classmethod def startSampling(cls, samplingInterval: Optional['float'] = None, ): return ( cls.build_send_payload("startSampling", { "samplingInterval": samplingInterval, }), None ) @classmethod def startTrackingHeapObjects(cls, trackAllocations: Optional['bool'] = None, ): return ( cls.build_send_payload("startTrackingHeapObjects", { "trackAllocations": trackAllocations, }), None ) @classmethod def stopSampling(cls): return ( cls.build_send_payload("stopSampling", { }), cls.convert_payload({ "profile": { "class": SamplingHeapProfile, "optional": False }, }) ) @classmethod
MIT License
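The classmethod above only builds a DevTools-protocol command payload; sending it is done elsewhere by a chromewhip tab/protocol object, which is not shown in this record.

payload, expected = HeapProfiler.stopTrackingHeapObjects(reportProgress=False)
print(payload)     # command payload built by PayloadMixin.build_send_payload
print(expected)    # None here: no typed result conversion is registered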
natashamjaques/neural_chat
ParlAI/parlai/core/agents.py
Teacher.epoch_done
python
def epoch_done(self):
    return self.epochDone
Return whether the epoch is done.
https://github.com/natashamjaques/neural_chat/blob/ddb977bb4602a67c460d02231e7bbf7b2cb49a97/ParlAI/parlai/core/agents.py#L204-L206
from parlai.core.build_data import modelzoo_path from parlai.core.utils import warn_once, load_opt_file from .metrics import Metrics, aggregate_metrics import copy import importlib import random import os class Agent(object): def __init__(self, opt, shared=None): if not hasattr(self, 'id'): self.id = 'agent' if not hasattr(self, 'opt'): self.opt = copy.deepcopy(opt) self.observation = None def observe(self, observation): self.observation = observation return observation def act(self): if hasattr(self, 'observation') and self.observation is not None: print('agent received observation:') print(self.observation) t = {} t['text'] = 'hello, teacher!' print('agent sending message:') print(t) return t def getID(self): return self.id def epoch_done(self): return False def reset(self): self.observation = None def reset_metrics(self): pass def save(self, path=None): pass def share(self): shared = {} shared['class'] = type(self) shared['opt'] = self.opt return shared def shutdown(self): pass @classmethod def upgrade_opt(cls, opt_from_disk): return opt_from_disk class Teacher(Agent): def __init__(self, opt, shared=None): if not hasattr(self, 'opt'): self.opt = copy.deepcopy(opt) if not hasattr(self, 'id'): self.id = opt.get('task', 'teacher') if not hasattr(self, 'metrics'): if shared and shared.get('metrics'): self.metrics = shared['metrics'] else: self.metrics = Metrics(opt) self.epochDone = False def act(self): if self.observation is not None and 'text' in self.observation: t = {'text': 'Hello agent!'} return t
MIT License
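A typical ParlAI-style loop around epoch_done above; teacher and agent are assumed to have been constructed elsewhere (for example via the framework's task/agent creation helpers).

while not teacher.epoch_done():
    query = teacher.act()        # teacher produces an example
    agent.observe(query)
    reply = agent.act()
    teacher.observe(reply)       # pass the reply back to the teacher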
daeilkim/refinery
refinery/bnpy/bnpy-dev/bnpy/distr/GaussGammaDistr.py
GaussGammaDistr.E_sumlogLam
python
def E_sumlogLam(self):
    return np.sum(digamma(self.a) - np.log(self.b))
\sum_d E[ \log \lambda_d ]

Returns
-------
float, scalar
https://github.com/daeilkim/refinery/blob/0d5de8fc3d680a2c79bd0e9384b506229787c74f/refinery/bnpy/bnpy-dev/bnpy/distr/GaussGammaDistr.py#L255-L262
import numpy as np import scipy.linalg from bnpy.util import MVgammaln, MVdigamma from bnpy.util import LOGTWO, LOGPI, LOGTWOPI, EPS from bnpy.util import gammaln, digamma from .Distr import Distr class GaussGammaDistr( Distr ): def __init__(self, a=None, b=None, m=None, kappa=None, **kwargs): self.a = np.squeeze(np.asarray(a)) self.b = np.squeeze(np.asarray(b)) self.m = np.squeeze(np.asarray(m)) self.kappa = float(kappa) assert self.b.ndim <= 1 assert self.m.shape == self.b.shape assert self.a.shape == self.m.shape self.D = self.b.size self.Cache = dict() @classmethod def CreateAsPrior( cls, argDict, Data): D = Data.dim a0 = argDict['a0'] b0 = argDict['b0'] m0 = argDict['m0'] kappa = argDict['kappa'] m = m0 * np.ones(D) a = a0 * np.ones(D) b = b0 * np.ones(D) return cls(a=a, b=b, m=m, kappa=kappa) def E_log_pdf( self, Data ): logPDFConst = -0.5 * self.D * LOGTWOPI + 0.5 * np.sum(self.E_logLam()) logPDFData = -0.5 * self.E_distMahalanobis(Data.X) return logPDFConst + logPDFData def E_distMahalanobis(self, X): Elambda = self.a / self.b if X.ndim == 2: weighted_SOS = np.sum( Elambda * np.square(X - self.m), axis=1) else: weighted_SOS = np.sum(Elambda * np.square(X - self.m)) weighted_SOS += self.D/self.kappa return weighted_SOS def get_post_distr( self, SS, k=None, kB=None, **kwargs): if k is None: EN = SS.N Ex = SS.x Exx = SS.xx elif kB is not None: EN = float(SS.N[k] + SS.N[kB]) Ex = SS.x[k] + SS.x[kB] Exx = SS.xx[k] + SS.xx[kB] else: EN = float(SS.N[k]) Ex = SS.x[k] Exx = SS.xx[k] kappa = self.kappa + EN m = (self.kappa * self.m + Ex) / kappa a = self.a + 0.5*EN b = self.b + 0.5*(Exx + self.kappa*np.square(self.m) - kappa*np.square(m)) return GaussGammaDistr(a, b, m, kappa) def post_update_soVB( self, rho, refDistr, **kwargs): etaCUR = self.get_natural_params() etaSTAR = refDistr.get_natural_params() etaNEW = list(etaCUR) for i in xrange(len(etaCUR)): etaNEW[i] = rho*etaSTAR[i] + (1-rho)*etaCUR[i] self.set_natural_params(tuple(etaNEW)) @classmethod def calc_log_norm_const(cls, a, b, m, kappa): logNormConstNormal = 0.5 * D * (LOGTWOPI + np.log(kappa)) logNormConstGamma = np.sum(gammaln(a)) - np.inner(a, np.log(b)) return logNormConstNormal + logNormConstGamma def get_log_norm_const(self): D = self.D a = self.a b = self.b logNormConstNormal = 0.5 * D * (LOGTWOPI - np.log(self.kappa)) logNormConstGamma = np.sum(gammaln(a)) - np.inner(a, np.log(b)) return logNormConstNormal + logNormConstGamma def E_log_pdf_Phi(self, Distr, doNormConst=True): assert Distr.D == self.D selfELam = self.a / self.b logPDF = np.inner(Distr.a - 0.5, self.E_logLam()) - np.inner(Distr.b, selfELam) - 0.5 * Distr.kappa * self.E_distMahalanobis(Distr.m) if doNormConst: return logPDF - Distr.get_log_norm_const() return logPDF def get_entropy(self): return -1.0 * self.E_log_pdf_Phi(self) def get_natural_params(self): t1 = self.a t2 = self.b + 0.5 * self.kappa * np.square(self.m) t3 = self.kappa * self.m t4 = self.kappa etatuple = t1, t2, t3, t4 return etatuple def set_natural_params(self, etatuple): self.a = etatuple[0] self.kappa = etatuple[3] self.m = etatuple[2]/self.kappa self.b = etatuple[1] - 0.5 * self.kappa * np.square(self.m) self.Cache = dict() def E_logLam(self): return digamma(self.a) - np.log(self.b)
MIT License
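A small numeric check of E_sumlogLam above, using the constructor signature shown in the context: for each dimension, E[log lambda_d] = digamma(a_d) - log(b_d), and the method sums these over d.

import numpy as np

D = 3
distr = GaussGammaDistr(a=np.full(D, 2.0), b=np.full(D, 1.5),
                        m=np.zeros(D), kappa=1.0)
print(distr.E_sumlogLam())   # 3 * (digamma(2.0) - log(1.5))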
zengyi-qin/tlnet
avod/builders/config_builder_util.py
get_configs_from_pipeline_file
python
def get_configs_from_pipeline_file(pipeline_config_path, is_training):
    pipeline_config = pipeline_pb2.NetworkPipelineConfig()
    with open(pipeline_config_path, 'r') as f:
        text_format.Merge(f.read(), pipeline_config)

    model_config = pipeline_config.model_config

    config_file_name = os.path.split(pipeline_config_path)[1].split('.')[0]
    checkpoint_name = model_config.checkpoint_name
    if config_file_name != checkpoint_name:
        raise ValueError('Config and checkpoint names must match.')

    output_root_dir = avod.root_dir() + '/data/outputs/' + checkpoint_name

    paths_config = model_config.paths_config
    if not paths_config.checkpoint_dir:
        checkpoint_dir = output_root_dir + '/checkpoints'
        if is_training:
            if not os.path.exists(checkpoint_dir):
                os.makedirs(checkpoint_dir)
        paths_config.checkpoint_dir = checkpoint_dir

    if not paths_config.logdir:
        paths_config.logdir = output_root_dir + '/logs/'

    if not paths_config.pred_dir:
        paths_config.pred_dir = output_root_dir + '/predictions'

    train_config = pipeline_config.train_config
    eval_config = pipeline_config.eval_config
    dataset_config = pipeline_config.dataset_config

    if is_training:
        experiment_config_path = output_root_dir + '/' + model_config.checkpoint_name
        experiment_config_path += '.config'
        shutil.copy(pipeline_config_path, experiment_config_path)

    return model_config, train_config, eval_config, dataset_config
Reads model configuration from a pipeline_pb2.NetworkPipelineConfig.

Args:
    pipeline_config_path: A path directory to the network pipeline config
    is_training: A boolean flag to indicate training stage, used for
        creating the checkpoint directory which must be created at the
        first training iteration.

Returns:
    model_config: A model_pb2.ModelConfig
    train_config: A train_pb2.TrainConfig
    eval_config: A eval_pb2.EvalConfig
    dataset_config: A kitti_dataset_pb2.KittiDatasetConfig
https://github.com/zengyi-qin/tlnet/blob/11fa48160158b550ad2dc810ed564eebe17e8f5e/avod/builders/config_builder_util.py#L52-L112
import os import shutil from google.protobuf import text_format import avod from avod.protos import model_pb2 from avod.protos import pipeline_pb2 class ConfigObj: pass def proto_to_obj(config): all_fields = list(config.DESCRIPTOR.fields_by_name) config_obj = ConfigObj() for field in all_fields: field_value = eval('config.{}'.format(field)) setattr(config_obj, field, field_value) return config_obj def get_model_config_from_file(config_path): model_config = model_pb2.ModelConfig() with open(config_path, 'r') as f: text_format.Merge(f.read(), model_config) return model_config
Apache License 2.0
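A hedged sketch of loading a pipeline config with the function above. The config file name is a placeholder; whatever file is used, its checkpoint_name field must match the file name, as enforced by the check in the function.

config_path = avod.root_dir() + '/configs/pyramid_cars_with_aug_example.config'  # placeholder path
model_config, train_config, eval_config, dataset_config = \
    get_configs_from_pipeline_file(config_path, is_training=True)
print(model_config.checkpoint_name)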
alliefitter/boto3_type_annotations
boto3_type_annotations_with_docs/boto3_type_annotations/appsync/paginator.py
ListFunctions.paginate
python
def paginate(self, apiId: str, PaginationConfig: Dict = None) -> Dict: pass
Creates an iterator that will paginate through responses from :py:meth:`AppSync.Client.list_functions`. See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/appsync-2017-07-25/ListFunctions>`_ **Request Syntax** :: response_iterator = paginator.paginate( apiId='string', PaginationConfig={ 'MaxItems': 123, 'PageSize': 123, 'StartingToken': 'string' } ) **Response Syntax** :: { 'functions': [ { 'functionId': 'string', 'functionArn': 'string', 'name': 'string', 'description': 'string', 'dataSourceName': 'string', 'requestMappingTemplate': 'string', 'responseMappingTemplate': 'string', 'functionVersion': 'string' }, ], 'NextToken': 'string' } **Response Structure** - *(dict) --* - **functions** *(list) --* A list of ``Function`` objects. - *(dict) --* A function is a reusable entity. Multiple functions can be used to compose the resolver logic. - **functionId** *(string) --* A unique ID representing the ``Function`` object. - **functionArn** *(string) --* The ARN of the ``Function`` object. - **name** *(string) --* The name of the ``Function`` object. - **description** *(string) --* The ``Function`` description. - **dataSourceName** *(string) --* The name of the ``DataSource`` . - **requestMappingTemplate** *(string) --* The ``Function`` request mapping template. Functions support only the 2018-05-29 version of the request mapping template. - **responseMappingTemplate** *(string) --* The ``Function`` response mapping template. - **functionVersion** *(string) --* The version of the request mapping template. Currently only the 2018-05-29 version of the template is supported. - **NextToken** *(string) --* A token to resume pagination. :type apiId: string :param apiId: **[REQUIRED]** The GraphQL API ID. :type PaginationConfig: dict :param PaginationConfig: A dictionary that provides parameters to control pagination. - **MaxItems** *(integer) --* The total number of items to return. If the total number of items available is more than the value specified in max-items then a ``NextToken`` will be provided in the output that you can use to resume pagination. - **PageSize** *(integer) --* The size of each page. - **StartingToken** *(string) --* A token to specify where to start paginating. This is the ``NextToken`` from a previous response. :rtype: dict :returns:
https://github.com/alliefitter/boto3_type_annotations/blob/2a88aa562b1aee6e8a6cc30402980884b3707fbb/boto3_type_annotations_with_docs/boto3_type_annotations/appsync/paginator.py#L237-L310
from typing import Dict from botocore.paginate import Paginator class ListApiKeys(Paginator): def paginate(self, apiId: str, PaginationConfig: Dict = None) -> Dict: pass class ListDataSources(Paginator): def paginate(self, apiId: str, PaginationConfig: Dict = None) -> Dict: pass class ListFunctions(Paginator):
MIT License
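The docstring above documents the AppSync ListFunctions paginator; a short sketch with the real boto3 client follows. The API id is a placeholder and AWS credentials are assumed to be configured in the environment.

import boto3

client = boto3.client('appsync')
paginator = client.get_paginator('list_functions')
for page in paginator.paginate(apiId='EXAMPLE-API-ID'):   # placeholder GraphQL API id
    for fn in page.get('functions', []):
        print(fn['functionId'], fn['name'])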
acroz/pylivy
livy/client.py
LivyClient.close
python
def close(self) -> None: self._client.close()
Close the underlying requests session, if managed by this class.
https://github.com/acroz/pylivy/blob/6c7bf18720345a557f6301ecc02a9c4f5b6fbf78/livy/client.py#L121-L123
import logging from typing import Any, Union, Dict, List, Tuple, Optional import requests from livy.models import ( Version, Session, SessionKind, Statement, StatementKind, Batch, BatchLog, ) Auth = Union[requests.auth.AuthBase, Tuple[str, str]] Verify = Union[bool, str] LOGGER = logging.getLogger(__name__) VALID_LEGACY_SESSION_KINDS = { SessionKind.SPARK, SessionKind.PYSPARK, SessionKind.PYSPARK3, SessionKind.SPARKR, } VALID_SESSION_KINDS = { SessionKind.SPARK, SessionKind.PYSPARK, SessionKind.SPARKR, SessionKind.SQL, SessionKind.SHARED, } class JsonClient: def __init__( self, url: str, auth: Auth = None, verify: Verify = True, requests_session: requests.Session = None, ) -> None: self.url = url self.auth = auth self.verify = verify if requests_session is None: self.session = requests.Session() self.managed_session = True else: self.session = requests_session self.managed_session = False def close(self) -> None: if self.managed_session: self.session.close() def get(self, endpoint: str = "", params: dict = None) -> dict: return self._request("GET", endpoint, params=params) def post(self, endpoint: str, data: dict = None) -> dict: return self._request("POST", endpoint, data) def delete(self, endpoint: str = "") -> dict: return self._request("DELETE", endpoint) def _request( self, method: str, endpoint: str, data: dict = None, params: dict = None, ) -> dict: url = self.url.rstrip("/") + endpoint response = self.session.request( method, url, auth=self.auth, verify=self.verify, json=data, params=params, ) response.raise_for_status() return response.json() class LivyClient: def __init__( self, url: str, auth: Auth = None, verify: Verify = True, requests_session: requests.Session = None, ) -> None: self._client = JsonClient(url, auth, verify, requests_session) self._server_version_cache: Optional[Version] = None
MIT License
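A minimal usage sketch for LivyClient.close above, assuming a Livy server at a placeholder URL. When no external requests.Session is passed to the constructor, close() tears down the session the client created itself.

from livy.client import LivyClient

client = LivyClient('http://localhost:8998')   # placeholder Livy endpoint
try:
    pass   # issue session/batch calls through `client` here
finally:
    client.close()   # closes the internally managed requests.Session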
usi-systems/p4benchmark
p4gen/p4template.py
register_write
python
def register_write(register, field, index): binding = {'register' : register, 'field': field, 'index': index} return read_template('template/actions/write_action.txt', binding)
This method returns a primitive action: write a register at a specific index :param register: the register name :type register: str :param field: the field to get the data from :type field: str :param index: the index on the register :type index: int :returns: str -- the code in plain text :raises: None
https://github.com/usi-systems/p4benchmark/blob/e1b22c106c3458f757a362f57027670cee286c47/p4gen/p4template.py#L405-L420
from string import Template from pkg_resources import resource_string def read_template(filename, binding={}): src = Template(resource_string(__name__, filename)) return src.substitute(binding) def p4_define(): p4_define = read_template('template/define.txt') return p4_define def ethernet_header(): return read_template('template/headers/ethernet.txt') def ethernet(): ethernet_hdr = read_template('template/headers/ethernet.txt') parse_eth = read_template('template/parsers/parse_ethernet.txt') return (ethernet_hdr + parse_eth) def ipv4(checksum=False): ipv4_hdr = read_template('template/headers/ipv4.txt') if checksum: parse_ipv4 = read_template('template/parsers/parse_ipv4_checksum.txt') else: parse_ipv4 = read_template('template/parsers/parse_ipv4.txt') return (ipv4_hdr + parse_ipv4) def tcp(): tcp_hdr = read_template('template/headers/tcp.txt') parse_tcp = read_template('template/parsers/parse_tcp.txt') return (tcp_hdr + parse_tcp) def nop_action(): return read_template('template/actions/nop.txt') def forward_table(): d = { 'tbl_name': 'forward_table' } return read_template('template/tables/forward_table.txt', d) def add_table_no_match(tbl_name, actions='_nop;', tbl_size=1): binding = {'tbl_name': tbl_name, 'actions': actions, 'tbl_size': tbl_size} return read_template('template/tables/table_no_match.txt', binding) def add_default_rule(tbl_name, default_action): binding = {'tbl_name': tbl_name, 'default_action': default_action} return read_template('template/commands/default_action.txt', binding) def add_table(tbl_name, matches='', actions='', tbl_size=1): binding = { 'tbl_name': tbl_name, 'matches' : matches, 'actions' : actions, 'tbl_size': tbl_size} return read_template('template/tables/table.txt', binding) def apply_table(tbl_name): return read_template('template/controls/apply_table.txt', {'tbl_name': tbl_name}) def control(fwd_tbl, applies): d = { 'fwd_tbl' : fwd_tbl, 'applies': applies } return read_template('template/controls/ingress.txt', d) def cli_commands(fwd_tbl, ): return read_template('template/commands/forward.txt', { 'fwd_tbl' : fwd_tbl}) def add_rule(tbl_name, action, match_value, params=''): binding = { 'tbl_name': tbl_name, 'action' : action, 'match_value' : match_value, 'params': params } return read_template('template/commands/add_rule.txt', binding) def default_nop(tbl_name): return add_default_rule(tbl_name, '_nop') def add_header_field(field_name, field_width): return '\t\t{0: <8}: {1};\n'.format(field_name, field_width) def add_header(header_type_name, field_dec): binding = {'header_type_name': header_type_name, 'field_dec': field_dec} return read_template('template/headers/generic.txt', binding) def add_metadata_instance(header_type_name, instance_name): binding = {'header_type_name': header_type_name, 'instance_name': instance_name} return read_template('template/headers/metadata.txt', binding) def select_case(select_key, next_state): return '\t{0: <8}: {1};\n'.format(select_key, next_state) def add_parser(header_type_name, header_name, parser_state_name, select_field, next_states): binding = {'header_type_name': header_type_name, 'header_name': header_name, 'parser_state_name': parser_state_name, 'select_field': select_field, 'next_states': next_states} return read_template('template/parsers/parse_generic.txt', binding) def add_parser_without_select(header_type_name, header_name, parser_state_name, next_state): binding = {'header_type_name': header_type_name, 'header_name': header_name, 'parser_state_name': parser_state_name, 'next_state': next_state} return 
read_template('template/parsers/parse_no_select.txt', binding) def add_compound_action(action_name, params, instruction_set): binding = {'action_name' : action_name, 'params': params, 'instruction_set': instruction_set} return read_template('template/actions/compound_action.txt', binding) def add_register(register_name, element_width, nb_element): binding = {'register_name': register_name, 'element_width': element_width, 'nb_element': nb_element} return read_template('template/states/register.txt', binding) def register_actions(read_set, write_set): binding = {'read_set' : read_set, 'write_set': write_set} return read_template('template/actions/register_actions.txt', binding) def register_read(register, field, index): binding = {'register' : register, 'field': field, 'index': index} return read_template('template/actions/read_action.txt', binding)
Apache License 2.0
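A hedged sketch of calling register_write from the row above; the register, field, and index values are made-up examples, and the returned string is the rendered P4 primitive from template/actions/write_action.txt.

from p4gen.p4template import register_write

# Hypothetical register/field names, for illustration only.
snippet = register_write('r_latency', 'meta.ingress_ts', 0)
print(snippet)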
kuri65536/python-for-android
python-build/python-libs/gdata/samples/oauth/oauth_on_appengine/appengine_utilities/cache.py
Cache.__getitem__
python
def __getitem__(self, key): return self.get(key)
__getitem__ is necessary for this object to emulate a container.
https://github.com/kuri65536/python-for-android/blob/26402a08fc46b09ef94e8d7a6bbc3a54ff9d0891/python-build/python-libs/gdata/samples/oauth/oauth_on_appengine/appengine_utilities/cache.py#L245-L249
import datetime import pickle import random import __main__ from google.appengine.ext import db from google.appengine.api import memcache DEFAULT_TIMEOUT = 3600 CLEAN_CHECK_PERCENT = 50 MAX_HITS_TO_CLEAN = 100 class _AppEngineUtilities_Cache(db.Model): cachekey = db.StringProperty() createTime = db.DateTimeProperty(auto_now_add=True) timeout = db.DateTimeProperty() value = db.BlobProperty() class Cache(object): def __init__(self, clean_check_percent = CLEAN_CHECK_PERCENT, max_hits_to_clean = MAX_HITS_TO_CLEAN, default_timeout = DEFAULT_TIMEOUT): self.clean_check_percent = clean_check_percent self.max_hits_to_clean = max_hits_to_clean self.default_timeout = default_timeout if random.randint(1, 100) < self.clean_check_percent: self._clean_cache() if 'AEU_Events' in __main__.__dict__: __main__.AEU_Events.fire_event('cacheInitialized') def _clean_cache(self): query = _AppEngineUtilities_Cache.all() query.filter('timeout < ', datetime.datetime.now()) results = query.fetch(self.max_hits_to_clean) db.delete(results) def _validate_key(self, key): if key == None: raise KeyError def _validate_value(self, value): if value == None: raise ValueError def _validate_timeout(self, timeout): if timeout == None: timeout = datetime.datetime.now() + datetime.timedelta(seconds=DEFAULT_TIMEOUT) if type(timeout) == type(1): timeout = datetime.datetime.now() + datetime.timedelta(seconds = timeout) if type(timeout) != datetime.datetime: raise TypeError if timeout < datetime.datetime.now(): raise ValueError return timeout def add(self, key = None, value = None, timeout = None): self._validate_key(key) self._validate_value(value) timeout = self._validate_timeout(timeout) if key in self: raise KeyError cacheEntry = _AppEngineUtilities_Cache() cacheEntry.cachekey = key cacheEntry.value = pickle.dumps(value) cacheEntry.timeout = timeout try: cacheEntry.put() except: pass memcache_timeout = timeout - datetime.datetime.now() memcache.set('cache-'+key, value, int(memcache_timeout.seconds)) if 'AEU_Events' in __main__.__dict__: __main__.AEU_Events.fire_event('cacheAdded') def set(self, key = None, value = None, timeout = None): self._validate_key(key) self._validate_value(value) timeout = self._validate_timeout(timeout) cacheEntry = self._read(key) if not cacheEntry: cacheEntry = _AppEngineUtilities_Cache() cacheEntry.cachekey = key cacheEntry.value = pickle.dumps(value) cacheEntry.timeout = timeout try: cacheEntry.put() except: pass memcache_timeout = timeout - datetime.datetime.now() memcache.set('cache-'+key, value, int(memcache_timeout.seconds)) if 'AEU_Events' in __main__.__dict__: __main__.AEU_Events.fire_event('cacheSet') def _read(self, key = None): query = _AppEngineUtilities_Cache.all() query.filter('cachekey', key) query.filter('timeout > ', datetime.datetime.now()) results = query.fetch(1) if len(results) is 0: return None return results[0] if 'AEU_Events' in __main__.__dict__: __main__.AEU_Events.fire_event('cacheReadFromDatastore') if 'AEU_Events' in __main__.__dict__: __main__.AEU_Events.fire_event('cacheRead') def delete(self, key = None): memcache.delete('cache-'+key) result = self._read(key) if result: if 'AEU_Events' in __main__.__dict__: __main__.AEU_Events.fire_event('cacheDeleted') result.delete() def get(self, key): mc = memcache.get('cache-'+key) if mc: if 'AEU_Events' in __main__.__dict__: __main__.AEU_Events.fire_event('cacheReadFromMemcache') if 'AEU_Events' in __main__.__dict__: __main__.AEU_Events.fire_event('cacheRead') return mc result = self._read(key) if result: timeout = result.timeout - 
datetime.datetime.now() memcache.set('cache-'+key, pickle.loads(result.value), int(timeout.seconds)) return pickle.loads(result.value) else: raise KeyError def get_many(self, keys): dict = {} for key in keys: value = self.get(key) if value is not None: dict[key] = val return dict
Apache License 2.0
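A usage sketch for the Cache entry above; it only runs inside a Google App Engine (Python 2) environment, and the key/value are illustrative. Since __getitem__ simply forwards to get(), bracket access and get() are interchangeable.

from appengine_utilities.cache import Cache

cache = Cache()
cache.set('greeting', 'hello', timeout=60)
print(cache['greeting'])        # bracket access goes through __getitem__ -> get()
print(cache.get('greeting'))    # equivalent call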
airshipit/drydock
python/drydock_provisioner/drydock_client/client.py
DrydockClient.create_task
python
def create_task(self, design_ref, task_action, node_filter=None): endpoint = 'v1.0/tasks' task_dict = { 'action': task_action, 'design_ref': design_ref, 'node_filter': node_filter, } self.logger.debug("drydock_client is calling %s API: body is %s" % (endpoint, str(task_dict))) resp = self.session.post(endpoint, data=task_dict) self._check_response(resp) return resp.json()
Create a new task in Drydock :param string design_ref: A URI reference to the design documents for this task :param string task_action: The action that should be executed :param dict node_filter: A filter for narrowing the scope of the task. Valid fields are 'node_names', 'rack_names', 'node_tags'. :return: The dictionary representation of the created task
https://github.com/airshipit/drydock/blob/c90fa60e2a156953563ea9fd9d9aa848171c8325/python/drydock_provisioner/drydock_client/client.py#L132-L158
import logging from drydock_provisioner import error as errors class DrydockClient(object): def __init__(self, session): self.session = session self.logger = logging.getLogger(__name__) def get_task_build_data(self, task_id): endpoint = 'v1.0/tasks/{}/builddata'.format(task_id) resp = self.session.get(endpoint) self._check_response(resp) return resp.json() def get_node_build_data(self, nodename, latest=True): endpoint = 'v1.0/nodes/{}/builddata?latest={}'.format(nodename, latest) resp = self.session.get(endpoint) self._check_response(resp) return resp.json() def get_nodes(self): endpoint = 'v1.0/nodes' resp = self.session.get(endpoint) self._check_response(resp) return resp.json() def get_nodes_for_filter(self, design_ref, node_filter=None): endpoint = 'v1.0/nodefilter' body = {'node_filter': node_filter, 'design_ref': design_ref} resp = self.session.post(endpoint, data=body) self._check_response(resp) return resp.json() def get_tasks(self): endpoint = "v1.0/tasks" resp = self.session.get(endpoint) self._check_response(resp) return resp.json() def get_task(self, task_id, builddata=None, subtaskerrors=None, layers=None): endpoint = "v1.0/tasks/%s" % (task_id) query_params = [] if builddata: query_params.append('builddata=true') if subtaskerrors: query_params.append('subtaskerrors=true') if layers: query_params.append('layers=%s' % layers) if query_params: endpoint = '%s?%s' % (endpoint, '&'.join(query_params)) resp = self.session.get(endpoint) self._check_response(resp) return resp.json()
Apache License 2.0
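A heavily hedged sketch of DrydockClient.create_task; the design_ref URI, the 'verify_site' action, and the node filter below are placeholders chosen for illustration, and `session` is assumed to be an already-authenticated Drydock session object (the constructor above only stores it and a logger).

from drydock_provisioner.drydock_client.client import DrydockClient

def verify_site(session):
    """Submit a hypothetical verify_site task; `session` is an authenticated Drydock session."""
    client = DrydockClient(session)
    return client.create_task(
        design_ref='deckhand+http://deckhand-api/revisions/1/rendered-documents',  # placeholder URI
        task_action='verify_site',                                                 # placeholder action
        node_filter={'node_names': ['node01']})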
criteo/criteo-python-marketing-sdk
criteo_marketing/models/shipping.py
Shipping.__init__
python
def __init__(self, country=None, location_id=None, postal_code=None, price=None, region=None, service=None): self._country = None self._location_id = None self._postal_code = None self._price = None self._region = None self._service = None self.discriminator = None if country is not None: self.country = country if location_id is not None: self.location_id = location_id if postal_code is not None: self.postal_code = postal_code if price is not None: self.price = price if region is not None: self.region = region if service is not None: self.service = service
Shipping - a model defined in OpenAPI
https://github.com/criteo/criteo-python-marketing-sdk/blob/1093f86cf035cb6ce657b47f0f5e768c1fc2271c/criteo_marketing/models/shipping.py#L51-L73
import pprint import re import six class Shipping(object): """ Attributes: openapi_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ openapi_types = { 'country': 'str', 'location_id': 'int', 'postal_code': 'str', 'price': 'Price', 'region': 'str', 'service': 'str' } attribute_map = { 'country': 'country', 'location_id': 'locationId', 'postal_code': 'postalCode', 'price': 'price', 'region': 'region', 'service': 'service' }
Apache License 2.0
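A small sketch constructing the generated Shipping model above; the field values are arbitrary, and it is assumed (as is usual for OpenAPI-generated models in this package) that each attribute in attribute_map is exposed as a matching property.

from criteo_marketing.models.shipping import Shipping

shipping = Shipping(country='FR', region='IDF', postal_code='75001', service='Standard')
print(shipping.country, shipping.postal_code)   # properties assumed to mirror the ctor kwargs
print(Shipping.attribute_map['postal_code'])    # -> 'postalCode', the JSON key mapping shown above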
copier-org/copier
copier/user_data.py
parse_yaml_string
python
def parse_yaml_string(string: str) -> Any: try: return yaml.safe_load(string) except yaml.error.YAMLError as error: raise ValueError(str(error))
Parse a YAML string and raise a ValueError if parsing fails. This method is needed because :meth:`prompt` requires a ``ValueError`` to repeat failed questions.
https://github.com/copier-org/copier/blob/ec1a26c45d7e59ceba55ee413d4fa3d1aee4aadb/copier/user_data.py#L383-L392
import datetime import json import warnings from collections import ChainMap from dataclasses import field from hashlib import sha512 from os import urandom from pathlib import Path from typing import ( TYPE_CHECKING, Any, Callable, ChainMap as t_ChainMap, Dict, List, Union, ) import yaml from jinja2 import UndefinedError from jinja2.sandbox import SandboxedEnvironment from prompt_toolkit.lexers import PygmentsLexer from pydantic import validator from pydantic.dataclasses import dataclass from pygments.lexers.data import JsonLexer, YamlLexer from questionary.prompts.common import Choice from .errors import InvalidTypeError, UserMessageError from .tools import cast_str_to_bool, force_str_end from .types import AllowArbitraryTypes, AnyByStrDict, OptStr, OptStrOrPath, StrOrPath try: from functools import cached_property except ImportError: from backports.cached_property import cached_property if TYPE_CHECKING: pass def _now(): warnings.warn( "'now' will be removed in a future release of Copier.\n" "Please use this instead: {{ '%Y-%m-%d %H:%M:%S' | strftime }}\n" "strftime format reference https://strftime.org/", FutureWarning, ) return datetime.datetime.utcnow() def _make_secret(): warnings.warn( "'make_secret' will be removed in a future release of Copier.\n" "Please use this instead: {{ 999999999999999999999999999999999|ans_random|hash('sha512') }}\n" "random and hash filters documentation: https://docs.ansible.com/ansible/2.3/playbooks_filters.html", FutureWarning, ) return sha512(urandom(48)).hexdigest() DEFAULT_DATA: AnyByStrDict = { "now": _now, "make_secret": _make_secret, } @dataclass class AnswersMap: local: AnyByStrDict = field(default_factory=dict, init=False) user: AnyByStrDict = field(default_factory=dict) init: AnyByStrDict = field(default_factory=dict) metadata: AnyByStrDict = field(default_factory=dict) last: AnyByStrDict = field(default_factory=dict) default: AnyByStrDict = field(default_factory=dict) @cached_property def combined(self) -> t_ChainMap[str, Any]: return ChainMap( self.local, self.user, self.init, self.metadata, self.last, self.default, DEFAULT_DATA, ) def old_commit(self) -> OptStr: return self.last.get("_commit") @dataclass(config=AllowArbitraryTypes) class Question: var_name: str answers: AnswersMap jinja_env: SandboxedEnvironment choices: Union[Dict[Any, Any], List[Any]] = field(default_factory=list) default: Any = None help: str = "" ask_user: bool = False multiline: Union[str, bool] = False placeholder: str = "" secret: bool = False type: str = "" when: Union[str, bool] = True @validator("var_name") def _check_var_name(cls, v): if v in DEFAULT_DATA: raise ValueError("Invalid question name") return v @validator("type", always=True) def _check_type(cls, v, values): if v == "": default_type_name = type(values.get("default")).__name__ v = default_type_name if default_type_name in CAST_STR_TO_NATIVE else "yaml" return v def get_default(self) -> Any: cast_fn = self.get_cast_fn() try: result = self.answers.init[self.var_name] except KeyError: try: result = self.answers.last[self.var_name] except KeyError: result = self.render_value(self.default) result = cast_answer_type(result, cast_fn) return result def get_default_rendered(self) -> Union[bool, str, Choice, None]: default = self.get_default() if self.choices: for choice in self._formatted_choices: if choice.value == default: return choice return None if isinstance(default, bool) and self.get_type_name() == "bool": return default if default is None: return "" if self.get_type_name() == "json": return 
json.dumps(default, indent=2 if self.get_multiline() else None) if self.get_type_name() == "yaml": return yaml.safe_dump( default, default_flow_style=not self.get_multiline(), width=float("inf") ).strip() return str(default) @cached_property def _formatted_choices(self) -> List[Choice]: result = [] choices = self.choices if isinstance(self.choices, dict): choices = list(self.choices.items()) for choice in choices: if isinstance(choice, dict): name = choice["name"] value = choice["value"] elif isinstance(choice, (tuple, list)): name, value = choice else: name = value = choice name = str(self.render_value(name)) value = self.render_value(value) result.append(Choice(name, value)) return result def filter_answer(self, answer) -> Any: if answer == self.get_default_rendered(): return self.get_default() return cast_answer_type(answer, self.get_cast_fn()) def get_message(self) -> str: message = "" if self.help: rendered_help = self.render_value(self.help) message = force_str_end(rendered_help) message += f"{self.var_name}? Format: {self.get_type_name()}" return message def get_placeholder(self) -> str: return self.render_value(self.placeholder) def get_questionary_structure(self) -> AnyByStrDict: lexer = None result: AnyByStrDict = { "default": self.get_default_rendered(), "filter": self.filter_answer, "message": self.get_message(), "mouse_support": True, "name": self.var_name, "qmark": "🕵️" if self.secret else "🎤", "when": self.get_when, } questionary_type = "input" type_name = self.get_type_name() if type_name == "bool": questionary_type = "confirm" if self.choices: questionary_type = "select" result["choices"] = self._formatted_choices if questionary_type == "input": if self.secret: questionary_type = "password" elif type_name == "yaml": lexer = PygmentsLexer(YamlLexer) elif type_name == "json": lexer = PygmentsLexer(JsonLexer) if lexer: result["lexer"] = lexer result["multiline"] = self.get_multiline() placeholder = self.get_placeholder() if placeholder: result["placeholder"] = placeholder result["validate"] = self.validate_answer result.update({"type": questionary_type}) return result def get_cast_fn(self) -> Callable: type_name = self.get_type_name() if type_name not in CAST_STR_TO_NATIVE: raise InvalidTypeError("Invalid question type") return CAST_STR_TO_NATIVE.get(type_name, parse_yaml_string) def get_type_name(self) -> str: return self.render_value(self.type) def get_multiline(self) -> bool: multiline = self.render_value(self.multiline) multiline = cast_answer_type(multiline, cast_str_to_bool) return bool(multiline) def validate_answer(self, answer) -> bool: cast_fn = self.get_cast_fn() try: cast_fn(answer) return True except Exception: return False def get_when(self, answers) -> bool: if ( not self.ask_user or self.var_name in self.answers.init ): return False when = self.when when = self.render_value(when) when = cast_answer_type(when, cast_str_to_bool) return bool(when) def render_value(self, value: Any) -> str: try: template = self.jinja_env.from_string(value) except TypeError: return value try: return template.render(**self.answers.combined) except UndefinedError as error: raise UserMessageError(str(error)) from error
MIT License
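A quick usage sketch for parse_yaml_string; the inputs are arbitrary strings, and the last call shows the YAMLError-to-ValueError translation the docstring describes.

from copier.user_data import parse_yaml_string

print(parse_yaml_string('42'))           # -> 42
print(parse_yaml_string('[a, b, c]'))    # -> ['a', 'b', 'c']
try:
    parse_yaml_string('[unclosed')       # malformed YAML
except ValueError as exc:
    print('rejected:', exc)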
dw/mitogen
ansible_mitogen/connection.py
CallChain.call
python
def call(self, func, *args, **kwargs): t0 = time.time() try: recv = self.call_async(func, *args, **kwargs) return self._rethrow(recv) finally: LOG.debug('Call took %d ms: %r', 1000 * (time.time() - t0), mitogen.parent.CallSpec(func, args, kwargs))
Like :meth:`mitogen.parent.CallChain.call`, but log timings.
https://github.com/dw/mitogen/blob/cc8f9a016965876bcd9ec390d53035d6ed842b07/ansible_mitogen/connection.py#L444-L454
from __future__ import absolute_import from __future__ import unicode_literals import errno import logging import os import pprint import stat import sys import time import ansible.constants as C import ansible.errors import ansible.plugins.connection import ansible.utils.shlex import mitogen.core import mitogen.fork import mitogen.utils import ansible_mitogen.mixins import ansible_mitogen.parsing import ansible_mitogen.process import ansible_mitogen.services import ansible_mitogen.target import ansible_mitogen.transport_config LOG = logging.getLogger(__name__) task_vars_msg = ( 'could not recover task_vars. This means some connection ' 'settings may erroneously be reset to their defaults. ' 'Please report a bug if you encounter this message.' ) def get_remote_name(spec): if spec.mitogen_mask_remote_name(): return 'ansible' return None def optional_int(value): try: return int(value) except (TypeError, ValueError): return None def convert_bool(obj): if isinstance(obj, bool): return obj if str(obj).lower() in ('no', 'false', '0'): return False if str(obj).lower() not in ('yes', 'true', '1'): raise ansible.errors.AnsibleConnectionFailure( 'expected yes/no/true/false/0/1, got %r' % (obj,) ) return True def default(value, default): if value is None: return default return value def _connect_local(spec): return { 'method': 'local', 'kwargs': { 'python_path': spec.python_path(), } } def _connect_ssh(spec): if C.HOST_KEY_CHECKING: check_host_keys = 'enforce' else: check_host_keys = 'ignore' private_key_file = spec.private_key_file() if private_key_file is not None: private_key_file = os.path.expanduser(private_key_file) return { 'method': 'ssh', 'kwargs': { 'check_host_keys': check_host_keys, 'hostname': spec.remote_addr(), 'username': spec.remote_user(), 'compression': convert_bool( default(spec.mitogen_ssh_compression(), True) ), 'password': spec.password(), 'port': spec.port(), 'python_path': spec.python_path(), 'identity_file': private_key_file, 'identities_only': False, 'ssh_path': spec.ssh_executable(), 'connect_timeout': spec.ansible_ssh_timeout(), 'ssh_args': spec.ssh_args(), 'ssh_debug_level': spec.mitogen_ssh_debug_level(), 'remote_name': get_remote_name(spec), 'keepalive_count': ( spec.mitogen_ssh_keepalive_count() or 10 ), 'keepalive_interval': ( spec.mitogen_ssh_keepalive_interval() or 30 ), } } def _connect_buildah(spec): return { 'method': 'buildah', 'kwargs': { 'username': spec.remote_user(), 'container': spec.remote_addr(), 'python_path': spec.python_path(), 'connect_timeout': spec.ansible_ssh_timeout() or spec.timeout(), 'remote_name': get_remote_name(spec), } } def _connect_docker(spec): return { 'method': 'docker', 'kwargs': { 'username': spec.remote_user(), 'container': spec.remote_addr(), 'python_path': spec.python_path(rediscover_python=True), 'connect_timeout': spec.ansible_ssh_timeout() or spec.timeout(), 'remote_name': get_remote_name(spec), } } def _connect_kubectl(spec): return { 'method': 'kubectl', 'kwargs': { 'pod': spec.remote_addr(), 'python_path': spec.python_path(), 'connect_timeout': spec.ansible_ssh_timeout() or spec.timeout(), 'kubectl_path': spec.mitogen_kubectl_path(), 'kubectl_args': spec.extra_args(), 'remote_name': get_remote_name(spec), } } def _connect_jail(spec): return { 'method': 'jail', 'kwargs': { 'username': spec.remote_user(), 'container': spec.remote_addr(), 'python_path': spec.python_path(), 'connect_timeout': spec.ansible_ssh_timeout() or spec.timeout(), 'remote_name': get_remote_name(spec), } } def _connect_lxc(spec): return { 'method': 'lxc', 
'kwargs': { 'container': spec.remote_addr(), 'python_path': spec.python_path(), 'lxc_attach_path': spec.mitogen_lxc_attach_path(), 'connect_timeout': spec.ansible_ssh_timeout() or spec.timeout(), 'remote_name': get_remote_name(spec), } } def _connect_lxd(spec): return { 'method': 'lxd', 'kwargs': { 'container': spec.remote_addr(), 'python_path': spec.python_path(), 'lxc_path': spec.mitogen_lxc_path(), 'connect_timeout': spec.ansible_ssh_timeout() or spec.timeout(), 'remote_name': get_remote_name(spec), } } def _connect_machinectl(spec): return _connect_setns(spec, kind='machinectl') def _connect_setns(spec, kind=None): return { 'method': 'setns', 'kwargs': { 'container': spec.remote_addr(), 'username': spec.remote_user(), 'python_path': spec.python_path(), 'kind': kind or spec.mitogen_kind(), 'docker_path': spec.mitogen_docker_path(), 'lxc_path': spec.mitogen_lxc_path(), 'lxc_info_path': spec.mitogen_lxc_info_path(), 'machinectl_path': spec.mitogen_machinectl_path(), } } def _connect_su(spec): return { 'method': 'su', 'enable_lru': True, 'kwargs': { 'username': spec.become_user(), 'password': spec.become_pass(), 'python_path': spec.python_path(), 'su_path': spec.become_exe(), 'connect_timeout': spec.timeout(), 'remote_name': get_remote_name(spec), } } def _connect_sudo(spec): return { 'method': 'sudo', 'enable_lru': True, 'kwargs': { 'username': spec.become_user(), 'password': spec.become_pass(), 'python_path': spec.python_path(), 'sudo_path': spec.become_exe(), 'connect_timeout': spec.timeout(), 'sudo_args': spec.sudo_args(), 'remote_name': get_remote_name(spec), } } def _connect_doas(spec): return { 'method': 'doas', 'enable_lru': True, 'kwargs': { 'username': spec.become_user(), 'password': spec.become_pass(), 'python_path': spec.python_path(), 'doas_path': spec.become_exe(), 'connect_timeout': spec.timeout(), 'remote_name': get_remote_name(spec), } } def _connect_mitogen_su(spec): return { 'method': 'su', 'kwargs': { 'username': spec.remote_user(), 'password': spec.password(), 'python_path': spec.python_path(), 'su_path': spec.become_exe(), 'connect_timeout': spec.timeout(), 'remote_name': get_remote_name(spec), } } def _connect_mitogen_sudo(spec): return { 'method': 'sudo', 'kwargs': { 'username': spec.remote_user(), 'password': spec.password(), 'python_path': spec.python_path(), 'sudo_path': spec.become_exe(), 'connect_timeout': spec.timeout(), 'sudo_args': spec.sudo_args(), 'remote_name': get_remote_name(spec), } } def _connect_mitogen_doas(spec): return { 'method': 'doas', 'kwargs': { 'username': spec.remote_user(), 'password': spec.password(), 'python_path': spec.python_path(), 'doas_path': spec.ansible_doas_exe(), 'connect_timeout': spec.timeout(), 'remote_name': get_remote_name(spec), } } CONNECTION_METHOD = { 'buildah': _connect_buildah, 'docker': _connect_docker, 'kubectl': _connect_kubectl, 'jail': _connect_jail, 'local': _connect_local, 'lxc': _connect_lxc, 'lxd': _connect_lxd, 'machinectl': _connect_machinectl, 'setns': _connect_setns, 'ssh': _connect_ssh, 'smart': _connect_ssh, 'su': _connect_su, 'sudo': _connect_sudo, 'doas': _connect_doas, 'mitogen_su': _connect_mitogen_su, 'mitogen_sudo': _connect_mitogen_sudo, 'mitogen_doas': _connect_mitogen_doas, } class CallChain(mitogen.parent.CallChain): call_aborted_msg = ( 'Mitogen was disconnected from the remote environment while a call ' 'was in-progress. If you feel this is in error, please file a bug. 
' 'Original error was: %s' ) def __init__(self, connection, context, pipelined=False): super(CallChain, self).__init__(context, pipelined) self._connection = connection def _rethrow(self, recv): try: return recv.get().unpickle() except mitogen.core.ChannelError as e: self._connection.reset() raise ansible.errors.AnsibleConnectionFailure( self.call_aborted_msg % (e,) )
BSD 3-Clause New or Revised License
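CallChain.call above only adds a timing log around the parent implementation; the same pattern is shown standalone below with generic names that are not part of mitogen's API.

import logging
import time

LOG = logging.getLogger(__name__)

def timed_call(func, *args, **kwargs):
    # Same shape as CallChain.call: run the call, log the elapsed time in the finally block.
    t0 = time.time()
    try:
        return func(*args, **kwargs)
    finally:
        LOG.debug('Call took %d ms: %r', 1000 * (time.time() - t0), (func, args, kwargs))

timed_call(sum, [1, 2, 3])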
googlecloudplatform/appengine-python-standard
src/google/appengine/api/apiproxy_stub_map.py
ListOfHooks.Append
python
def Append(self, key, function, service=None): return self.__Insert(len(self), key, function, service)
Appends a hook at the end of the list. Args: key: A unique key (within the module) for this particular function. If something from the same module with the same key is already registered, nothing will be added. function: The hook to be added. service: Optional argument that restricts the hook to a particular API. Returns: `True` if the collection was modified.
https://github.com/googlecloudplatform/appengine-python-standard/blob/42c99c7a83f4ed50c724ecdde119a606a3ca58f3/src/google/appengine/api/apiproxy_stub_map.py#L148-L161
from concurrent import futures import inspect import threading import six from google.appengine.api import apiproxy_rpc from google.appengine.runtime import apiproxy_errors def CreateRPC(service, stubmap=None): if stubmap is None: stubmap = apiproxy stub = stubmap.GetStub(service) assert stub, 'No api proxy found for service "%s"' % service assert hasattr(stub, 'CreateRPC'), (('The service "%s" doesn\'t have ' + 'a CreateRPC method.') % service) return stub.CreateRPC() def MakeSyncCall(service, call, request, response, stubmap=None): if stubmap is None: stubmap = apiproxy return stubmap.MakeSyncCall(service, call, request, response) class ListOfHooks(object): def __init__(self): self.__content = [] self.__unique_keys = set() def __len__(self): return self.__content.__len__() def __Insert(self, index, key, function, service=None): unique_key = (key, inspect.getmodule(function)) if unique_key in self.__unique_keys: return False argsspec_func = inspect.getfullargspec if six.PY3 else inspect.getargspec num_args = len(argsspec_func(function)[0]) if (inspect.ismethod(function)): num_args -= 1 self.__content.insert(index, (key, function, service, num_args)) self.__unique_keys.add(unique_key) return True
Apache License 2.0
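A usage sketch for ListOfHooks.Append; the hook function and key are illustrative, and the code assumes an environment where the google.appengine packages are importable.

from google.appengine.api.apiproxy_stub_map import ListOfHooks

def log_hook(service, call, request, response):    # illustrative hook signature
    print('api call:', service, call)

hooks = ListOfHooks()
print(hooks.Append('log_hook', log_hook))              # True: appended at the end of the list
print(hooks.Append('log_hook', log_hook), len(hooks))  # False 1: same (key, module) is ignored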
gibiansky/python-pipes
pipes.py
filter_by_index
python
def filter_by_index(generator, indices): for index, value in enumerate(generator): if index in indices: yield index, value
Select elements from a stream by their indices.
https://github.com/gibiansky/python-pipes/blob/a6159268db908c4b53d514fbf8ac458704e03991/pipes.py#L304-L308
from typing import Iterator, Tuple, TypeVar, Union, Callable, Any from collections import Counter, namedtuple, defaultdict from numbers import Number from threading import Thread import os import subprocess PipeSinkStr = TypeVar('PipeSinkStr', str, 'Pipe', 'Sink') PipeFuncOutput = TypeVar('PipeFuncOutput') PipeFuncInput = TypeVar('PipeFuncInput') class Pipe(object): def __init__(self, func: Callable[[Iterator[PipeFuncInput]], Iterator[PipeFuncOutput]] ) -> None: self.func = func def run(self, generator: Iterator[PipeFuncInput] ) -> Iterator[PipeFuncOutput]: for x in self.func(generator): yield x def __rshift__(self, rhs: PipeSinkStr) -> Union['Pipe', 'Sink']: if isinstance(rhs, Pipe): return Pipe(lambda generator: rhs.run(self.run(generator))) elif isinstance(rhs, Sink): return Sink(lambda generator: rhs.run(self.run(generator))) elif isinstance(rhs, str): return Sink(lambda generator: write(rhs).run(self.run(generator))) else: raise TypeError("Unexpected pipe type {0}".format(type(rhs))) class Sink(object): def __init__(self, func): self.func = func def run(self, generator): return self.func(generator) class Source(object): def __init__(self, generator): self.generator = generator def __rshift__(self, rhs): if isinstance(rhs, Pipe): return Source(value for value in rhs.run(self.generator)) elif isinstance(rhs, Sink): return rhs.run(self.generator) elif isinstance(rhs, str): return write(rhs).run(self.generator) else: raise TypeError("Unknown pipe type {0}".format(type(rhs))) def source(generator): def f(*args, **kwargs): return Source(generator(*args, **kwargs)) f.__doc__ = generator.__doc__ return f def sink(consumer): def f(*args, **kwargs): return Sink(lambda g: consumer(g, *args, **kwargs)) f.__doc__ = consumer.__doc__ return f def transform(transformation): def f(*args, **kwargs): def t(generator): for value in generator: yield transformation(value, *args, **kwargs) return Pipe(t) f.__doc__ = transformation.__doc__ return f def pipe(p): def f(*args, **kwargs): return Pipe(lambda generator: p(generator, *args, **kwargs)) f.__doc__ = p.__doc__ return f def foreach(transformation): def f(*args, **kwargs): def t(generator): for value in generator: for yielded in transformation(value, *args, **kwargs): yield yielded return Pipe(t) f.__doc__ = transformation.__doc__ return f @source def read(*filenames): chunk_size = 1024 for filename in filenames: with open(filename, "rb") as handle: while True: data = handle.read(chunk_size) if data: yield Chunk(data) else: break def cat(*filenames): return read(*filenames) >> lines() @sink def write(generator, filename): with open(filename, "wb") as handle: for line in generator: if isinstance(line, Chunk): line = line.content if isinstance(line, str): line = line.encode('utf-8') handle.write(line) @foreach def require(value, predicate): if predicate(value): yield value @pipe
MIT License
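filter_by_index is a small pure generator, so the sketch below simply repeats its four-line definition from the row and drives it on an example iterator (the input string and index set are arbitrary).

def filter_by_index(generator, indices):
    # Copied from the row above: keep only elements whose position is in `indices`.
    for index, value in enumerate(generator):
        if index in indices:
            yield index, value

for index, value in filter_by_index(iter('abcdef'), {1, 3, 5}):
    print(index, value)   # (1, 'b'), (3, 'd'), (5, 'f')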
pmatigakis/hopfieldnet
src/hopfieldnet/net.py
HopfieldNetwork.set_weights
python
def set_weights(self, weights): if weights.shape != (self._num_inputs, self._num_inputs): raise InvalidWeightsException() self._weights = weights
Update the weights array
https://github.com/pmatigakis/hopfieldnet/blob/c3e850d41e94383d8f6d2bf079ac706268254ba9/src/hopfieldnet/net.py#L16-L21
import numpy as np from random import randint, shuffle class InvalidWeightsException(Exception): pass class InvalidNetworkInputException(Exception): pass class HopfieldNetwork(object): def __init__(self, num_inputs): self._num_inputs = num_inputs self._weights = np.random.uniform(-1.0, 1.0, (num_inputs, num_inputs))
MIT License
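A usage sketch for HopfieldNetwork.set_weights; the network size is arbitrary, and the second call shows the InvalidWeightsException raised for a mismatched shape.

import numpy as np
from hopfieldnet.net import HopfieldNetwork, InvalidWeightsException

net = HopfieldNetwork(4)
net.set_weights(np.zeros((4, 4)))        # accepted: shape matches (num_inputs, num_inputs)
try:
    net.set_weights(np.zeros((4, 3)))    # wrong shape
except InvalidWeightsException:
    print('rejected mismatched weight matrix')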
softlayer/softlayer-python
SoftLayer/managers/account.py
AccountManager.get_summary
python
def get_summary(self): mask = """mask[ nextInvoiceTotalAmount, pendingInvoice[invoiceTotalAmount], blockDeviceTemplateGroupCount, dedicatedHostCount, domainCount, hardwareCount, networkStorageCount, openTicketCount, networkVlanCount, subnetCount, userCount, virtualGuestCount ] """ return self.client.call('Account', 'getObject', mask=mask)
Gets some basic account information :return: Account object
https://github.com/softlayer/softlayer-python/blob/98feac7db01b50eddeeb45769182ab978ebeefc3/SoftLayer/managers/account.py#L34-L54
import logging from SoftLayer.exceptions import SoftLayerAPIError from SoftLayer import utils LOGGER = logging.getLogger(__name__) class AccountManager(utils.IdentifierMixin, object): _DEFAULT_BILLING_ITEM_MASK = """mask[ orderItem[id,order[id,userRecord[id,email,displayName,userStatus]]], nextInvoiceTotalRecurringAmount, location, hourlyFlag, children ]""" def __init__(self, client): self.client = client
MIT License
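A hedged sketch for AccountManager.get_summary; it assumes SoftLayer credentials are available to create_client_from_env(), and only prints a couple of the counters named in the mask above.

import SoftLayer
from SoftLayer.managers.account import AccountManager

client = SoftLayer.create_client_from_env()   # credentials from env/config file (assumption)
summary = AccountManager(client).get_summary()
print(summary.get('virtualGuestCount'), summary.get('openTicketCount'))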
python-useful-helpers/exec-helpers
exec_helpers/api.py
ExecHelper.__enter__
python
def __enter__(self) -> ExecHelper: self.lock.acquire() return self
Get context manager. :return: exec helper instance with entered context manager :rtype: ExecHelper .. versionchanged:: 1.1.0 lock on enter
https://github.com/python-useful-helpers/exec-helpers/blob/3e0adfa7dded72ac1c9c93bd88db070f4c9050b6/exec_helpers/api.py#L302-L311
from __future__ import annotations import abc import datetime import logging import pathlib import threading import typing from exec_helpers import constants from exec_helpers import exceptions from exec_helpers import exec_result from exec_helpers import proc_enums from exec_helpers.exec_result import OptionalStdinT from exec_helpers.proc_enums import ExitCodeT from . import _helpers if typing.TYPE_CHECKING: import types __all__ = ( "ExecHelper", "ExecuteAsyncResult", "CalledProcessErrorSubClassT", "OptionalStdinT", "OptionalTimeoutT", "CommandT", "LogMaskReT", "ErrorInfoT", "ChRootPathSetT", "ExpectedExitCodesT", ) CommandT = typing.Union[str, typing.Iterable[str]] LogMaskReT = typing.Optional[str] ErrorInfoT = typing.Optional[str] ChRootPathSetT = typing.Optional[typing.Union[str, pathlib.Path]] ExpectedExitCodesT = typing.Iterable[ExitCodeT] OptionalTimeoutT = typing.Union[int, float, None] CalledProcessErrorSubClassT = typing.Type[exceptions.CalledProcessError] class ExecuteAsyncResult(typing.NamedTuple): interface: typing.Any stdin: typing.Optional[typing.Any] stderr: typing.Optional[typing.Any] stdout: typing.Optional[typing.Any] started: datetime.datetime class ExecuteContext(typing.ContextManager[ExecuteAsyncResult], abc.ABC): __slots__ = ( "__command", "__stdin", "__open_stdout", "__open_stderr", "__logger", ) def __init__( self, *, command: str, stdin: typing.Optional[bytes] = None, open_stdout: bool = True, open_stderr: bool = True, logger: logging.Logger, **kwargs: typing.Any, ) -> None: self.__command = command self.__stdin = stdin self.__open_stdout = open_stdout self.__open_stderr = open_stderr self.__logger = logger if kwargs: self.__logger.warning(f"Unexpected arguments: {kwargs!r}.", stack_info=True) @property def logger(self) -> logging.Logger: return self.__logger @property def command(self) -> str: return self.__command @property def stdin(self) -> typing.Optional[bytes]: return self.__stdin @property def open_stdout(self) -> bool: return self.__open_stdout @property def open_stderr(self) -> bool: return self.__open_stderr class _ChRootContext(typing.ContextManager[None]): __slots__ = ("_conn", "_chroot_status", "_path") def __init__(self, conn: ExecHelper, path: ChRootPathSetT = None) -> None: self._conn: ExecHelper = conn self._chroot_status: typing.Optional[str] = conn._chroot_path if path is None or isinstance(path, str): self._path: typing.Optional[str] = path elif isinstance(path, pathlib.Path): self._path = path.as_posix() else: raise TypeError(f"path={path!r} is not instance of {ChRootPathSetT}") def __enter__(self) -> None: self._conn.__enter__() self._chroot_status = self._conn._chroot_path self._conn._chroot_path = self._path def __exit__( self, exc_type: typing.Optional[typing.Type[BaseException]], exc_val: typing.Optional[BaseException], exc_tb: typing.Optional[types.TracebackType], ) -> None: self._conn._chroot_path = self._chroot_status self._conn.__exit__(exc_type=exc_type, exc_val=exc_val, exc_tb=exc_tb) class ExecHelper( typing.Callable[..., exec_result.ExecResult], typing.ContextManager["ExecHelper"], abc.ABC, ): __slots__ = ("__lock", "__logger", "log_mask_re", "__chroot_path") def __init__(self, log_mask_re: LogMaskReT = None, *, logger: logging.Logger) -> None: self.__lock = threading.RLock() self.__logger: logging.Logger = logger self.log_mask_re: LogMaskReT = log_mask_re self.__chroot_path: typing.Optional[str] = None @property def logger(self) -> logging.Logger: return self.__logger @property def lock(self) -> threading.RLock: return 
self.__lock @property def _chroot_path(self) -> typing.Optional[str]: return self.__chroot_path @_chroot_path.setter def _chroot_path(self, new_state: ChRootPathSetT) -> None: if new_state is None or isinstance(new_state, str): self.__chroot_path = new_state elif isinstance(new_state, pathlib.Path): self.__chroot_path = new_state.as_posix() else: raise TypeError(f"chroot_path is expected to be string, but set {new_state!r}") @_chroot_path.deleter def _chroot_path(self) -> None: self.__chroot_path = None def chroot(self, path: ChRootPathSetT) -> _ChRootContext: return _ChRootContext(conn=self, path=path)
Apache License 2.0
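ExecHelper is abstract, so the sketch below relies on the package's concrete Subprocess helper (an assumption about the same library) to show the context-manager behaviour described in the docstring: __enter__ acquires the re-entrant lock before work is done.

import exec_helpers

helper = exec_helpers.Subprocess()        # concrete ExecHelper subclass (assumed available)
with helper:                              # __enter__ acquires helper.lock
    result = helper.execute('echo hello')
print(result.exit_code)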
hazyresearch/bootleg
bootleg/utils/classes/dotted_dict.py
is_json
python
def is_json(value): if is_number(value): return False try: ujson.loads(value) except ValueError: return False return True
Return True if the value is valid JSON.
https://github.com/hazyresearch/bootleg/blob/bf958994179dd844c9a62c08113f438b16490929/bootleg/utils/classes/dotted_dict.py#L275-L284
import keyword import re import string import ujson class DottedDict(dict): def __init__(self, *args, **kwargs): for arg in args: if isinstance(arg, dict): self._parse_input_(arg) elif isinstance(arg, list): for k, v in arg: self.__setitem__(k, v) elif hasattr(arg, "__iter__"): for k, v in list(arg): self.__setitem__(k, v) if kwargs: self._parse_input_(kwargs) def __delattr__(self, item): self.__delitem__(item) def __delitem__(self, key): super(DottedDict, self).__delitem__(key) del self.__dict__[key] def __getattr__(self, attr): try: return self.__dict__[attr] except KeyError: raise AttributeError(attr) def __getitem__(self, key): return self.__dict__[key] def __repr__(self): return "{0}({1})".format( type(self).__name__, super(DottedDict, self).__repr__() ) def __setattr__(self, key, value): self.__setitem__(key, value) def __setitem__(self, key, value): try: self._is_valid_identifier_(key) except ValueError: if not keyword.iskeyword(key): key = self._make_safe_(key) else: raise ValueError('Key "{0}" is a reserved keyword.'.format(key)) super(DottedDict, self).__setitem__(key, value) self.__dict__.update({key: value}) def _is_valid_identifier_(self, identifier): if re.match("[a-zA-Z_][a-zA-Z0-9_]*$", str(identifier)): if not keyword.iskeyword(identifier): return True raise ValueError('Key "{0}" is not a valid identifier.'.format(identifier)) def _make_safe_(self, key): key = str(key) allowed = string.ascii_letters + string.digits + "_" + "/" if " " in key: key = key.replace(" ", "_") diff = set(key).difference(set(allowed)) if diff: for char in diff: key = key.replace(char, "_") try: int(key[0]) except ValueError: pass else: key = "_{0}".format(key) return key def _parse_input_(self, input_item): for key, value in input_item.items(): if isinstance(value, dict): value = DottedDict(**{str(k): v for k, v in value.items()}) if isinstance(value, list): _list = [] for item in value: if isinstance(item, dict): _list.append(DottedDict(item)) else: _list.append(item) value = _list self.__setitem__(key, value) def copy(self): return type(self)(self) def to_dict(self): out = dict(self) for key, value in out.items(): if value is self: out[key] = out elif hasattr(value, "to_dict"): out[key] = value.to_dict() elif isinstance(value, list): _list = [] for item in value: if hasattr(item, "to_dict"): _list.append(item.to_dict()) else: _list.append(item) out[key] = _list return out class PreserveKeysDottedDict(dict): def __init__(self, *args, **kwargs): for arg in args: if isinstance(arg, dict): self._parse_input_(arg) elif isinstance(arg, list): for k, v in arg: self.__setitem__(k, v) elif hasattr(arg, "__iter__"): for k, v in list(arg): self.__setitem__(k, v) if kwargs: self._parse_input_(kwargs) def __delattr__(self, item): self.__delitem__(item) def __delitem__(self, key): super(PreserveKeysDottedDict, self).__delitem__(key) del self.__dict__[key] def __getattr__(self, attr): try: return self.__dict__[attr] except KeyError: raise AttributeError(attr) def __getitem__(self, key): return self.__dict__[key] def __repr__(self): return "{0}({1})".format( type(self).__name__, super(PreserveKeysDottedDict, self).__repr__() ) def __setattr__(self, key, value): self.__setitem__(key, value) def __setitem__(self, key, value): super(PreserveKeysDottedDict, self).__setitem__(key, value) self.__dict__.update({key: value}) def _parse_input_(self, input_item): for key, value in input_item.items(): if isinstance(value, dict): value = PreserveKeysDottedDict(**{str(k): v for k, v in value.items()}) if isinstance(value, 
list): _list = [] for item in value: if isinstance(item, dict): _list.append(PreserveKeysDottedDict(item)) else: _list.append(item) value = _list self.__setitem__(key, value) def copy(self): return type(self)(self) def to_dict(self): out = dict(self) for key, value in out.items(): if value is self: out[key] = out elif hasattr(value, "to_dict"): out[key] = value.to_dict() elif isinstance(value, list): _list = [] for item in value: if hasattr(item, "to_dict"): _list.append(item.to_dict()) else: _list.append(item) out[key] = _list return out def create_bool_dotted_dict(d_dict): if (type(d_dict) is DottedDict) or (type(d_dict) is dict): d_dict = DottedDict(d_dict) if type(d_dict) is str and is_json(d_dict): d_dict = DottedDict(ujson.loads(d_dict)) if type(d_dict) is DottedDict: for k in d_dict: if d_dict[k] == "True": d_dict[k] = True elif d_dict[k] == "False": d_dict[k] = False elif ( (type(d_dict[k]) is DottedDict) or (type(d_dict[k]) is dict) or (type(d_dict[k]) is str and is_json(d_dict[k])) ): d_dict[k] = create_bool_dotted_dict(d_dict[k]) elif type(d_dict[k]) is list: for i in range(len(d_dict[k])): d_dict[k][i] = create_bool_dotted_dict(d_dict[k][i]) return d_dict def is_number(s): try: float(s) return True except ValueError: return False
Apache License 2.0
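A quick sketch exercising is_json from the row above; note that, as the code shows, numeric strings are deliberately reported as not JSON.

from bootleg.utils.classes.dotted_dict import is_json

print(is_json('{"a": 1}'))   # True: ujson can parse it
print(is_json('3.14'))       # False: is_number() short-circuits before the JSON check
print(is_json('not json'))   # False: ujson.loads raises ValueError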
microstrategy/mstrio-py
mstrio/api/monitors.py
cancel_jobs_v1
python
def cancel_jobs_v1(connection: "Connection", ids: List[str], fields: List[str] = None, error_msg: str = None): params = {'fields': ",".join(fields) if fields else None} if ids: body = {'jobIds': ids} return connection.session.post(url=f'{connection.base_url}/api/monitors/cancelJobs', params=params, json=body) else: raise ValueError("No ids have been passed.")
Cancel jobs specified by `ids`. Args: connection(object): MicroStrategy connection object returned by `connection.Connection()`. ids(List[str]): IDs of the jobs fields(list, optional): Comma separated top-level field whitelist. This allows client to selectively retrieve part of the response model. error_msg(str, optional): Customized error message. Returns: HTTP response object returned by the MicroStrategy REST server
https://github.com/microstrategy/mstrio-py/blob/720af7e673ed62462366d1406e5ea14792461e94/mstrio/api/monitors.py#L934-L955
from concurrent.futures.thread import ThreadPoolExecutor import json from typing import List, Optional, TYPE_CHECKING, Union from unittest.mock import Mock from mstrio.api.exceptions import MstrException, PartialSuccess, Success from packaging import version from requests.adapters import Response from requests.sessions import Request from requests_futures.sessions import FuturesSession from mstrio.utils.error_handlers import bulk_operation_response_handler, ErrorHandler from mstrio.utils.helper import delete_none_values, filter_list_of_dicts, response_handler if TYPE_CHECKING: from mstrio.connection import Connection ISERVER_VERSION_11_3_2 = '11.3.0200' @ErrorHandler(err_msg='Error getting list of all projects from metadata.') def get_projects(connection: "Connection", offset: int = 0, limit: int = -1, error_msg: str = None): return connection.session.get( url=f'{connection.base_url}/api/monitors/projects', headers={'X-MSTR-ProjectID': None}, params={ 'offset': offset, 'limit': limit }, ) def get_projects_async(future_session: "FuturesSession", connection: "Connection", offset: int = 0, limit: int = -1, error_msg: str = None): url = f'{connection.base_url}/api/monitors/projects' headers = {'X-MSTR-ProjectID': None} params = {'offset': offset, 'limit': limit} future = future_session.get(url=url, headers=headers, params=params) return future @ErrorHandler( err_msg='Error getting information about nodes in the connected Intelligence Server cluster.') def get_node_info(connection: "Connection", id: str = None, node_name: str = None, error_msg: str = None): return connection.session.get( url=f'{connection.base_url}/api/monitors/iServer/nodes', headers={'X-MSTR-ProjectID': None}, params={ 'projects.id': id, 'name': node_name }, ) @ErrorHandler( err_msg='Error updating properties for a project {project_id} for cluster node {node_name}.') def update_node_properties(connection: "Connection", node_name: str, project_id: str, body: dict, error_msg: str = None, whitelist: Optional[List[tuple]] = None): return connection.session.patch( url=f'{connection.base_url}/api/monitors/iServer/nodes/{node_name}/projects/{project_id}', headers={'X-MSTR-ProjectID': None}, json=body, ) @ErrorHandler(err_msg='Error adding node {node_name} to connected Intelligence Server cluster.') def add_node(connection: "Connection", node_name: str, error_msg: str = None, whitelist: Optional[List[tuple]] = None): return connection.session.put( url=f'{connection.base_url}/api/monitors/iServer/nodes/{node_name}', headers={'X-MSTR-ProjectID': None}, ) @ErrorHandler( err_msg='Error removing node {node_name} from the connected Intelligence Server cluster.') def remove_node(connection: "Connection", node_name: str, error_msg: str = None, whitelist: Optional[List[tuple]] = None): return connection.session.delete( url=f'{connection.base_url}/api/monitors/iServer/nodes/{node_name}', headers={'X-MSTR-ProjectID': None}, ) @ErrorHandler(err_msg='Error getting user connections for {node_name} cluster node.') def get_user_connections(connection: "Connection", node_name: str, offset: int = 0, limit: int = 100, error_msg: str = None): return connection.session.get( url=f'{connection.base_url}/api/monitors/userConnections', headers={'X-MSTR-ProjectID': None}, params={ 'clusterNode': node_name, 'offset': offset, 'limit': limit }, ) def get_user_connections_async(future_session: "FuturesSession", connection: "Connection", node_name: str, offset: int = 0, limit: int = 100): params = {'clusterNode': node_name, 'offset': offset, 'limit': limit} url = 
f'{connection.base_url}/api/monitors/userConnections' headers = {'X-MSTR-ProjectID': None} future = future_session.get(url=url, headers=headers, params=params) return future def delete_user_connection(connection: "Connection", id: str, error_msg: str = None, bulk: bool = False): response = connection.session.delete( url=f'{connection.base_url}/api/monitors/userConnections/{id}', headers={'X-MSTR-ProjectID': None}, ) if not response.ok and bulk: if error_msg is None: error_msg = f"Error deleting user connections {id}." response_handler(response, error_msg, whitelist=[('ERR001', 500)]) return response def delete_user_connection_async(future_session: "FuturesSession", connection: "Connection", id: str, error_msg: str = None): url = f'{connection.base_url}/api/monitors/userConnections/{id}' headers = {'X-MSTR-ProjectID': None} future = future_session.delete(url=url, headers=headers) return future def delete_user_connections(connection: "Connection", ids: List[str]): body = {"userConnectionIds": ids} response = connection.session.post( url=f'{connection.base_url}/api/monitors/deleteUserConnections', json=body, ) return response @ErrorHandler(err_msg='Error getting cube cache {id} info.') def get_cube_cache_info(connection: "Connection", id: str): return connection.session.get(url=f'{connection.base_url}/api/monitors/caches/cubes/{id}') @ErrorHandler(err_msg='Error deleting cube cache with ID {id}') def delete_cube_cache(connection: "Connection", id: str, throw_error: bool = True): return connection.session.delete(url=f'{connection.base_url}/api/monitors/caches/cubes/{id}') @ErrorHandler(err_msg='Error altering cube cache {id} status.') def alter_cube_cache_status(connection: "Connection", id: str, active: bool = None, loaded: bool = None, throw_error: bool = True): if loaded is not None: loaded = 'loaded' if loaded else 'unloaded' body = {'state': {'active': active, 'loadedState': loaded}} body = delete_none_values(body) return connection.session.patch(url=f'{connection.base_url}/api/monitors/caches/cubes/{id}', json=body, headers={'Prefer': 'respond-async'}) @ErrorHandler(err_msg='Error getting list of cube caches for node {node}.') def get_cube_caches(connection: "Connection", node: str, offset: int = 0, limit: int = 1000, project_ids: str = None, loaded: bool = False, sort_by: str = None, error_msg: str = None): loaded = 'loaded' if loaded else None return connection.session.get( url=f'{connection.base_url}/api/monitors/caches/cubes', params={ 'clusterNode': node, 'offset': offset, 'limit': limit, 'projectIds': project_ids, 'state.loadedState': loaded, 'sortBy': sort_by }, ) def get_cube_caches_async(future_session: "FuturesSession", connection: "Connection", node: str, offset: int = 0, limit: int = 1000, project_ids: str = None, loaded: bool = False, sort_by: str = None): url = f'{connection.base_url}/api/monitors/caches/cubes' params = { 'clusterNode': node, 'offset': offset, 'limit': limit, 'projectIds': project_ids, 'state.loadedState': loaded, 'sortBy': sort_by } future = future_session.get(url=url, params=params) return future @ErrorHandler(err_msg='Error getting cube cache manipulation {manipulation_id} status.') def get_cube_cache_manipulation_status(connection: "Connection", manipulation_id: str, throw_error: bool = True): url = f'{connection.base_url}/api/monitors/caches/cubes/manipulations/{manipulation_id}/status' return connection.session.get(url=url) @ErrorHandler(err_msg='Error getting database connections for {nodes_names} cluster node.') def 
get_database_connections(connection: "Connection", nodes_names: str, error_msg: str = None): return connection.session.get( url=f'{connection.base_url}/api/monitors/dbConnectionInstances', params={'clusterNodes': nodes_names}, ) @ErrorHandler(err_msg='Error deleting database connections {connection_id}.') def delete_database_connection(connection: "Connection", connection_id: str, error_msg: str = None): url = f'{connection.base_url}/api/monitors/dbConnectionInstances/{connection_id}' return connection.session.delete(url=url) def delete_database_connection_async(future_session: "FuturesSession", connection: "Connection", connection_id: str): url = f'{connection.base_url}/api/monitors/dbConnectionInstances/{connection_id}' return future_session.delete(url=url) def get_job(connection: "Connection", id: str, node_name: str = None, fields: List[str] = None, error_msg: str = None): response = Mock() if not node_name: nodes_response = get_node_info(connection).json() nodes = nodes_response['nodes'] node_names = [node["name"] for node in nodes] if isinstance(node_name, str): node_names = [node_names] with FuturesSession(executor=ThreadPoolExecutor(max_workers=8), session=connection.session) as session: futures = (get_jobs_async(future_session=session, connection=connection, node_name=node) for node in node_names) jobs = [] for f in futures: response = f.result() if not response.ok: response_handler(response, error_msg, throw_error=False) else: jobs.extend(response.json()['jobs']) job = filter_list_of_dicts(jobs, id=id) if not job: response.status_code = 400 response.reason = f"Error getting job '{id}'" response.raise_for_status() elif len(job) > 1: response.status_code = 400 response.reason = f"More than one job with id '{id}' was found." response.raise_for_status() else: job = job[0] job = json.dumps(job).encode('utf-8') response._content = job response.status_code = 200 return response @ErrorHandler(err_msg="Error getting job {id}.") def get_job_v2(connection: "Connection", id: str, fields: List[str] = None, error_msg: str = None): return connection.session.get( url=f'{connection.base_url}/api/v2/monitors/jobs/{id}', params={ 'fields': ",".join(fields) if fields else None, }, ) @ErrorHandler(err_msg="Error getting jobs list.") def get_jobs(connection: "Connection", node_name: str, project_id: str = None, status: str = None, job_type: str = None, user_full_name: str = None, object_id: str = None, sort_by: str = None, fields: List[str] = None, error_msg: str = None) -> Response: params = { 'nodeName': node_name, 'projectId': project_id, 'status': status, 'jobType': job_type, 'userFullName': user_full_name, 'objectId': object_id, 'sortBy': sort_by, 'fields': ",".join(fields) if fields else None, }, request = Request('GET', url=f'{connection.base_url}/api/monitors/jobs', params=params) prepared_request = request.prepare() url = prepared_request.url.replace("+", "%20") return connection.session.get(url) def get_jobs_async(future_session: "FuturesSession", connection: "Connection", node_name: str, project_id: str = None, status: str = None, job_type: str = None, user_full_name: str = None, object_id: str = None, sort_by: str = None, fields: List[str] = None, error_msg: str = None) -> Response: params = { 'nodeName': node_name, 'projectId': project_id, 'status': status, 'jobType': job_type, 'userFullName': user_full_name, 'objectId': object_id, 'sortBy': sort_by, 'fields': ",".join(fields) if fields else None, } request = Request('GET', url=f'{connection.base_url}/api/monitors/jobs', params=params) 
prepared_request = request.prepare() url = prepared_request.url.replace("+", "%20") return future_session.get(url) @ErrorHandler(err_msg="Error getting jobs list") def get_jobs_v2(connection: "Connection", node_name: str, user: Union[List[str], str] = None, description: str = None, type: Union[List[str], str] = None, status: Union[List[str], str] = None, object_id: Union[List[str]] = None, object_type: Union[List[str], str] = None, project_id: Union[List[str], str] = None, project_name: Union[List[str], str] = None, pu_name: Union[List[str], str] = None, subscription_type: Union[List[str], str] = None, subscription_recipient: Union[List[str], str] = None, memory_usage: str = None, elapsed_time: str = None, sort_by: str = None, fields: List[str] = None, error_msg: str = None): params = { 'nodeName': node_name, 'user': user, 'description': description, 'type': type, 'status': status, 'objectId': object_id, 'objectType': object_type, 'projectId': project_id, 'projectName': project_name, 'puName': pu_name, 'subscriptionType': subscription_type, 'subscriptionRecipient': subscription_recipient, 'memoryUsage': memory_usage, 'elapsedTime': elapsed_time, 'sortBy': sort_by, 'fields': ",".join(fields) if fields else None, } request = Request('GET', url=f'{connection.base_url}/api/v2/monitors/jobs', params=params) prepared_request = request.prepare() url = prepared_request.url.replace("+", "%20") return connection.session.get(url) def get_jobs_v2_async(future_session: "FuturesSession", connection: "Connection", node_name: str, user: Union[List[str], str] = None, description: str = None, type: Union[List[str], str] = None, status: Union[List[str], str] = None, object_id: Union[List[str]] = None, object_type: Union[List[str], str] = None, project_id: Union[List[str], str] = None, project_name: Union[List[str], str] = None, pu_name: Union[List[str], str] = None, subscription_type: Union[List[str], str] = None, subscription_recipient: Union[List[str], str] = None, memory_usage: str = None, elapsed_time: str = None, sort_by: str = None, fields: List[str] = None, error_msg: str = None) -> Response: params = { 'nodeName': node_name, 'user': user, 'description': description, 'type': type, 'status': status, 'objectId': object_id, 'objectType': object_type, 'projectId': project_id, 'projectName': project_name, 'puName': pu_name, 'subscriptionType': subscription_type, 'subscriptionRecipient': subscription_recipient, 'memoryUsage': memory_usage, 'elapsedTime': elapsed_time, 'sortBy': sort_by, 'fields': ",".join(fields) if fields else None, } request = Request('GET', url=f'{connection.base_url}/api/v2/monitors/jobs', params=params) prepared_request = request.prepare() url = prepared_request.url.replace("+", "%20") return future_session.get(url) @ErrorHandler(err_msg="Error killing job {id}") def cancel_job(connection: "Connection", id: str, fields: List[str] = None, error_msg: str = None): if version.parse(connection.iserver_version) == version.parse(ISERVER_VERSION_11_3_2): return cancel_job_v1(connection, id, fields, error_msg) else: return cancel_job_v2(connection, id, fields, error_msg) @ErrorHandler(err_msg="Error killing job {id}") def cancel_job_v1(connection: "Connection", id: str, fields: List[str] = None, error_msg: str = None): params = {'fields': ",".join(fields) if fields else None} return connection.session.delete(url=f'{connection.base_url}/api/monitors/jobs/{id}', params=params) @ErrorHandler(err_msg="Error killing job {id}") def cancel_job_v2(connection: "Connection", id: str, fields: List[str] = 
None, error_msg: str = None): params = {'fields': ",".join(fields) if fields else None} return connection.session.delete(url=f'{connection.base_url}/api/v2/monitors/jobs/{id}', params=params) def cancel_jobs(connection: "Connection", ids: List[str], fields: List[str] = None, error_msg: str = None) -> Union[Success, PartialSuccess, MstrException]: if version.parse(connection.iserver_version) == version.parse(ISERVER_VERSION_11_3_2): response = cancel_jobs_v1(connection, ids, fields, error_msg) else: response = cancel_jobs_v2(connection, ids, fields, error_msg) return bulk_operation_response_handler(response, "jobCancellationStatus")
Apache License 2.0
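The cancel_jobs wrapper at the end of the context above picks between the v1 and v2 monitors endpoints by comparing the connected Intelligence Server version to 11.3.0200, then funnels the response through bulk_operation_response_handler. A minimal, self-contained sketch of that version-gating pattern; the two placeholder functions below are illustrative stand-ins, not part of mstrio:

from packaging import version

ISERVER_VERSION_11_3_2 = '11.3.0200'

def _cancel_jobs_v1(ids):
    # stand-in for the v1 DELETE /api/monitors/jobs/{id} path
    return f'v1 cancel of {len(ids)} job(s)'

def _cancel_jobs_v2(ids):
    # stand-in for the v2 DELETE /api/v2/monitors/jobs/{id} path
    return f'v2 cancel of {len(ids)} job(s)'

def cancel_jobs(iserver_version, ids):
    # exactly one REST generation is used per call, chosen by the server version
    if version.parse(iserver_version) == version.parse(ISERVER_VERSION_11_3_2):
        return _cancel_jobs_v1(ids)
    return _cancel_jobs_v2(ids)

print(cancel_jobs('11.3.0200', ['A1', 'B2']))  # -> v1 cancel of 2 job(s)
print(cancel_jobs('11.3.0300', ['A1']))        # -> v2 cancel of 1 job(s)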
awslabs/autogluon
core/src/autogluon/core/task/base/base_predictor.py
BasePredictor.save
python
def save(self, output_directory):
    filepath = output_directory + PREDICTOR_FILENAME
    self._save_model(output_directory)
    self._save_results(output_directory)
    self.model = None
    self.results = None
    pickle.dump(self, open(filepath, 'wb'))
    logger.info("Predictor saved to file: %s " % filepath)
Saves this object to file. Don't forget to save the models and the Results objects if they exist. Before returning a Predictor, task.fit() should call predictor.save()
https://github.com/awslabs/autogluon/blob/e26e7b23f17fac9f5fb761096a6a49fe94de496b/core/src/autogluon/core/task/base/base_predictor.py#L78-L88
import json import logging import pickle from abc import ABC, abstractmethod from ...utils import plot_performance_vs_trials, plot_summary_of_models logger = logging.getLogger(__name__) __all__ = ['BasePredictor'] PREDICTOR_FILENAME = "predictor.pkl" RESULTS_FILENAME = "results.json" class BasePredictor(ABC): def __init__(self, loss_func, eval_func, model=None, results=None, **kwargs): self.model = model self.loss_func = loss_func self.eval_func = eval_func self.results = results @classmethod @abstractmethod def load(cls, output_directory): filepath = output_directory + PREDICTOR_FILENAME results_file = output_directory + RESULTS_FILENAME predictor = pickle.load(open(filepath, "rb")) predictor.results = json.load(open(results_file, 'r')) pass @abstractmethod
Apache License 2.0
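BasePredictor.save above strips the heavy model and results off the object, persists them through the _save_model/_save_results hooks, and then pickles the slimmed-down predictor; the abstract load reverses this by unpickling and re-reading results.json. A self-contained sketch of that detach-pickle-reattach pattern using a throwaway class (TinyPredictor is purely illustrative, not an autogluon type):

import json
import os
import pickle
import tempfile

class TinyPredictor:
    def __init__(self, model=None, results=None):
        self.model = model
        self.results = results

    def save(self, output_directory):
        # mirror BasePredictor.save: persist heavy parts separately, then pickle a stripped object
        with open(os.path.join(output_directory, 'results.json'), 'w') as f:
            json.dump(self.results, f)
        self.model = None
        self.results = None
        with open(os.path.join(output_directory, 'predictor.pkl'), 'wb') as f:
            pickle.dump(self, f)

    @classmethod
    def load(cls, output_directory):
        with open(os.path.join(output_directory, 'predictor.pkl'), 'rb') as f:
            predictor = pickle.load(f)
        with open(os.path.join(output_directory, 'results.json')) as f:
            predictor.results = json.load(f)
        return predictor

with tempfile.TemporaryDirectory() as d:
    TinyPredictor(model='weights', results={'score': 0.9}).save(d)
    print(TinyPredictor.load(d).results)   # {'score': 0.9}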
biolink/biolink-api
biolink/api/relations/endpoints/relation_usage.py
RelationUsagePivotResource.get
python
def get(self):
    args = parser.parse_args()
    return search_associations(
        rows=0,
        facet_fields=[M.RELATION],
        facet_pivot_fields=[M.SUBJECT_CATEGORY, M.OBJECT_CATEGORY, M.RELATION],
        user_agent=USER_AGENT,
        **args
    )
Relation usage count for all subj x obj category combinations
https://github.com/biolink/biolink-api/blob/d10e95f160cf882515cc9892442741ddc6cf7ccb/biolink/api/relations/endpoints/relation_usage.py#L61-L73
import logging from flask import request from flask_restplus import Resource from biolink.datamodel.serializers import association, association_results from biolink.api.restplus import api from ontobio.golr.golr_associations import get_association, search_associations, GolrFields from biolink import USER_AGENT log = logging.getLogger(__name__) M=GolrFields() parser = api.parser() parser.add_argument('subject_taxon', help='SUBJECT TAXON id, e.g. NCBITaxon:9606. Includes inferred by default') parser.add_argument('evidence', help="""Object id, e.g. ECO:0000501 (for IEA; Includes inferred by default) or a specific publication or other supporting ibject, e.g. ZFIN:ZDB-PUB-060503-2. """) class RelationUsageResource(Resource): @api.expect(parser) @api.marshal_list_with(association_results) def get(self): args = parser.parse_args() return search_associations( rows=0, facet_fields=[M.RELATION], facet_pivot_fields=[M.SUBJECT_CATEGORY, M.OBJECT_CATEGORY, M.RELATION], user_agent=USER_AGENT, **args ) class RelationUsageBetweenResource(Resource): @api.expect(parser) @api.marshal_list_with(association_results) def get(self, subject_category, object_category): args = parser.parse_args() return search_associations( rows=0, subject_category=subject_category, object_category=object_category, facet_fields=[M.RELATION, M.RELATION_LABEL], user_agent=USER_AGENT, **args ) class RelationUsagePivotResource(Resource): @api.expect(parser) @api.marshal_list_with(association_results)
BSD 3-Clause New or Revised License
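The endpoint above asks the Golr backend for zero rows and a facet pivot, i.e. relation usage counts broken down by subject and object category. The same aggregation is easy to picture over an in-memory toy list of associations (the records below are invented purely for illustration):

from collections import Counter

# toy associations; field names mirror the facet pivot (subject_category, object_category, relation)
associations = [
    {'subject_category': 'gene', 'object_category': 'phenotype', 'relation': 'has_phenotype'},
    {'subject_category': 'gene', 'object_category': 'phenotype', 'relation': 'has_phenotype'},
    {'subject_category': 'gene', 'object_category': 'disease',   'relation': 'contributes_to'},
]

pivot = Counter((a['subject_category'], a['object_category'], a['relation'])
                for a in associations)
for (subj, obj, rel), count in pivot.items():
    print(subj, obj, rel, count)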
blacktear23/py-servicebus
servicebus/pika/adapters/blocking_connection.py
_ConsumerCancellationEvt.__init__
python
def __init__(self, method_frame):
    self.method_frame = method_frame
:param pika.frame.Method method_frame: method frame with method of type `spec.Basic.Cancel`
https://github.com/blacktear23/py-servicebus/blob/c3d6ccf0b2abf131ca1060d89f3c0d4ab08481e4/servicebus/pika/adapters/blocking_connection.py#L815-L820
from collections import namedtuple, deque import contextlib import functools import logging import time import servicebus.pika.channel from servicebus.pika import compat from servicebus.pika import exceptions from servicebus.pika import spec from servicebus.pika.adapters.select_connection import SelectConnection LOGGER = logging.getLogger(__name__) class _CallbackResult(object): __slots__ = ('_value_class', '_ready', '_values') def __init__(self, value_class=None): self._value_class = value_class self._ready = None self._values = None self.reset() def reset(self): self._ready = False self._values = None def __bool__(self): return self.is_ready() __nonzero__ = __bool__ def __enter__(self): return self def __exit__(self, *args, **kwargs): self.reset() def is_ready(self): return self._ready @property def ready(self): return self._ready def signal_once(self, *_args, **_kwargs): assert not self._ready, '_CallbackResult was already set' self._ready = True def set_value_once(self, *args, **kwargs): self.signal_once() try: self._values = (self._value_class(*args, **kwargs),) except Exception: LOGGER.error( "set_value_once failed: value_class=%r; args=%r; kwargs=%r", self._value_class, args, kwargs) raise def append_element(self, *args, **kwargs): assert not self._ready or isinstance(self._values, list), ( '_CallbackResult state is incompatible with append_element: ' 'ready=%r; values=%r' % (self._ready, self._values)) try: value = self._value_class(*args, **kwargs) except Exception: LOGGER.error( "append_element failed: value_class=%r; args=%r; kwargs=%r", self._value_class, args, kwargs) raise if self._values is None: self._values = [value] else: self._values.append(value) self._ready = True @property def value(self): assert self._ready, '_CallbackResult was not set' assert isinstance(self._values, tuple) and len(self._values) == 1, ( '_CallbackResult value is incompatible with set_value_once: %r' % (self._values,)) return self._values[0] @property def elements(self): assert self._ready, '_CallbackResult was not set' assert isinstance(self._values, list) and len(self._values) > 0, ( '_CallbackResult value is incompatible with append_element: %r' % (self._values,)) return self._values class _IoloopTimerContext(object): def __init__(self, duration, connection): assert hasattr(connection, 'add_timeout'), connection self._duration = duration self._connection = connection self._callback_result = _CallbackResult() self._timer_id = None def __enter__(self): self._timer_id = self._connection.add_timeout( self._duration, self._callback_result.signal_once) return self def __exit__(self, *_args, **_kwargs): if not self._callback_result: self._connection.remove_timeout(self._timer_id) def is_ready(self): return self._callback_result.is_ready() class _TimerEvt(object): __slots__ = ('timer_id', '_callback') def __init__(self, callback): self._callback = callback self.timer_id = None def __repr__(self): return '%s(timer_id=%s, callback=%s)' % (self.__class__.__name__, self.timer_id, self._callback) def dispatch(self): self._callback() class _ConnectionBlockedUnblockedEvtBase(object): __slots__ = ('_callback', '_method_frame') def __init__(self, callback, method_frame): self._callback = callback self._method_frame = method_frame def __repr__(self): return '%s(callback=%s, frame=%s)' % (self.__class__.__name__, self._callback, self._method_frame) def dispatch(self): self._callback(self._method_frame) class _ConnectionBlockedEvt( _ConnectionBlockedUnblockedEvtBase): pass class _ConnectionUnblockedEvt( 
_ConnectionBlockedUnblockedEvtBase): pass class BlockingConnection(object): _OnOpenedArgs = namedtuple('BlockingConnection__OnOpenedArgs', 'connection') _OnOpenErrorArgs = namedtuple('BlockingConnection__OnOpenErrorArgs', 'connection error_text') _OnClosedArgs = namedtuple('BlockingConnection__OnClosedArgs', 'connection reason_code reason_text') _OnChannelOpenedArgs = namedtuple( 'BlockingConnection__OnChannelOpenedArgs', 'channel') def __init__(self, parameters=None, _impl_class=None): self._event_dispatch_suspend_depth = 0 self._ready_events = deque() self._channels_pending_dispatch = set() self._opened_result = _CallbackResult(self._OnOpenedArgs) self._open_error_result = _CallbackResult(self._OnOpenErrorArgs) self._closed_result = _CallbackResult(self._OnClosedArgs) self._user_initiated_close = False impl_class = _impl_class or SelectConnection self._impl = impl_class( parameters=parameters, on_open_callback=self._opened_result.set_value_once, on_open_error_callback=self._open_error_result.set_value_once, on_close_callback=self._closed_result.set_value_once, stop_ioloop_on_close=False) self._process_io_for_connection_setup() def _cleanup(self): self._ready_events.clear() self._opened_result.reset() self._open_error_result.reset() self._closed_result.reset() @contextlib.contextmanager def _acquire_event_dispatch(self): try: self._event_dispatch_suspend_depth += 1 yield self._event_dispatch_suspend_depth == 1 finally: self._event_dispatch_suspend_depth -= 1 def _process_io_for_connection_setup(self): self._flush_output(self._opened_result.is_ready, self._open_error_result.is_ready) if self._open_error_result.ready: raise exceptions.AMQPConnectionError( self._open_error_result.value.error_text) assert self._opened_result.ready assert self._opened_result.value.connection is self._impl def _flush_output(self, *waiters): if self._impl.is_closed: raise exceptions.ConnectionClosed() is_done = (lambda: self._closed_result.ready or (not self._impl.outbound_buffer and (not waiters or any(ready() for ready in waiters)))) while not is_done(): self._impl.ioloop.poll() self._impl.ioloop.process_timeouts() if self._closed_result.ready: try: result = self._closed_result.value if result.reason_code not in [0, 200]: LOGGER.critical('Connection close detected; result=%r', result) raise exceptions.ConnectionClosed(result.reason_code, result.reason_text) elif not self._user_initiated_close: LOGGER.critical('Connection close detected') raise exceptions.ConnectionClosed() else: LOGGER.debug('Connection closed; result=%r', result) finally: self._cleanup() def _request_channel_dispatch(self, channel_number): self._channels_pending_dispatch.add(channel_number) def _dispatch_channel_events(self): if not self._channels_pending_dispatch: return with self._acquire_event_dispatch() as dispatch_acquired: if not dispatch_acquired: return candidates = list(self._channels_pending_dispatch) self._channels_pending_dispatch.clear() for channel_number in candidates: if channel_number < 0: continue try: impl_channel = self._impl._channels[channel_number] except KeyError: continue if impl_channel.is_open: impl_channel._get_cookie()._dispatch_events() def _on_timer_ready(self, evt): self._ready_events.append(evt) def _on_connection_blocked(self, user_callback, method_frame): self._ready_events.append( _ConnectionBlockedEvt(user_callback, method_frame)) def _on_connection_unblocked(self, user_callback, method_frame): self._ready_events.append( _ConnectionUnblockedEvt(user_callback, method_frame)) def 
_dispatch_connection_events(self): if not self._ready_events: return with self._acquire_event_dispatch() as dispatch_acquired: if not dispatch_acquired: return for _ in compat.xrange(len(self._ready_events)): try: evt = self._ready_events.popleft() except IndexError: break evt.dispatch() def add_on_connection_blocked_callback(self, callback_method): self._impl.add_on_connection_blocked_callback( functools.partial(self._on_connection_blocked, callback_method)) def add_on_connection_unblocked_callback(self, callback_method): self._impl.add_on_connection_unblocked_callback( functools.partial(self._on_connection_unblocked, callback_method)) def add_timeout(self, deadline, callback_method): if not callable(callback_method): raise ValueError( 'callback_method parameter must be callable, but got %r' % (callback_method,)) evt = _TimerEvt(callback=callback_method) timer_id = self._impl.add_timeout( deadline, functools.partial(self._on_timer_ready, evt)) evt.timer_id = timer_id return timer_id def remove_timeout(self, timeout_id): self._impl.remove_timeout(timeout_id) for i, evt in enumerate(self._ready_events): if isinstance(evt, _TimerEvt) and evt.timer_id == timeout_id: index_to_remove = i break else: return del self._ready_events[index_to_remove] def close(self, reply_code=200, reply_text='Normal shutdown'): LOGGER.debug('Closing connection (%s): %s', reply_code, reply_text) self._user_initiated_close = True for impl_channel in compat.dictvalues(self._impl._channels): channel = impl_channel._get_cookie() if channel.is_open: channel.close(reply_code, reply_text) self._impl.close(reply_code, reply_text) self._flush_output(self._closed_result.is_ready) def process_data_events(self, time_limit=0): common_terminator = lambda: bool( self._channels_pending_dispatch or self._ready_events) if time_limit is None: self._flush_output(common_terminator) else: with _IoloopTimerContext(time_limit, self._impl) as timer: self._flush_output(timer.is_ready, common_terminator) if self._ready_events: self._dispatch_connection_events() if self._channels_pending_dispatch: self._dispatch_channel_events() def sleep(self, duration): assert duration >= 0, duration deadline = time.time() + duration time_limit = duration while True: self.process_data_events(time_limit) time_limit = deadline - time.time() if time_limit <= 0: break def channel(self, channel_number=None): with _CallbackResult(self._OnChannelOpenedArgs) as opened_args: impl_channel = self._impl.channel( on_open_callback=opened_args.set_value_once, channel_number=channel_number) channel = BlockingChannel(impl_channel, self) impl_channel._set_cookie(channel) channel._flush_output(opened_args.is_ready) return channel def __enter__(self): return self def __exit__(self, tp, value, traceback): self.close() @property def is_closed(self): return self._impl.is_closed @property def is_closing(self): return self._impl.is_closing @property def is_open(self): return self._impl.is_open @property def basic_nack_supported(self): return self._impl.basic_nack @property def consumer_cancel_notify_supported(self): return self._impl.consumer_cancel_notify @property def exchange_exchange_bindings_supported(self): return self._impl.exchange_exchange_bindings @property def publisher_confirms_supported(self): return self._impl.publisher_confirms basic_nack = basic_nack_supported consumer_cancel_notify = consumer_cancel_notify_supported exchange_exchange_bindings = exchange_exchange_bindings_supported publisher_confirms = publisher_confirms_supported class _ChannelPendingEvt(object): 
pass class _ConsumerDeliveryEvt(_ChannelPendingEvt): __slots__ = ('method', 'properties', 'body') def __init__(self, method, properties, body): self.method = method self.properties = properties self.body = body class _ConsumerCancellationEvt(_ChannelPendingEvt): __slots__ = ('method_frame')
BSD 3-Clause New or Revised License
jbasko/configmanager
configmanager/sections.py
Section.get_item
python
def get_item(self, *key):
    item = self._get_item_or_section(key)
    if not item.is_item:
        raise RuntimeError('{} is a section, not an item'.format(key))
    return item
The recommended way of retrieving an item by key when extending
configmanager's behaviour. Attribute and dictionary key access is
configurable and may not always return items (see PlainConfig for example),
whereas this method will always return the corresponding Item as long as
NOT_FOUND hook callbacks don't break this convention.

Args:
    *key

Returns:
    item (:class:`.Item`):
https://github.com/jbasko/configmanager/blob/6547224f11ae643aeaa6b141180a2149cd432032/configmanager/sections.py#L228-L244
import collections import copy import functools import keyword import six from hookery import HookRegistry from .schema_parser import parse_config_schema from .meta import ConfigManagerSettings from .exceptions import NotFound from .utils import not_set from .base import BaseSection, is_config_item, is_config_section _iter_emitters = { 'path': lambda k, v, _: (k, v), 'name': lambda k, v, _: (v.alias, v) if v.is_section else (v.name, v), 'alias': lambda k, v, _: (v.alias, v) if v.is_section else (v.name, v), 'str_path': lambda k, v, sep: (sep.join(k), v), None: lambda k, v, sep: v, } class _SectionHooks(HookRegistry): def __init__(self, section): super(_SectionHooks, self).__init__(section) self.not_found = self.register_event('not_found') self.item_added_to_section = self.register_event('item_added_to_section') self.section_added_to_section = self.register_event('section_added_to_section') self.item_value_changed = self.register_event('item_value_changed') class Section(BaseSection): _default_settings = ConfigManagerSettings(immutable=True) def __init__(self, schema=None, section=None): self._tree = collections.OrderedDict() self._section = section self._section_alias = None self._hooks = _SectionHooks(self) self._hooks.hook_registered(self._hook_registered) self.__item_attributes = {} if schema is not None: self.add_schema(schema) def __len__(self): return len(self._tree) def __nonzero__(self): return True def __bool__(self): return True def __iter__(self): for name in self._tree.keys(): yield name def __repr__(self): return '<{cls} {alias} at {id}>'.format(cls=self.__class__.__name__, alias=self.alias, id=id(self)) def __contains__(self, key): try: _ = self._get_by_key(key, handle_not_found=False) return True except NotFound: return False def __setitem__(self, key, value): if isinstance(key, six.string_types): name = key rest = None elif isinstance(key, (tuple, list)) and len(key) > 0: name = key[0] if len(key) == 1: rest = None else: rest = key[1:] else: raise TypeError('Expected either a string or a tuple as key, got {!r}'.format(key)) if rest: self[name][rest] = value return self._set_key(name, value) def __getitem__(self, key): return self._get_by_key(key) def __getattr__(self, name): if not isinstance(name, six.string_types): raise TypeError('Expected a string, got a {!r}'.format(type(name))) if name.startswith('_'): raise AttributeError(name) return self._get_by_key(name) def __setattr__(self, name, value): if name.startswith('_'): return super(Section, self).__setattr__(name, value) self._set_key(name, value) def _default_key_setter(self, name, subject): if is_config_item(subject): self.add_item(name, subject) elif is_config_section(subject): self.add_section(name, subject) else: raise TypeError( 'Section items can only be replaced with items, ' 'got {type}. To set item value use ...{name}.value = <new_value>'.format( type=type(subject), name=name, ) ) def _set_key(self, key, value): if is_config_section(value): self.add_section(key, value) return if key not in self._tree or self.settings.key_setter is None: if is_config_item(value): self.add_item(key, value) return raise TypeError( 'Section sections/items can only be replaced with sections/items, ' 'got {type}. 
To set value use ..[{name}].value = <new_value>'.format( type=type(value), name=key, ) ) if self.settings.key_setter is None: self._default_key_setter(key, value) else: self.settings.key_setter(subject=self._tree[key], value=value, default_key_setter=self._default_key_setter) def _get_by_key(self, key, handle_not_found=True): resolution = self._get_item_or_section(key, handle_not_found=handle_not_found) if self.settings.key_getter: return self.settings.key_getter(parent=self, subject=resolution) else: return resolution def _get_item_or_section(self, key, handle_not_found=True): if isinstance(key, six.string_types): if self.settings.str_path_separator in key: return self._get_item_or_section(key.split(self.settings.str_path_separator)) if key.endswith('_') and keyword.iskeyword(key[:-1]): key = key[:-1] if key in self._tree: resolution = self._tree[key] else: if handle_not_found: result = self.dispatch_event(self.hooks.not_found, name=key, section=self) if result is not None: resolution = result else: raise NotFound(key, section=self) else: raise NotFound(key, section=self) elif isinstance(key, (tuple, list)) and len(key) > 0: if len(key) == 1: resolution = self._get_item_or_section(key[0], handle_not_found=handle_not_found) else: resolution = self._get_item_or_section( key[0], handle_not_found=handle_not_found )._get_item_or_section(key[1:], handle_not_found=handle_not_found) else: raise TypeError('Expected either a string or a tuple as key, got {!r}'.format(key)) return resolution
MIT License
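A short usage sketch for get_item, assuming configmanager's top-level Config class (a Section subclass) and a small two-level schema; this follows the library's documented pattern, but the schema and values here are made up:

from configmanager import Config

config = Config({'uploads': {'threads': 4}})

item = config.get_item('uploads', 'threads')   # always an Item, never a bare value
print(item.value)                              # 4 (the schema default)

try:
    config.get_item('uploads')                 # a section, so get_item refuses it
except RuntimeError as exc:
    print(exc)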
boreq/botnet
botnet/modules/builtin/irc.py
InactivityMonitor._set_timers
python
def _set_timers(self):
    self._set_ping(self.ping_timeout)
    self._set_abort(self.abort_timeout)
Schedule the execution of the timers.
https://github.com/boreq/botnet/blob/5146a748d3f48d15a58aeb083b9dae129d995894/botnet/modules/builtin/irc.py#L72-L75
import select import datetime import socket import ssl import threading import time import fnmatch from ...logging import get_logger from ...message import Message from ...signals import message_in, message_out, on_exception, config_changed from .. import BaseResponder from ..lib import parse_command class NoopWith(object): def __enter__(self): return self def __exit__(self, exception_type, exception_value, traceback): pass class InactivityMonitor(object): ping_timeout = 60 ping_repeat = 10 abort_timeout = 240 def __init__(self, irc_module): self.logger = get_logger(self) self.irc_module = irc_module self._timer_ping = None self._timer_abort = None def __enter__(self): message_in.connect(self.on_message_in) self._set_timers() return self def __exit__(self, exception_type, exception_value, traceback): message_in.disconnect(self.on_message_in) self._clear_timers() def _clear_timers(self): for timer in [self._timer_ping, self._timer_abort]: if timer is not None: timer.cancel() def _set_ping(self, timeout): self._timer_ping = threading.Timer(timeout, self.on_timer_ping) self._timer_ping.start() def _set_abort(self, timeout): self._timer_abort = threading.Timer(timeout, self.on_timer_abort) self._timer_abort.start()
MIT License
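_set_timers above arms two watchdogs built on threading.Timer (as _set_ping and _set_abort in the context show): a softer one that should trigger a PING and a harder one that aborts the connection. A stripped-down, runnable sketch of the same two-stage pattern with dummy callbacks and short timeouts:

import threading
import time

class InactivityWatchdog:
    def __init__(self, ping_timeout=0.2, abort_timeout=0.5):
        self.ping_timeout = ping_timeout
        self.abort_timeout = abort_timeout
        self._timers = []

    def _set_timers(self):
        # one timer per escalation level, started immediately
        for timeout, callback in ((self.ping_timeout, self.on_ping),
                                  (self.abort_timeout, self.on_abort)):
            timer = threading.Timer(timeout, callback)
            timer.start()
            self._timers.append(timer)

    def cancel(self):
        for timer in self._timers:
            timer.cancel()

    def on_ping(self):
        print('no traffic seen: sending PING')

    def on_abort(self):
        print('still no traffic: aborting connection')

watchdog = InactivityWatchdog()
watchdog._set_timers()
time.sleep(0.6)   # let both timers fire
watchdog.cancel()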
spcl/stencilflow
stencilflow/kernel_chain_graph.py
KernelChainGraph.create_kernels
python
def create_kernels(self) -> None:
    self.kernel_nodes = dict()
    for kernel in self.program:
        new_node = Kernel(
            name=kernel,
            kernel_string=str(self.program[kernel]['computation_string']),
            dimensions=self.dimensions,
            data_type=self.program[kernel]['data_type'],
            boundary_conditions=self.program[kernel]['boundary_conditions'],
            raw_inputs=self.inputs,
            vectorization=self.vectorization,
        )
        self.graph.add_node(new_node)
        self.kernel_nodes[kernel] = new_node
    self.input_nodes = dict()
    for inp in self.inputs:
        new_node = Input(name=inp,
                         data_type=self.inputs[inp]["data_type"],
                         data_queue=BoundedQueue(
                             name=inp,
                             maxsize=self.total_elements(),
                             collection=[]))
        self.input_nodes[inp] = new_node
        self.graph.add_node(new_node)
    self.output_nodes = dict()
    for out in self.outputs:
        new_node = Output(name=out,
                          data_type=self.program[out]["data_type"],
                          dimensions=self.dimensions,
                          data_queue=BoundedQueue(name="dummy", maxsize=0))
        self.output_nodes[out] = new_node
        self.graph.add_node(new_node)
Create the kernels and add them to the networkx (library) graph.
https://github.com/spcl/stencilflow/blob/28bb88e7f4251f29aecc266663bc780023ed2549/stencilflow/kernel_chain_graph.py#L417-L455
__author__ = "Andreas Kuster (kustera@ethz.ch)" __copyright__ = "BSD 3-Clause License" import argparse import ast import copy import functools import operator import re import os from typing import Any, List, Dict, Tuple import networkx as nx import stencilflow from stencilflow.log_level import LogLevel from stencilflow.kernel import Kernel from stencilflow.bounded_queue import BoundedQueue from stencilflow.input import Input from stencilflow.output import Output from stencilflow.simulator import Simulator class KernelChainGraph: def __init__(self, path: str, plot_graph: bool = False, log_level: LogLevel = LogLevel.NO_LOG) -> None: if log_level >= LogLevel.MODERATE: print("Initialize KernelChainGraph.") self.path: str = os.path.abspath(path) self.log_level: LogLevel = log_level self.inputs: Dict[str, Dict[str, str]] = dict() self.outputs: List[str] = list() self.dimensions: List[int] = list() self.program: Dict[str, Dict[str, Dict[str, Dict[str, str]]]] = dict( ) self.vectorization = 1 self.kernel_latency = None self.channels: Dict[ str, BoundedQueue] = dict() self.graph: nx.DiGraph = nx.DiGraph() self.input_nodes: Dict[str, Kernel] = dict() self.output_nodes: Dict[str, Kernel] = dict() self.kernel_nodes: Dict[str, Kernel] = dict() self.config = stencilflow.parse_json("stencil_chain.config") self.name = os.path.splitext(os.path.basename(self.path))[0] self.kernel_dimensions = -1 self.constants = {} if self.log_level >= LogLevel.MODERATE: print("Read input config files.") self.import_input() if self.log_level >= LogLevel.MODERATE: print("Create all kernels.") self.create_kernels() if self.log_level >= LogLevel.MODERATE: print("Compute kernel latencies.") self.compute_kernel_latency() if self.log_level >= LogLevel.MODERATE: print("Connect kernels.") self.connect_kernels() if self.log_level >= LogLevel.MODERATE: print("Compute delay buffer sizes.") self.compute_delay_buffer() if self.log_level >= LogLevel.MODERATE: print("Add channels to the graph edges.") if plot_graph: if self.log_level >= LogLevel.MODERATE: print("Plot kernel chain graph.") self.plot_graph(self.name + ".png") if self.log_level >= LogLevel.MODERATE: print("Plot computation graph of each kernel.") self.add_channels( ) for kernel in self.program: if "sin" in self.program[kernel]["computation_string"] or "cos" in self.program[kernel]["computation_string"] or "tan" in self.program[kernel]["computation_string"]: print( "Warning: Computation contains sinusoidal functions with experimental latency values." 
) if self.log_level >= LogLevel.MODERATE: self.report(self.name) def enumerate_cuts(self) -> Tuple[List[nx.DiGraph], Dict[Any, int]]: from collections import deque from typing import Deque, Union from copy import deepcopy def getcuts(g: nx.DiGraph) -> Dict[Tuple[int], nx.DiGraph]: newcuts: Dict[Tuple[int], nx.DiGraph] = {} for n, color in g.nodes(data='color'): if color == 1: continue newgraph = deepcopy(g) newgraph.nodes[n]['color'] = 1 for d in nx.descendants(newgraph, n): newgraph.nodes[d]['color'] = 1 newcuts[tuple(nd for nd, c in newgraph.nodes(data='color') if c == 1)] = newgraph return newcuts cuts: Dict[Tuple[int], nx.DiGraph] = {} q: Deque[nx.DiGraph] = deque() initial_graph = nx.DiGraph() node_id: Dict[Union[Kernel, Input, Output], int] = {} for i, n in enumerate(self.graph.nodes): if not isinstance(n, Kernel): continue initial_graph.add_node(i, color=0) node_id[n] = i for e in self.graph.edges: if e[0] in node_id and e[1] in node_id: initial_graph.add_edge(node_id[e[0]], node_id[e[1]]) q.append(initial_graph) while len(q) > 0: g = q.pop() newcuts = getcuts(g) added = set(newcuts.keys()) - set(cuts.keys()) cuts.update(newcuts) q.extend([cuts[c] for c in added]) return list(cuts.values()), node_id def plot_graph(self, save_path: str = None) -> None: import matplotlib matplotlib.rc('text', usetex=False) import matplotlib.pyplot as plt fig, ax = plt.subplots() ax.set_axis_off() positions = nx.nx_pydot.graphviz_layout(self.graph, prog="dot") nums = list() names = list() ops = list() outs = list() for node in self.graph.nodes: if isinstance(node, Kernel): ops.append(node) elif isinstance(node, Input): names.append(node) elif isinstance(node, Output): outs.append(node) num_nodes = (len(nums) + len(names) + len(ops) + len(outs)) fig_size = num_nodes if (num_nodes > 10) else 10 fig.set_size_inches(fig_size, fig_size) labels = dict() for node in self.graph.nodes: labels[node] = node.generate_label() nx.draw_networkx_nodes(self.graph, positions, nodelist=names, node_color='orange', node_size=3000, node_shape='s', edgecolors='black') nx.draw_networkx_nodes(self.graph, positions, nodelist=outs, node_color='green', node_size=3000, node_shape='s') nx.draw_networkx_nodes(self.graph, positions, nodelist=nums, node_color='#007acc', node_size=3000, node_shape='s') nx.draw_networkx(self.graph, positions, nodelist=ops, node_color='red', node_size=3000, node_shape='o', font_weight='bold', font_size=16, edgecolors='black', arrows=True, arrowsize=36, arrowstyle='-|>', width=6, linewidths=1, with_labels=False) nx.draw_networkx_labels(self.graph, positions, labels=labels, font_weight='bold', font_size=16) if save_path is not None: fig.savefig(save_path) else: fig.show() def connect_kernels(self) -> None: for src in self.graph.nodes: for dest in self.graph.nodes: if src is not dest: if isinstance(src, Kernel) and isinstance(dest, Kernel): for inp in dest.graph.inputs: if src.name == inp.name: self.graph.add_edge(src, dest, channel=None) break elif isinstance(src, Input) and isinstance(dest, Kernel): for inp in dest.graph.inputs: if src.name == inp.name: self.graph.add_edge(src, dest, channel=None) break elif isinstance(src, Kernel) and isinstance(dest, Output): if src.name == dest.name: self.graph.add_edge(src, dest, channel=None) else: pass def add_channels(self) -> None: self.channels = dict() for src in self.graph.nodes: for dest in self.graph.nodes: if src is not dest: if isinstance(src, Kernel) and isinstance( dest, Kernel): for inp in dest.graph.inputs: if src.name == inp.name: name = src.name + "_" 
+ dest.name channel = { "name": name, "delay_buffer": self.kernel_nodes[dest.name].delay_buffer[ src.name], "internal_buffer": dest.internal_buffer[src.name], "data_type": src.data_type } self.channels[name] = channel src.outputs[dest.name] = channel dest.inputs[src.name] = channel self.graph[src][dest]['channel'] = channel break elif isinstance(src, Input) and isinstance( dest, Kernel): for inp in dest.graph.inputs: if src.name == inp.name: name = src.name + "_" + dest.name channel = { "name": name, "delay_buffer": self.kernel_nodes[dest.name].delay_buffer[ src.name], "internal_buffer": dest.internal_buffer[src.name], "data_type": src.data_type, "input_dims": self.inputs[src.name]["input_dims"] if "input_dims" in self.inputs[src.name] else None } self.channels[name] = channel src.outputs[dest.name] = channel dest.inputs[src.name] = channel self.graph[src][dest]['channel'] = channel break elif isinstance(src, Kernel) and isinstance( dest, Output): if src.name == dest.name: name = src.name + "_" + dest.name channel = { "name": name, "delay_buffer": self.output_nodes[dest.name].delay_buffer[ src.name], "internal_buffer": {}, "data_type": src.data_type } self.channels[name] = channel src.outputs[dest.name] = channel dest.inputs[src.name] = channel self.graph[src][dest]["channel"] = channel else: pass def import_input(self) -> None: inp = stencilflow.parse_json(self.path) self.kernel_dimensions = len(inp["dimensions"]) if "constants" in inp: self.constants = copy.copy(inp["constants"]) else: self.constants = {} self.vectorization = int( inp["vectorization"]) if "vectorization" in inp else 1 self.program = inp["program"] self.inputs = inp["inputs"] for i in self.inputs.values(): if "input_dims" not in i: if "dimensions" in i: i["input_dims"] = i["dimensions"] else: i["input_dims"] = stencilflow.ITERATORS[len(stencilflow. ITERATORS) - self.kernel_dimensions:] self.outputs = inp["outputs"] if self.kernel_dimensions == 1: for entry in self.program: self.program[entry]["computation_string"] = self.program[entry]["computation_string"].replace("[", "[i, j,") self.dimensions = [ 1, 1 ] + inp["dimensions"] elif self.kernel_dimensions == 2: for entry in self.program: self.program[entry]["computation_string"] = self.program[entry]["computation_string"] .replace("[", "[i,") self.dimensions = [1] + inp["dimensions"] else: self.dimensions = inp["dimensions"] def total_elements(self) -> int: return functools.reduce(operator.mul, self.dimensions, 1)
BSD 3-Clause New or Revised License
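create_kernels above instantiates three kinds of node objects (Kernel, Input, Output) and registers each object directly as a vertex of the networkx DiGraph; edges are wired up later by connect_kernels. A minimal illustration of that object-as-node usage with toy stand-in classes:

import networkx as nx

class Node:
    def __init__(self, name):
        self.name = name
    def __repr__(self):
        return f'{type(self).__name__}({self.name})'

class Input(Node): pass
class Kernel(Node): pass
class Output(Node): pass

graph = nx.DiGraph()
a, k, out = Input('a'), Kernel('res'), Output('res')
for node in (a, k, out):
    graph.add_node(node)          # the Python objects themselves are the graph vertices
graph.add_edge(a, k)              # wiring happens later in the real code (connect_kernels)
graph.add_edge(k, out)

print(list(nx.topological_sort(graph)))   # [Input(a), Kernel(res), Output(res)]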
alecrubin/pytorch-serverless
fastai/transforms.py
lighting
python
def lighting(im, b, c):
    if b == 0 and c == 1:
        return im
    mu = np.average(im)
    return np.clip((im-mu)*c+mu+b, 0., 1.).astype(np.float32)
adjusts image's balance and contrast
https://github.com/alecrubin/pytorch-serverless/blob/ce7bcfe842c022d405e639850308185b67434e53/fastai/transforms.py#L47-L51
from enum import IntEnum from .imports import * from .core import A, partition def scale_min(im, targ, interpolation=cv2.INTER_AREA): r, c, *_ = im.shape ratio = targ/min(r, c) sz = (scale_to(c, ratio, targ), scale_to(r, ratio, targ)) return cv2.resize(im, sz, interpolation=interpolation) def zoom_cv(x, z): if z == 0: return x r, c, *_ = x.shape M = cv2.getRotationMatrix2D((c/2, r/2), 0, z+1.) return cv2.warpAffine(x, M, (c, r)) def stretch_cv(x, sr, sc, interpolation=cv2.INTER_AREA): if sr == 0 and sc == 0: return x r, c, *_ = x.shape x = cv2.resize(x, None, fx=sr+1, fy=sc+1, interpolation=interpolation) nr, nc, *_ = x.shape cr = (nr-r)//2 cc = (nc-c)//2 return x[cr:r+cr, cc:c+cc] def dihedral(x, dih): x = np.rot90(x, dih%4) return x if dih < 4 else np.fliplr(x)
MIT License
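A quick numerical check of lighting on a toy image (the function is copied here verbatim so the snippet runs on its own): the contrast factor c rescales pixels around the image mean, b shifts them, and the result is clipped to [0, 1]:

import numpy as np

def lighting(im, b, c):
    if b == 0 and c == 1:
        return im
    mu = np.average(im)
    return np.clip((im - mu) * c + mu + b, 0., 1.).astype(np.float32)

im = np.array([[0.2, 0.4], [0.6, 0.8]], dtype=np.float32)   # mean is 0.5
print(lighting(im, b=0.1, c=2.0))
# contrast doubles the spread around 0.5, then everything is brightened by 0.1 and clipped:
# [[0.  0.4]
#  [0.8 1. ]]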
wikipedialibrary/twlight
TWLight/applications/views.py
_BaseListApplicationView._filter_queryset
python
def _filter_queryset(self, base_qs, editor, partner):
    if editor:
        base_qs = base_qs.filter(editor=editor)

    if partner:
        base_qs = base_qs.filter(partner=partner)

    return base_qs
Handle filters that might have been passed in by post().
https://github.com/wikipedialibrary/twlight/blob/2bdad4f6e2f58a16e750eddbd691da7c1fbd8a4b/TWLight/applications/views.py#L571-L581
import logging import urllib.error import urllib.parse import urllib.request from urllib.parse import urlparse from datetime import datetime, timedelta import bleach from crispy_forms.helper import FormHelper from crispy_forms.layout import Submit from dal import autocomplete from django import forms from django.conf import settings from django.contrib import messages from django.contrib.auth.models import User from django.core.exceptions import PermissionDenied, ObjectDoesNotExist from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger from django.urls import reverse, reverse_lazy from django.db import IntegrityError from django.db.models import Q from django.http import HttpResponseRedirect, HttpResponseBadRequest, Http404 from django.utils.translation import gettext as _ from django.views.generic.base import View from django.views.generic.detail import DetailView from django.views.generic.edit import FormView, UpdateView from django.views.generic.list import ListView from reversion import revisions as reversion from reversion.models import Version from TWLight.users.helpers.editor_data import editor_bundle_eligible from TWLight.applications.signals import no_more_accounts from TWLight.resources.models import Partner, AccessCode from TWLight.users.groups import get_coordinators from TWLight.users.models import Authorization, Editor from TWLight.view_mixins import ( PartnerCoordinatorOrSelf, CoordinatorsOnly, PartnerCoordinatorOnly, EditorsOnly, ToURequired, EmailRequired, SelfOnly, DataProcessingRequired, NotDeleted, ) from .forms import BaseApplicationForm, ApplicationAutocomplete, RenewalForm from .helpers import ( USER_FORM_FIELDS, PARTNER_FORM_OPTIONAL_FIELDS, PARTNER_FORM_BASE_FIELDS, get_output_for_application, count_valid_authorizations, get_accounts_available, is_proxy_and_application_approved, more_applications_than_accounts_available, ) from .models import Application logger = logging.getLogger(__name__) coordinators = get_coordinators() PARTNERS_SESSION_KEY = "applications_request__partner_ids" class EditorAutocompleteView(autocomplete.Select2QuerySetView): def get_queryset(self): if self.request.user.is_superuser: editor_qs = Editor.objects.all().order_by("wp_username") if self.q: editor_qs = editor_qs.filter(wp_username__istartswith=self.q).order_by( "wp_username" ) elif coordinators in self.request.user.groups.all(): editor_qs = Editor.objects.filter( applications__partner__coordinator__pk=self.request.user.pk ).order_by("wp_username") if self.q: editor_qs = editor_qs.filter(wp_username__istartswith=self.q).order_by( "wp_username" ) else: editor_qs = Editor.objects.none() return editor_qs class PartnerAutocompleteView(autocomplete.Select2QuerySetView): def get_queryset(self): if self.request.user.is_superuser: partner_qs = Partner.objects.filter( ~Q(authorization_method=Partner.BUNDLE) ).order_by("company_name") if self.q: partner_qs = partner_qs.filter( company_name__istartswith=self.q ).order_by("company_name") elif coordinators in self.request.user.groups.all(): partner_qs = Partner.objects.filter( coordinator__pk=self.request.user.pk ).order_by("company_name") if self.q: partner_qs = partner_qs.filter( company_name__istartswith=self.q ).order_by("company_name") else: partner_qs = Partner.objects.none() return partner_qs class RequestApplicationView(EditorsOnly, ToURequired, EmailRequired, FormView): template_name = "applications/request_for_application.html" def get_context_data(self, **kwargs): context = super(RequestApplicationView, 
self).get_context_data(**kwargs) context["any_waitlisted"] = Partner.objects.filter( status=Partner.WAITLIST ).exists() return context def get_form_class(self): fields = {} field_order = [] open_apps = Application.objects.filter( editor=self.request.user.editor, status__in=( Application.SENT, Application.QUESTION, Application.PENDING, Application.APPROVED, ), ) open_apps_partners = [] for i in open_apps: open_apps_partners.append(i.partner.company_name) for partner in Partner.objects.filter( ~Q(authorization_method=Partner.BUNDLE) ).order_by("company_name"): if partner.company_name not in open_apps_partners: field_name = "partner_{id}".format(id=partner.id) fields[field_name] = forms.BooleanField( label=partner.company_name, required=False, widget=forms.CheckboxInput(attrs={"object": partner}), ) field_order.append(partner.company_name) form_class = type("RfAForm", (forms.Form,), fields) form_class.field_order = field_order return form_class def form_valid(self, form): partner_ids = [ int(key[8:]) for key in form.cleaned_data if form.cleaned_data[key] ] for each_id in partner_ids: try: if ( Partner.objects.get(id=each_id).authorization_method == Partner.BUNDLE ): partner_ids.remove(each_id) except Partner.DoesNotExist: partner_ids.remove(each_id) self.request.session[PARTNERS_SESSION_KEY] = partner_ids if len(partner_ids): return HttpResponseRedirect(reverse("applications:apply")) else: messages.add_message( self.request, messages.WARNING, _("Please select at least one partner."), ) return HttpResponseRedirect(reverse("applications:request")) class _BaseSubmitApplicationView( EditorsOnly, ToURequired, EmailRequired, DataProcessingRequired, FormView ): template_name = "applications/apply.html" form_class = BaseApplicationForm def get_form(self, form_class=None): if form_class is None: form_class = self.form_class kwargs = self.get_form_kwargs() field_params = {} partners = self._get_partners() user_fields = self._get_user_fields(partners) field_params["user"] = user_fields for partner in partners: key = "partner_{id}".format(id=partner.id) fields = self._get_partner_fields(partner) field_params[key] = fields kwargs["field_params"] = field_params return form_class(**kwargs) def get_initial(self): initial = super(_BaseSubmitApplicationView, self).get_initial() editor = self.request.user.editor for field in USER_FORM_FIELDS: initial[field] = getattr(editor, field) return initial def form_valid(self, form): editor = self.request.user.editor for field in USER_FORM_FIELDS: if field in form.cleaned_data: setattr(editor, field, form.cleaned_data[field]) editor.save() partner_fields = PARTNER_FORM_BASE_FIELDS + PARTNER_FORM_OPTIONAL_FIELDS for partner in form.field_params: partner_id = partner[8:] partner_obj = Partner.objects.get(id=partner_id) if partner_obj.authorization_method == Partner.BUNDLE: raise PermissionDenied app = Application() app.editor = self.request.user.editor app.partner = partner_obj if app.partner.status == Partner.WAITLIST: app.waitlist_status = True for field in partner_fields: label = "{partner}_{field}".format(partner=partner, field=field) try: data = form.cleaned_data[label] except KeyError: data = None if data == "[deleted]": fail_msg = _("This field consists only of restricted text.") form.add_error(label, fail_msg) return self.form_invalid(form) if data: setattr(app, field, data) app.save() del self.request.session[PARTNERS_SESSION_KEY] return super(_BaseSubmitApplicationView, self).form_valid(form) def _get_partner_fields(self, partner): return [ field for field in 
PARTNER_FORM_OPTIONAL_FIELDS if getattr(partner, field) ] def _get_user_fields(self, partners=None): if not partners: return None needed_fields = {} for field in USER_FORM_FIELDS: query = {"{field}".format(field=field): True} partners_queried = partners.filter(**query) if partners_queried.count(): requesting_partners = partners_queried.distinct() needed_fields[field] = [x.__str__() for x in partners_queried] return needed_fields class SubmitApplicationView(_BaseSubmitApplicationView): def dispatch(self, request, *args, **kwargs): fail_msg = _("Choose at least one resource you want access to.") if not PARTNERS_SESSION_KEY in list(request.session.keys()): messages.add_message(request, messages.WARNING, fail_msg) return HttpResponseRedirect(reverse("applications:request")) if len(request.session[PARTNERS_SESSION_KEY]) == 0: messages.add_message(request, messages.WARNING, fail_msg) return HttpResponseRedirect(reverse("applications:request")) try: partners = self._get_partners() if partners.count() == 0: messages.add_message(request, messages.WARNING, fail_msg) return HttpResponseRedirect(reverse("applications:request")) except: messages.add_message(request, messages.WARNING, fail_msg) return HttpResponseRedirect(reverse("applications:request")) return super(SubmitApplicationView, self).dispatch(request, *args, **kwargs) def get_success_url(self): messages.add_message( self.request, messages.SUCCESS, _("Your application has been submitted for review. Head over to <a href='{applications_url}'>My Applications</a> to view the status.") .format( applications_url=reverse_lazy( "users:my_applications", kwargs={"pk": self.request.user.editor.pk}, ) ), ) user_home = reverse( "users:editor_detail", kwargs={"pk": self.request.user.editor.pk} ) return user_home def _get_partners(self): partner_ids = self.request.session[PARTNERS_SESSION_KEY] partners = Partner.objects.filter(id__in=partner_ids) try: assert len(partner_ids) == partners.count() except AssertionError: logger.exception( "Number of partners found does not match number " "of IDs provided" ) raise return partners class SubmitSingleApplicationView(_BaseSubmitApplicationView): def dispatch(self, request, *args, **kwargs): if self._get_partners()[0].authorization_method == Partner.BUNDLE: raise PermissionDenied elif self._get_partners()[0].status == Partner.WAITLIST: messages.add_message( request, messages.WARNING, _("This partner does not have any access grants available at this time. You may still apply for access; your application will be reviewed when access grants become available."), ) if self._check_duplicate_applications(): url, message = self._check_duplicate_applications() messages.add_message(request, messages.ERROR, message) return HttpResponseRedirect(url, message) if not editor_bundle_eligible(self.request.user.editor): return HttpResponseRedirect(reverse("users:my_library")) return super(SubmitSingleApplicationView, self).dispatch( request, *args, **kwargs ) def get_success_url(self): messages.add_message( self.request, messages.SUCCESS, _("Your application has been submitted for review. 
Head over to <a href='{applications_url}'>My Applications</a> to view the status.") .format( applications_url=reverse_lazy( "users:my_applications", kwargs={"pk": self.request.user.editor.pk}, ) ), ) user_home = self._get_partners()[0].get_absolute_url() return user_home def _get_partners(self): partner_id = self.kwargs["pk"] self.request.session[PARTNERS_SESSION_KEY] = partner_id partners = Partner.objects.filter(id=partner_id) if not partners: raise Http404("No partner matches the given query") try: assert partners.count() == 1 except AssertionError: logger.exception( "Expected 1 partner, got {count}".format(count=partners.count()) ) raise return partners def _check_duplicate_applications(self): partner = self._get_partners()[0] if partner.specific_title: return False editor = Editor.objects.get(user=self.request.user) apps = Application.objects.filter( partner=partner, editor=editor, status__in=( Application.QUESTION, Application.PENDING, Application.APPROVED, ), ) if apps.exists(): message = _("You already have an application for this Partner.") if len(apps) == 1: app = apps[0] url = reverse("applications:evaluate", kwargs={"pk": app.id}) else: url = reverse("users:my_applications", kwargs={"pk": editor.pk}) return (url, message) return False class _BaseListApplicationView(CoordinatorsOnly, ToURequired, ListView): model = Application
MIT License
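_filter_queryset above narrows the Application queryset only for the criteria that were actually supplied, relying on Django's chainable .filter() (each call ANDs one more condition). The same conditional-narrowing idea, shown here over a plain list of toy records so it runs without a Django project:

def filter_applications(applications, editor=None, partner=None):
    # mimic _filter_queryset: each supplied criterion narrows the result, missing ones are ignored
    if editor:
        applications = [a for a in applications if a['editor'] == editor]
    if partner:
        applications = [a for a in applications if a['partner'] == partner]
    return applications

apps = [
    {'editor': 'alice', 'partner': 'JSTOR'},
    {'editor': 'alice', 'partner': 'EBSCO'},
    {'editor': 'bob',   'partner': 'JSTOR'},
]
print(filter_applications(apps, editor='alice'))                    # two rows
print(filter_applications(apps, editor='alice', partner='JSTOR'))   # one row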
evhub/coconut
coconut/compiler/util.py
collapse_indents
python
def collapse_indents(indentation):
    change_in_level = ind_change(indentation)
    if change_in_level == 0:
        indents = ""
    elif change_in_level < 0:
        indents = closeindent * (-change_in_level)
    else:
        indents = openindent * change_in_level
    return indentation.replace(openindent, "").replace(closeindent, "") + indents
Removes all openindent-closeindent pairs.
https://github.com/evhub/coconut/blob/f90cbaae69275e9ad4c3fec1995151f9facdc4e5/coconut/compiler/util.py#L496-L505
from __future__ import print_function, absolute_import, unicode_literals, division from coconut.root import * import sys import re import traceback from functools import partial from contextlib import contextmanager from coconut._pyparsing import ( replaceWith, ZeroOrMore, Optional, SkipTo, CharsNotIn, ParseElementEnhance, ParseException, ParseResults, Combine, Regex, _trim_arity, _ParseResultsWithOffset, ) from coconut.terminal import ( logger, complain, get_name, ) from coconut.constants import ( opens, closes, openindent, closeindent, default_whitespace_chars, get_target_info, use_computation_graph, py2_vers, py3_vers, tabideal, ) from coconut.exceptions import ( CoconutException, CoconutInternalException, internal_assert, ) def find_new_value(value, toklist, new_toklist): if isinstance(value, ParseResults): if value._ParseResults__toklist == toklist: new_value_toklist = new_toklist else: new_value_toklist = [] for inner_value in value._ParseResults__toklist: new_value_toklist.append(find_new_value(inner_value, toklist, new_toklist)) return ParseResults(new_value_toklist) try: return new_toklist[toklist.index(value)] except ValueError: complain( lambda: CoconutInternalException( "inefficient reevaluation of tokens: {} not in {}".format( value, toklist, ), ), ) return evaluate_tokens(value) def evaluate_tokens(tokens): if isinstance(tokens, str): return tokens elif isinstance(tokens, ParseResults): toklist, name, asList, modal = tokens.__getnewargs__() new_toklist = [evaluate_tokens(toks) for toks in toklist] new_tokens = ParseResults(new_toklist, name, asList, modal) new_tokdict = {} for name, occurrences in tokens._ParseResults__tokdict.items(): new_occurences = [] for value, position in occurrences: new_value = find_new_value(value, toklist, new_toklist) new_occurences.append(_ParseResultsWithOffset(new_value, position)) new_tokdict[name] = occurrences new_tokens._ParseResults__accumNames.update(tokens._ParseResults__accumNames) new_tokens._ParseResults__tokdict.update(new_tokdict) return new_tokens elif isinstance(tokens, ComputationNode): return tokens.evaluate() elif isinstance(tokens, list): return [evaluate_tokens(inner_toks) for inner_toks in tokens] elif isinstance(tokens, tuple): return tuple(evaluate_tokens(inner_toks) for inner_toks in tokens) else: raise CoconutInternalException("invalid computation graph tokens", tokens) class ComputationNode(object): __slots__ = ("action", "loc", "tokens", "index_of_original") + (("been_called",) if DEVELOP else ()) list_of_originals = [] def __new__(cls, action, original, loc, tokens, ignore_no_tokens=False, ignore_one_token=False, greedy=False): if ignore_no_tokens and len(tokens) == 0: return [] elif ignore_one_token and len(tokens) == 1: return tokens[0] else: self = super(ComputationNode, cls).__new__(cls) self.action, self.loc, self.tokens = action, loc, tokens try: self.index_of_original = self.list_of_originals.index(original) except ValueError: self.index_of_original = len(self.list_of_originals) self.list_of_originals.append(original) if DEVELOP: self.been_called = False if greedy: return self.evaluate() else: return self @property def original(self): return self.list_of_originals[self.index_of_original] @property def name(self): name = getattr(self.action, "__name__", None) return name if name is not None else ascii(self.action) def evaluate(self): if DEVELOP: internal_assert(not self.been_called, "inefficient reevaluation of action " + self.name + " with tokens", self.tokens) self.been_called = True evaluated_toks = 
evaluate_tokens(self.tokens) if logger.tracing: logger.log_trace(self.name, self.original, self.loc, evaluated_toks, self.tokens) try: return _trim_arity(self.action)( self.original, self.loc, evaluated_toks, ) except CoconutException: raise except (Exception, AssertionError): traceback.print_exc() raise CoconutInternalException("error computing action " + self.name + " of evaluated tokens", evaluated_toks) def __repr__(self): inner_repr = "\n".join("\t" + line for line in repr(self.tokens).splitlines()) return self.name + "(\n" + inner_repr + "\n)" class CombineNode(Combine): __slots__ = () def _combine(self, original, loc, tokens): combined_tokens = super(CombineNode, self).postParse(original, loc, tokens) internal_assert(len(combined_tokens) == 1, "Combine produced multiple tokens", combined_tokens) return combined_tokens[0] def postParse(self, original, loc, tokens): return ComputationNode(self._combine, original, loc, tokens, ignore_no_tokens=True, ignore_one_token=True) if use_computation_graph: CustomCombine = CombineNode else: CustomCombine = Combine def add_action(item, action): return item.copy().addParseAction(action) def attach(item, action, ignore_no_tokens=None, ignore_one_token=None, **kwargs): if use_computation_graph: if ignore_no_tokens is None: ignore_no_tokens = getattr(action, "ignore_no_tokens", False) if ignore_one_token is None: ignore_one_token = getattr(action, "ignore_one_token", False) if ignore_no_tokens: kwargs["ignore_no_tokens"] = ignore_no_tokens if ignore_one_token: kwargs["ignore_one_token"] = ignore_one_token action = partial(ComputationNode, action, **kwargs) return add_action(item, action) def final(item): if use_computation_graph: item = add_action(item, evaluate_tokens) return item def unpack(tokens): logger.log_tag("unpack", tokens) if use_computation_graph: tokens = evaluate_tokens(tokens) if isinstance(tokens, ParseResults) and len(tokens) == 1: tokens = tokens[0] return tokens def parse(grammar, text): return unpack(grammar.parseWithTabs().parseString(text)) def all_matches(grammar, text): for tokens, start, stop in grammar.parseWithTabs().scanString(text): yield unpack(tokens), start, stop def match_in(grammar, text): for result in grammar.parseWithTabs().scanString(text): return True return False def append_it(iterator, last_val): for x in iterator: yield x yield last_val def get_vers_for_target(target): target_info = get_target_info(target) if not target_info: return py2_vers + py3_vers elif len(target_info) == 1: if target_info == (2,): return py2_vers elif target_info == (3,): return py3_vers else: raise CoconutInternalException("invalid target info", target_info) elif target_info == (3, 3): return [(3, 3), (3, 4)] else: return [target_info[:2]] def get_target_info_len2(target, mode="lowest"): supported_vers = get_vers_for_target(target) if mode == "lowest": return supported_vers[0] elif mode == "highest": return supported_vers[-1] elif mode == "nearest": if sys.version_info[:2] in supported_vers: return sys.version_info[:2] else: return supported_vers[-1] else: raise CoconutInternalException("unknown get_target_info_len2 mode", mode) def join_args(*arglists): return ", ".join(arg for args in arglists for arg in args if arg) def paren_join(items, sep): return items[0] if len(items) == 1 else "(" + (") " + sep + " (").join(items) + ")" skip_whitespace = SkipTo(CharsNotIn(default_whitespace_chars)).suppress() def longest(*args): internal_assert(len(args) >= 2, "longest expects at least two args") matcher = args[0] + skip_whitespace for elem in 
args[1:]: matcher ^= elem + skip_whitespace return matcher def addskip(skips, skip): if skip < 1: complain(CoconutInternalException("invalid skip of line " + str(skip))) else: skips.append(skip) return skips def count_end(teststr, testchar): count = 0 x = len(teststr) - 1 while x >= 0 and teststr[x] == testchar: count += 1 x -= 1 return count def paren_change(inputstring, opens=opens, closes=closes): count = 0 for c in inputstring: if c in opens: count -= 1 elif c in closes: count += 1 return count def ind_change(inputstring): return inputstring.count(openindent) - inputstring.count(closeindent) def compile_regex(regex): return re.compile(regex, re.U) def keyword(name): return Regex(name + r"\b", re.U) def fixto(item, output): return add_action(item, replaceWith(output)) def addspace(item): return attach(item, " ".join, ignore_no_tokens=True, ignore_one_token=True) def condense(item): return attach(item, "".join, ignore_no_tokens=True, ignore_one_token=True) def maybeparens(lparen, item, rparen): return item | lparen.suppress() + item + rparen.suppress() def tokenlist(item, sep, suppress=True): if suppress: sep = sep.suppress() return item + ZeroOrMore(sep + item) + Optional(sep) def itemlist(item, sep, suppress_trailing=True): return condense(item + ZeroOrMore(addspace(sep + item)) + Optional(sep.suppress() if suppress_trailing else sep)) def exprlist(expr, op): return addspace(expr + ZeroOrMore(op + expr)) def rem_comment(line): return line.split("#", 1)[0].rstrip() def should_indent(code): last = rem_comment(code.splitlines()[-1]) return last.endswith(":") or last.endswith("\\") or paren_change(last) < 0 def split_comment(line): base = rem_comment(line) return base, line[len(base):] def split_leading_comment(inputstring): if inputstring.startswith("#"): comment, rest = inputstring.split("\n", 1) return comment + "\n", rest else: return "", inputstring def split_leading_indent(line, max_indents=None): indent = "" while ( (max_indents is None or max_indents > 0) and line.startswith((openindent, closeindent)) ) or line.lstrip() != line: if max_indents is not None and line.startswith((openindent, closeindent)): max_indents -= 1 indent += line[0] line = line[1:] return indent, line def split_trailing_indent(line, max_indents=None): indent = "" while ( (max_indents is None or max_indents > 0) and line.endswith((openindent, closeindent)) ) or line.rstrip() != line: if max_indents is not None and (line.endswith(openindent) or line.endswith(closeindent)): max_indents -= 1 indent = line[-1] + indent line = line[:-1] return line, indent def split_leading_trailing_indent(line, max_indents=None): leading_indent, line = split_leading_indent(line, max_indents) line, trailing_indent = split_trailing_indent(line, max_indents) return leading_indent, line, trailing_indent
Apache License 2.0
flask-restful/flask-restful
flask_restful/inputs.py
_normalize_interval
python
def _normalize_interval(start, end, value): if not isinstance(start, datetime): start = datetime.combine(start, START_OF_DAY) end = datetime.combine(end, START_OF_DAY) if start.tzinfo is None: start = pytz.UTC.localize(start) end = pytz.UTC.localize(end) else: start = start.astimezone(pytz.UTC) end = end.astimezone(pytz.UTC) return start, end
Normalize datetime intervals. Given a pair of datetime.date or datetime.datetime objects, returns a 2-tuple of tz-aware UTC datetimes spanning the same interval. For datetime.date objects, the returned interval starts at 00:00:00.0 on the first date and ends at 00:00:00.0 on the second. Naive datetimes are upgraded to UTC. Timezone-aware datetimes are converted to UTC. Params: - start: A date or datetime - end: A date or datetime
https://github.com/flask-restful/flask-restful/blob/a2cf3a33992297110fd73742c3fbfbd7f819e5fb/flask_restful/inputs.py#L74-L102
from calendar import timegm from datetime import datetime, time, timedelta from email.utils import parsedate_tz, mktime_tz import re import aniso8601 import pytz START_OF_DAY = time(0, 0, 0, tzinfo=pytz.UTC) END_OF_DAY = time(23, 59, 59, 999999, tzinfo=pytz.UTC) url_regex = re.compile( r'^(?:http|ftp)s?://' r'(?:[^:@]+?:[^:@]*?@|)' r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+' r'(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|' r'localhost|' r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}|' r'\[?[A-F0-9]*:[A-F0-9:]+\]?)' r'(?::\d+)?' r'(?:/?|[/?]\S+)$', re.IGNORECASE) def url(value): if not url_regex.search(value): message = u"{0} is not a valid URL".format(value) if url_regex.search('http://' + value): message += u". Did you mean: http://{0}".format(value) raise ValueError(message) return value class regex(object): def __init__(self, pattern, flags=0): self.pattern = pattern self.re = re.compile(pattern, flags) def __call__(self, value): if not self.re.search(value): message = 'Value does not match pattern: "{0}"'.format(self.pattern) raise ValueError(message) return value def __deepcopy__(self, memo): return regex(self.pattern)
BSD 3-Clause New or Revised License
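A minimal usage sketch for the `_normalize_interval` record above. It is a private helper, so calling it directly is for illustration only; the import path follows the function_path shown, and the third `value` argument is not used by the body shown, so `None` is passed.

```python
from datetime import date, datetime

import pytz

from flask_restful.inputs import _normalize_interval  # private helper shown above

# date inputs become midnight-to-midnight UTC datetimes
start, end = _normalize_interval(date(2021, 1, 1), date(2021, 1, 2), None)
assert start == datetime(2021, 1, 1, tzinfo=pytz.UTC)
assert end == datetime(2021, 1, 2, tzinfo=pytz.UTC)

# naive datetimes are localized to UTC
start, _ = _normalize_interval(datetime(2021, 1, 1, 12), datetime(2021, 1, 1, 13), None)
assert start.utcoffset().total_seconds() == 0
```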
wavefronthq/python-client
wavefront_api_client/api/integration_api.py
IntegrationApi.get_installed_integration_with_http_info
python
def get_installed_integration_with_http_info(self, **kwargs): all_params = ['has_content', 'return_content'] all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in six.iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method get_installed_integration" % key ) params[key] = val del params['kwargs'] collection_formats = {} path_params = {} query_params = [] if 'has_content' in params: query_params.append(('hasContent', params['has_content'])) if 'return_content' in params: query_params.append(('returnContent', params['return_content'])) header_params = {} form_params = [] local_var_files = {} body_params = None header_params['Accept'] = self.api_client.select_header_accept( ['application/json']) auth_settings = ['api_key'] return self.api_client.call_api( '/api/v2/integration/installed', 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='ResponseContainerListIntegration', auth_settings=auth_settings, async_req=params.get('async_req'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats)
Gets a flat list of all Integrations that are installed, along with their status. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.get_installed_integration_with_http_info(async_req=True) >>> result = thread.get() :param async_req bool :param bool has_content: :param bool return_content: :return: ResponseContainerListIntegration If the method is called asynchronously, returns the request thread.
https://github.com/wavefronthq/python-client/blob/e410ce0dd8a2334e995456f4f3d44e0f04664a3a/wavefront_api_client/api/integration_api.py#L415-L485
from __future__ import absolute_import import re import six from wavefront_api_client.api_client import ApiClient class IntegrationApi(object): def __init__(self, api_client=None): if api_client is None: api_client = ApiClient() self.api_client = api_client def get_all_integration(self, **kwargs): kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.get_all_integration_with_http_info(**kwargs) else: (data) = self.get_all_integration_with_http_info(**kwargs) return data def get_all_integration_with_http_info(self, **kwargs): all_params = ['offset', 'limit'] all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in six.iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method get_all_integration" % key ) params[key] = val del params['kwargs'] collection_formats = {} path_params = {} query_params = [] if 'offset' in params: query_params.append(('offset', params['offset'])) if 'limit' in params: query_params.append(('limit', params['limit'])) header_params = {} form_params = [] local_var_files = {} body_params = None header_params['Accept'] = self.api_client.select_header_accept( ['application/json']) auth_settings = ['api_key'] return self.api_client.call_api( '/api/v2/integration', 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='ResponseContainerPagedIntegration', auth_settings=auth_settings, async_req=params.get('async_req'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def get_all_integration_in_manifests(self, **kwargs): kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.get_all_integration_in_manifests_with_http_info(**kwargs) else: (data) = self.get_all_integration_in_manifests_with_http_info(**kwargs) return data def get_all_integration_in_manifests_with_http_info(self, **kwargs): all_params = [] all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in six.iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method get_all_integration_in_manifests" % key ) params[key] = val del params['kwargs'] collection_formats = {} path_params = {} query_params = [] header_params = {} form_params = [] local_var_files = {} body_params = None header_params['Accept'] = self.api_client.select_header_accept( ['application/json']) auth_settings = ['api_key'] return self.api_client.call_api( '/api/v2/integration/manifests', 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='ResponseContainerListIntegrationManifestGroup', auth_settings=auth_settings, async_req=params.get('async_req'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def get_all_integration_in_manifests_min(self, **kwargs): kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return 
self.get_all_integration_in_manifests_min_with_http_info(**kwargs) else: (data) = self.get_all_integration_in_manifests_min_with_http_info(**kwargs) return data def get_all_integration_in_manifests_min_with_http_info(self, **kwargs): all_params = [] all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in six.iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method get_all_integration_in_manifests_min" % key ) params[key] = val del params['kwargs'] collection_formats = {} path_params = {} query_params = [] header_params = {} form_params = [] local_var_files = {} body_params = None header_params['Accept'] = self.api_client.select_header_accept( ['application/json']) auth_settings = ['api_key'] return self.api_client.call_api( '/api/v2/integration/manifests/min', 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='ResponseContainerListIntegrationManifestGroup', auth_settings=auth_settings, async_req=params.get('async_req'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def get_all_integration_statuses(self, **kwargs): kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.get_all_integration_statuses_with_http_info(**kwargs) else: (data) = self.get_all_integration_statuses_with_http_info(**kwargs) return data def get_all_integration_statuses_with_http_info(self, **kwargs): all_params = [] all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in six.iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method get_all_integration_statuses" % key ) params[key] = val del params['kwargs'] collection_formats = {} path_params = {} query_params = [] header_params = {} form_params = [] local_var_files = {} body_params = None header_params['Accept'] = self.api_client.select_header_accept( ['application/json']) auth_settings = ['api_key'] return self.api_client.call_api( '/api/v2/integration/status', 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='ResponseContainerMapStringIntegrationStatus', auth_settings=auth_settings, async_req=params.get('async_req'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def get_installed_integration(self, **kwargs): kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.get_installed_integration_with_http_info(**kwargs) else: (data) = self.get_installed_integration_with_http_info(**kwargs) return data
Apache License 2.0
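A hedged calling sketch for the record above. The bare `ApiClient()` below is only a placeholder; a real call needs the Wavefront host and API token configured on the client, which the snippet does not show, so treat this as shape-of-the-call illustration.

```python
from wavefront_api_client.api_client import ApiClient
from wavefront_api_client.api.integration_api import IntegrationApi

api = IntegrationApi(ApiClient())  # assumes host and API token are configured elsewhere

# synchronous wrapper: returns only the deserialized ResponseContainerListIntegration
installed = api.get_installed_integration(return_content=False)

# the *_with_http_info variant run asynchronously returns a worker thread
thread = api.get_installed_integration_with_http_info(async_req=True)
result = thread.get()
```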
lukas-blecher/latex-ocr
dataset/dataset.py
Im2LatexDataset.prepare_data
python
def prepare_data(self, batch): eqs, ims = batch.T tok = self.tokenizer(list(eqs), return_token_type_ids=False) for k, p in zip(tok, [[self.bos_token_id, self.eos_token_id], [1, 1]]): tok[k] = pad_sequence([torch.LongTensor([p[0]]+x+[p[1]]) for x in tok[k]], batch_first=True, padding_value=self.pad_token_id) if self.max_seq_len < tok['attention_mask'].shape[1]: return next(self) images = [] for path in list(ims): im = cv2.imread(path) if im is None: print(path, 'not found!') continue im = cv2.cvtColor(im, cv2.COLOR_BGR2RGB) if not self.test: if np.random.random() < .04: im[im != 255] = 0 images.append(self.transform(image=im)['image'][:1]) try: images = torch.cat(images).float().unsqueeze(1) except RuntimeError: logging.critical('Images not working: %s' % (' '.join(list(ims)))) return None, None if self.pad: h, w = images.shape[2:] images = F.pad(images, (0, self.max_dimensions[0]-w, 0, self.max_dimensions[1]-h), value=1) return tok, images
Loads images into memory. Args: batch (numpy.array[[str, str]]): array of equations and image path pairs Returns: tuple(torch.tensor, torch.tensor): data in memory
https://github.com/lukas-blecher/latex-ocr/blob/052e11d0f918dde37b96aa7b92421f11c331a7e4/dataset/dataset.py#L142-L180
import albumentations as alb from albumentations.pytorch import ToTensorV2 import torch import torch.nn as nn import torch.nn.functional as F from torch.nn.utils.rnn import pad_sequence import torch.utils.data as data from torchvision import transforms import numpy as np import imagesize import logging import glob import os from os.path import join from collections import defaultdict import pickle from PIL import Image import cv2 from transformers import PreTrainedTokenizerFast from tqdm.auto import tqdm train_transform = alb.Compose( [ alb.Compose( [alb.ShiftScaleRotate(shift_limit=0, scale_limit=(-.15, 0), rotate_limit=1, border_mode=0, interpolation=3, value=[255, 255, 255], p=1), alb.GridDistortion(distort_limit=0.1, border_mode=0, interpolation=3, value=[255, 255, 255], p=.5)], p=.15), alb.RGBShift(r_shift_limit=15, g_shift_limit=15, b_shift_limit=15, p=0.3), alb.GaussNoise(10, p=.2), alb.RandomBrightnessContrast(.05, (-.2, 0), True, p=0.2), alb.ImageCompression(95, p=.3), alb.ToGray(always_apply=True), alb.Normalize((0.7931, 0.7931, 0.7931), (0.1738, 0.1738, 0.1738)), ToTensorV2(), ] ) test_transform = alb.Compose( [ alb.ToGray(always_apply=True), alb.Normalize((0.7931, 0.7931, 0.7931), (0.1738, 0.1738, 0.1738)), ToTensorV2(), ] ) class Im2LatexDataset: keep_smaller_batches = False shuffle = True batchsize = 16 max_dimensions = (1024, 512) min_dimensions = (32, 32) max_seq_len = 1024 pad_token = "[PAD]" bos_token = "[BOS]" eos_token = "[EOS]" pad_token_id = 0 bos_token_id = 1 eos_token_id = 2 transform = train_transform def __init__(self, equations=None, images=None, tokenizer=None, shuffle=True, batchsize=16, max_seq_len=1024, max_dimensions=(1024, 512), min_dimensions=(32, 32), pad=False, keep_smaller_batches=False, test=False): if images is not None and equations is not None: assert tokenizer is not None self.images = [path.replace('\\', '/') for path in glob.glob(join(images, '*.png'))] self.sample_size = len(self.images) eqs = open(equations, 'r').read().split('\n') self.indices = [int(os.path.basename(img).split('.')[0]) for img in self.images] self.tokenizer = PreTrainedTokenizerFast(tokenizer_file=tokenizer) self.shuffle = shuffle self.batchsize = batchsize self.max_dimensions = max_dimensions self.min_dimensions = min_dimensions self.pad = pad self.keep_smaller_batches = keep_smaller_batches self.test = test self.data = defaultdict(lambda: []) try: for i, im in tqdm(enumerate(self.images), total=len(self.images)): width, height = imagesize.get(im) if min_dimensions[0] <= width <= max_dimensions[0] and min_dimensions[1] <= height <= max_dimensions[1]: self.data[(width, height)].append((eqs[self.indices[i]], im)) except KeyboardInterrupt: pass self.data = dict(self.data) self._get_size() iter(self) def __len__(self): return self.size def __iter__(self): self.i = 0 self.transform = test_transform if self.test else train_transform self.pairs = [] for k in self.data: info = np.array(self.data[k], dtype=object) p = torch.randperm(len(info)) if self.shuffle else torch.arange(len(info)) for i in range(0, len(info), self.batchsize): batch = info[p[i:i+self.batchsize]] if len(batch.shape) == 1: batch = batch[None, :] if len(batch) < self.batchsize and not self.keep_smaller_batches: continue self.pairs.append(batch) if self.shuffle: self.pairs = np.random.permutation(np.array(self.pairs, dtype=object)) else: self.pairs = np.array(self.pairs, dtype=object) self.size = len(self.pairs) return self def __next__(self): if self.i >= self.size: raise StopIteration self.i += 1 return 
self.prepare_data(self.pairs[self.i-1])
MIT License
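A usage sketch for the record above; `prepare_data` is reached indirectly through iteration (`__next__`). The file paths are placeholders, and `test=True` selects the deterministic transform.

```python
from dataset.dataset import Im2LatexDataset  # module path mirrors the repo layout

data = Im2LatexDataset(
    equations="data/math.txt",        # placeholder: one LaTeX string per line
    images="data/images",             # placeholder: directory of <index>.png files
    tokenizer="dataset/tokenizer.json",
    batchsize=8,
    test=True,
)

for tok, images in data:              # each step calls prepare_data(batch)
    print(tok["input_ids"].shape, images.shape)  # padded token ids, (B, 1, H, W) floats
    break
```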
marrink-lab/vermouth-martinize
vermouth/gmx/itp_read.py
ITPDirector._block_atoms
python
def _block_atoms(self, line, lineno=0): tokens = collections.deque(_tokenize(line)) self._parse_block_atom(tokens, self.current_block)
Parses the lines of the [atoms] directive.
https://github.com/marrink-lab/vermouth-martinize/blob/4261733ffa187cda80ac4f55510d2dc794d781ea/vermouth/gmx/itp_read.py#L246-L251
import collections from vermouth.molecule import (Block, Interaction) from vermouth.parser_utils import (SectionLineParser, _tokenize) class ITPDirector(SectionLineParser): COMMENT_CHAR = ';' atom_idxs = {'bonds': [0, 1], 'position_restraints': [0], 'angles': [0, 1, 2], 'constraints': [0, 1], 'dihedrals': [0, 1, 2, 3], 'pairs': [0, 1], 'pairs_nb': [0, 1], 'exclusions': [slice(None, None)], 'virtual_sites1': [0], 'virtual_sites2': [0, 1, 2], 'virtual_sites3': [0, 1, 2, 3], 'virtual_sites4': [slice(0, 5)], 'virtual_sitesn': [0, slice(2, None)], 'settles': [0], 'distance_restraints': [0, 1], 'dihedral_restraints': [slice(0, 4)], 'orientation_restraints': [0, 1], 'angle_restraints': [slice(0, 4)], 'angle_restraints_z': [0, 1]} def __init__(self, force_field): super().__init__() self.force_field = force_field self.current_block = None self.current_meta = None self.blocks = collections.OrderedDict() self.header_actions = { ('moleculetype', ): self._new_block } self.current_atom_names = [] def dispatch(self, line): if self.is_section_header(line): return self.parse_header elif self.is_pragma(line): return self.parse_pragma else: return self.parse_section @staticmethod def is_pragma(line): return line.startswith('#') def parse_pragma(self, line, lineno=0): if line == '#endif': if self.current_meta is not None: self.current_meta = None elif self.current_meta is None: raise IOError("Your #ifdef section is orderd incorrectly." "At line {} I read #endif but I haven not read" "a ifdef before.".format(lineno)) elif line.startswith("#else"): if self.current_meta is None: raise IOError("Your #ifdef section is orderd incorrectly." "At line {} I read #endif but I haven not read" "a ifdef before.".format(lineno)) inverse = {"ifdef": "ifndef", "ifndef": "ifdef"} tag = self.current_meta["tag"] condition = inverse[self.current_meta["condition"]] self.current_meta = {'tag': tag, 'condition': condition.replace("#", "")} elif line.startswith("#ifdef") or line.startswith("#ifndef"): if self.current_meta is None: condition, tag = line.split() self.current_meta = {'tag': tag, 'condition': condition.replace("#", "")} elif self.current_meta is not None: raise IOError("Your #ifdef/#ifndef section is orderd incorrectly." "At line {} I read {} but there is still" "an open #ifdef/#ifndef section from" "before.".format(lineno, line.split()[0])) else: raise IOError("Don't know how to parse pargma {} at" "line {}.".format(line, lineno)) def parse_header(self, line, lineno=0): prev_section = self.section ended = [] section = self.section + [line.strip('[ ]').casefold()] if tuple(section[-1:]) in self.METH_DICT: self.section = section[-1:] else: while tuple(section) not in self.METH_DICT and len(section) > 1: ended.append(section.pop(-2)) self.section = section result = None if len(prev_section) != 0: result = self.finalize_section(prev_section, ended) action = self.header_actions.get(tuple(self.section)) if action: action() return result def finalize_section(self, previous_section, ended_section): if "atoms" in ended_section: self.current_atom_names = list(self.current_block.nodes) if self.current_block is not None: self.force_field.blocks[self.current_block.name] = self.current_block def finalize(self, lineno=0): if self.current_meta is not None: raise IOError("Your #ifdef/#ifndef section is orderd incorrectly." 
"There is no #endif for the last pragma.") super().finalize() def _new_block(self): self.current_block = Block(force_field=self.force_field) @SectionLineParser.section_parser('moleculetype') def _block(self, line, lineno=0): name, nrexcl = line.split() self.current_block.name = name self.current_block.nrexcl = int(nrexcl) @SectionLineParser.section_parser('moleculetype', 'atoms')
Apache License 2.0
pyglet/pyglet
pyglet/media/drivers/xaudio2/adaptation.py
XAudio2AudioPlayer.delete
python
def delete(self): if self._xa2_source_voice: self._deleted = True
Called from Player. Docs say to clean up resources, but other drivers wait for GC to do it?
https://github.com/pyglet/pyglet/blob/b9a63ea179735c8f252ac31d51751bdf8a741c9d/pyglet/media/drivers/xaudio2/adaptation.py#L120-L123
import math import pyglet from pyglet.media.drivers.base import AbstractAudioDriver, AbstractAudioPlayer from pyglet.media.drivers.listener import AbstractListener from pyglet.media.events import MediaEvent from pyglet.util import debug_print from . import interface _debug = debug_print('debug_media') def _convert_coordinates(coordinates): x, y, z = coordinates return x, y, -z class XAudio2AudioPlayer(AbstractAudioPlayer): _cone_inner_angle = 360 _cone_outer_angle = 360 min_buffer_size = 9600 max_buffer_count = 3 def __init__(self, driver, xa2_driver, source, player): super(XAudio2AudioPlayer, self).__init__(source, player) self.driver = driver self._xa2_driver = xa2_driver self._flushing = False self._deleted = False self._playing = False self._write_cursor = 0 self._play_cursor = 0 self._events = [] self._timestamps = [] self.buffer_end_submitted = False self._buffers = [] self._xa2_source_voice = self._xa2_driver.get_source_voice(source, self) self._buffer_size = int(source.audio_format.sample_rate * 2) def on_driver_destroy(self): self.stop() self._xa2_source_voice = None def on_driver_reset(self): self._xa2_source_voice = self._xa2_driver.get_source_voice(self.source, self) for cx2_buffer in self._buffers: self._xa2_source_voice.submit_buffer(cx2_buffer) def __del__(self): if self._xa2_source_voice: self._xa2_source_voice = None
BSD 3-Clause New or Revised License
yf19970118/opld-pytorch
dota_devkit/ImgSplit_multi_process.py
splitbase.__init__
python
def __init__(self, basepath, outpath, code='utf-8', gap=512, subsize=1024, thresh=0.7, choosebestpoint=True, ext='.png', padding=True, num_process=8 ): self.basepath = basepath self.outpath = outpath self.code = code self.gap = gap self.subsize = subsize self.slide = self.subsize - self.gap self.thresh = thresh self.imagepath = os.path.join(self.basepath, 'images') self.labelpath = os.path.join(self.basepath, 'labelTxt-v1.0') self.outimagepath = os.path.join(self.outpath, 'images') self.outlabelpath = os.path.join(self.outpath, 'labelTxt') self.choosebestpoint = choosebestpoint self.ext = ext self.padding = padding self.pool = Pool(num_process) print('padding:', padding) if not os.path.isdir(self.outpath): os.mkdir(self.outpath) if not os.path.isdir(self.outimagepath): os.mkdir(self.outimagepath) if not os.path.isdir(self.outlabelpath): os.mkdir(self.outlabelpath)
:param basepath: base path for dota data :param outpath: output base path for dota data; the basepath and outpath have the same subdirectories, 'images' and 'labelTxt' :param code: encoding format of txt file :param gap: overlap between two patches :param subsize: subsize of each patch :param thresh: the threshold that determines whether to keep an instance when it is cut during splitting :param choosebestpoint: used to choose the first point for the :param ext: ext for the image format :param padding: whether to pad the images so that all the images have the same size
https://github.com/yf19970118/opld-pytorch/blob/4939bf62587da4533276fda20db36bb019575511/dota_devkit/ImgSplit_multi_process.py#L44-L93
import os import codecs import numpy as np import math from dota_utils import GetFileFromThisRootDir import cv2 import shapely.geometry as shgeo import dota_utils as util import copy from multiprocessing import Pool from functools import partial import time def cal_line_length(point1, point2): return math.sqrt( math.pow(point1[0] - point2[0], 2) + math.pow(point1[1] - point2[1], 2)) class splitbase():
MIT License
adicksonlab/wepy
src/wepy/analysis/contig_tree.py
BaseContigTree._set_discontinuities
python
def _set_discontinuities(self, wepy_h5, boundary_conditions_class): self._initialize_discontinuities(wepy_h5) for run_idx in self.run_idxs: warping_records = wepy_h5.warping_records([run_idx]) warp_cycle_idxs = set([rec[0] for rec in warping_records]) for node in self.graph.nodes: node_run_idx = node[0] node_cycle_idx = node[1] if (node_run_idx == run_idx) and (node_cycle_idx in warp_cycle_idxs): cycle_warp_records = [rec for rec in warping_records if (rec[0] == node_cycle_idx)] for rec in cycle_warp_records: rec_traj_idx = rec[1] if boundary_conditions_class.warping_discontinuity(rec): self.graph.node[node][self.DISCONTINUITY_KEY][rec_traj_idx] = -1
Given the boundary conditions class, sets node attributes marking where there are discontinuities in the parental lineages. Parameters ---------- boundary_conditions_class : class implementing BoundaryCondition interface
https://github.com/adicksonlab/wepy/blob/4a968606caa3141ad0f9981cd0fa4103dd090431/src/wepy/analysis/contig_tree.py#L307-L355
import itertools as it from copy import copy from operator import attrgetter from collections import deque import networkx as nx import numpy as np from matplotlib import cm from geomm.free_energy import free_energy as calc_free_energy from wepy.analysis.parents import ( DISCONTINUITY_VALUE, parent_panel, net_parent_table, ancestors, sliding_window, parent_cycle_discontinuities, ParentForest ) from wepy.analysis.network_layouts.tree import ResamplingTreeLayout from wepy.analysis.network_layouts.layout_graph import LayoutGraph try: import pandas as pd except ModuleNotFoundError: warn("pandas is not installed and that functionality will not work", RuntimeWarning) RESAMPLING = 'resampling' RESAMPLER = 'resampler' WARPING = 'warping' PROGRESS = 'progress' BC = 'boundary_conditions' class BaseContigTree(): RESAMPLING_PANEL_KEY = 'resampling_steps' PARENTS_KEY = 'parent_idxs' DISCONTINUITY_KEY = 'discontinuities' def __init__(self, wepy_h5, continuations=Ellipsis, runs=Ellipsis, boundary_condition_class=None, decision_class=None): was_closed = False if wepy_h5.closed: was_closed = True wepy_h5.open() self._graph = nx.DiGraph() self._boundary_condition_class=boundary_condition_class self._decision_class = decision_class self._continuations = set() self._run_idxs = set() if runs is Ellipsis: self._run_idxs.update(wepy_h5.run_idxs) elif runs is not None: self._run_idxs.update(runs) if continuations is Ellipsis: self._continuations.update([ (a,b) for a, b in wepy_h5.continuations if a in self._run_idxs and b in self._run_idxs] ) elif continuations is not None: self._continuations.update([ (a,b) for a, b in continuations if a in self._run_idxs and b in self._run_idxs] ) self._create_tree(wepy_h5) self._set_resampling_panels(wepy_h5) if self._decision_class is not None: self._set_parents(self._decision_class) if self._boundary_condition_class is not None: self._set_discontinuities(wepy_h5, self._boundary_condition_class) else: self._initialize_discontinuities(wepy_h5) self._spans = {span_idx : span_trace for span_idx, span_trace in enumerate(self.spanning_contig_traces())} if was_closed: wepy_h5.close() @property def graph(self): return self._graph @property def decision_class(self): return self._decision_class @property def boundary_condition_class(self): return self._boundary_condition_class @property def span_traces(self): return self._spans def span_contig(self, span_idx): contig = self.make_contig(self.span_traces[span_idx]) return contig def _create_tree(self, wepy_h5): for run_idx in self._run_idxs: n_cycles = wepy_h5.num_run_cycles(run_idx) nodes = [(run_idx, step_idx) for step_idx in range(n_cycles)] self.graph.add_nodes_from(nodes) edge_node_idxs = list(zip(range(1, n_cycles), range(n_cycles - 1))) edges = [(nodes[a], nodes[b]) for a, b in edge_node_idxs] self.graph.add_edges_from(edges) for edge_source, edge_target in self._continuations: source_node = (edge_source, 0) target_node = (edge_target, wepy_h5.num_run_cycles(edge_target)-1) edge = (source_node, target_node) self.graph.add_edge(*edge) def _set_resampling_panels(self, wepy_h5): for run_idx in self.run_idxs: run_resampling_panel = wepy_h5.run_resampling_panel(run_idx) for step_idx, step in enumerate(run_resampling_panel): node = (run_idx, step_idx) self.graph.nodes[node][self.RESAMPLING_PANEL_KEY] = step def _initialize_discontinuities(self, wepy_h5): for node in self.graph.nodes: n_walkers = len(self.graph.node[node][self.PARENTS_KEY]) self.graph.node[node][self.DISCONTINUITY_KEY] = [0 for i in range(n_walkers)]
MIT License
tektoncd/experimental
sdk/python/tekton_pipeline/models/v1beta1_workspace_declaration.py
V1beta1WorkspaceDeclaration.__init__
python
def __init__(self, description=None, mount_path=None, name='', optional=None, read_only=None, local_vars_configuration=None): if local_vars_configuration is None: local_vars_configuration = Configuration() self.local_vars_configuration = local_vars_configuration self._description = None self._mount_path = None self._name = None self._optional = None self._read_only = None self.discriminator = None if description is not None: self.description = description if mount_path is not None: self.mount_path = mount_path self.name = name if optional is not None: self.optional = optional if read_only is not None: self.read_only = read_only
V1beta1WorkspaceDeclaration - a model defined in OpenAPI
https://github.com/tektoncd/experimental/blob/0ba4e7a2b9d45ed4accaecbb34dac006d665796a/sdk/python/tekton_pipeline/models/v1beta1_workspace_declaration.py#L65-L86
import pprint import re import six from tekton_pipeline.configuration import Configuration class V1beta1WorkspaceDeclaration(object): """ Attributes: openapi_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ openapi_types = { 'description': 'str', 'mount_path': 'str', 'name': 'str', 'optional': 'bool', 'read_only': 'bool' } attribute_map = { 'description': 'description', 'mount_path': 'mountPath', 'name': 'name', 'optional': 'optional', 'read_only': 'readOnly' }
Apache License 2.0
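A construction sketch for the generated model above. Field names follow the attribute_map, values are illustrative, and the usual generated property accessors (not shown in the snippet) are assumed.

```python
from tekton_pipeline.models.v1beta1_workspace_declaration import V1beta1WorkspaceDeclaration

ws = V1beta1WorkspaceDeclaration(
    name="source",                       # required; defaults to '' if omitted
    mount_path="/workspace/source",
    description="Shared workspace for fetched sources",
    read_only=False,
)
print(ws.name, ws.mount_path)
```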
oceanprotocol/aquarius
aquarius/run.py
health
python
def health(): return get_status()
Returns the db connection status (mongodb or elasticsearch).
https://github.com/oceanprotocol/aquarius/blob/4cb5fe46019aa990cc53d69f2e78409d207ac62b/aquarius/run.py#L55-L59
import configparser from elasticsearch import Elasticsearch from flask import jsonify from flask_swagger import swagger from flask_swagger_ui import get_swaggerui_blueprint from aquarius.app.assets import assets from aquarius.app.chains import chains from aquarius.app.util import get_bool_env_value from aquarius.config import Config from aquarius.constants import BaseURLs, Metadata from aquarius.events.events_monitor import EventsMonitor from aquarius.events.util import setup_web3 from aquarius.myapp import app config = Config(filename=app.config["AQUARIUS_CONFIG_FILE"]) aquarius_url = config.aquarius_url def get_version(): conf = configparser.ConfigParser() conf.read(".bumpversion.cfg") return conf["bumpversion"]["current_version"] @app.route("/") def version(): info = dict() info["software"] = Metadata.TITLE info["version"] = get_version() info["plugin"] = config.module return jsonify(info) @app.route("/health")
Apache License 2.0
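A quick way to exercise the route above with Flask's built-in test client. This assumes the module imports cleanly, i.e. AQUARIUS_CONFIG_FILE points at a valid config and a backing database is reachable for get_status().

```python
from aquarius.run import app  # importing this module builds the Flask app

with app.test_client() as client:
    resp = client.get("/health")
    print(resp.status_code, resp.get_data(as_text=True))
```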
triton-inference-server/model_analyzer
model_analyzer/triton/server/server_config.py
TritonServerConfig.copy
python
def copy(self): config_copy = TritonServerConfig() config_copy.update_config(params=self._server_args) return config_copy
Returns ------- TritonServerConfig object that has the same args as this one
https://github.com/triton-inference-server/model_analyzer/blob/3151792403b4e3257dd5188fe745bbdf68e521e9/model_analyzer/triton/server/server_config.py#L126-L136
from model_analyzer.model_analyzer_exceptions import TritonModelAnalyzerException class TritonServerConfig: server_arg_keys = [ 'log-verbose', 'log-info', 'log-warning', 'log-error', 'id', 'model-store', 'model-repository', 'exit-timeout-secs', 'exit-on-error', 'strict-model-config', 'strict-readiness', 'allow-http', 'http-port', 'http-thread-count', 'allow-grpc', 'grpc-port', 'grpc-infer-allocation-pool-size', 'grpc-use-ssl', 'grpc-server-cert', 'grpc-server-key', 'grpc-root-cert', 'allow-metrics', 'allow-gpu-metrics', 'metrics-port', 'trace-file', 'trace-level', 'trace-rate', 'model-control-mode', 'repository-poll-secs', 'load-model', 'pinned-memory-pool-byte-size', 'cuda-memory-pool-byte-size', 'min-supported-compute-capability', 'backend-directory', 'backend-config', 'allow-soft-placement', 'gpu-memory-fraction', 'tensorflow-version' ] def __init__(self): self._server_args = {k: None for k in self.server_arg_keys} @classmethod def allowed_keys(cls): snake_cased_keys = [ key.replace('-', '_') for key in cls.server_arg_keys ] return cls.server_arg_keys + snake_cased_keys def update_config(self, params=None): if params: for key in params: self[key.strip().replace('_', '-')] = params[key] def to_cli_string(self): return ' '.join( [f'--{key}={val}' for key, val in self._server_args.items() if val])
Apache License 2.0
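A small sketch of the copy semantics above. update_config accepts underscore spellings and maps them to the dashed CLI keys; only methods shown in the snippet are used.

```python
from model_analyzer.triton.server.server_config import TritonServerConfig

base = TritonServerConfig()
base.update_config({"model_repository": "/models", "http_port": 8000})

clone = base.copy()                       # independent config with the same args
clone.update_config({"http_port": 8001})  # does not affect `base`

print(base.to_cli_string())   # --model-repository=/models --http-port=8000
print(clone.to_cli_string())  # --model-repository=/models --http-port=8001
```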
awslabs/gluon-ts
src/gluonts/support/util.py
LinearInterpolation.linear_interpolation
python
def linear_interpolation(self, x: float) -> np.ndarray: if self.x_coord[0] >= x: return self.y_coord[0] elif self.x_coord[-1] <= x: return self.y_coord[-1] else: for i, (x1, x2) in enumerate(zip(self.x_coord, self.x_coord[1:])): if x1 < x < x2: denominator = x2 - x1 + self.tol return (x2 - x) / denominator * self.y_coord[i] + ( x - x1 ) / denominator * self.y_coord[i + 1]
If x is out of the interpolation range, return the smallest or largest value. Otherwise, find the two nearest points [x_1, y_1], [x_2, y_2] and return their linear interpolation y = (x_2 - x)/(x_2 - x_1) * y_1 + (x - x_1)/(x_2 - x_1) * y_2. Parameters ---------- x x-coordinate at which to evaluate the interpolation. Returns ------- np.ndarray Interpolated value with the same shape as the entries of self.y_coord
https://github.com/awslabs/gluon-ts/blob/df4256b0e67120db555c109a1bf6cfa2b3bd3cd8/src/gluonts/support/util.py#L212-L241
import os import signal import time from pathlib import Path from typing import Any, Callable, Dict, List, Optional, Tuple import numpy as np def pad_to_size( x: np.array, size: int, axis: int = 0, is_right_pad: bool = True ): pad_length = size - x.shape[axis] if pad_length <= 0: return x pad_width = [(0, 0)] * x.ndim right_pad = (0, pad_length) pad_width[axis] = right_pad if is_right_pad else right_pad[::-1] return np.pad(x, mode="constant", pad_width=pad_width) class Timer: def __enter__(self): self.start = time.perf_counter() self.interval = None return self def __exit__(self, *args): self.end = time.perf_counter() self.interval = self.end - self.start class SignalHandler: Callback = Optional[Callable[[int, Any], None]] def __init__(self, handlers_map: Dict[int, Callback]) -> None: self.handlers_map = handlers_map def __enter__(self): self.default_handlers = { s: signal.signal(s, h) for s, h in self.handlers_map.items() } return self def __exit__(self, *args): for s, h in self.default_handlers.items(): signal.signal(s, h) def maybe_len(obj) -> Optional[int]: try: return len(obj) except (NotImplementedError, AttributeError): return None def get_download_path() -> Path: return Path( os.environ.get("MXNET_HOME", str(Path.home() / ".mxnet" / "gluon-ts")) ) def map_dct_values(fn: Callable, dct: dict) -> dict: return {key: fn(value) for key, value in dct.items()} def erf(x: np.array) -> np.array: ones = np.ones_like(x) zeros = np.zeros_like(x) t = ones / (ones + 0.5 * np.abs(x)) coefficients = [ 1.00002368, 0.37409196, 0.09678418, -0.18628806, 0.27886807, -1.13520398, 1.48851587, -0.82215223, 0.17087277, ] inner = zeros for c in coefficients[::-1]: inner = t * (c + inner) res = ones - t * np.exp((inner - 1.26551223 - np.square(x))) return np.where(x >= zeros, res, -1.0 * res) def erfinv(x: np.array) -> np.array: zeros = np.zeros_like(x) w = -np.log((1.0 - x) * (1.0 + x)) mask_lesser = w < (zeros + 5.0) w = np.where(mask_lesser, w - 2.5, np.sqrt(w) - 3.0) coefficients_lesser = [ 2.81022636e-08, 3.43273939e-07, -3.5233877e-06, -4.39150654e-06, 0.00021858087, -0.00125372503, -0.00417768164, 0.246640727, 1.50140941, ] coefficients_greater_equal = [ -0.000200214257, 0.000100950558, 0.00134934322, -0.00367342844, 0.00573950773, -0.0076224613, 0.00943887047, 1.00167406, 2.83297682, ] p = np.where( mask_lesser, coefficients_lesser[0] + zeros, coefficients_greater_equal[0] + zeros, ) for c_l, c_ge in zip( coefficients_lesser[1:], coefficients_greater_equal[1:] ): c = np.where(mask_lesser, c_l + zeros, c_ge + zeros) p = c + p * w return p * x class LinearInterpolation: def __init__( self, x_coord: List[float], y_coord: List[np.ndarray], tol: float = 1e-8, ) -> None: self.x_coord = x_coord assert sorted(self.x_coord) == self.x_coord self.y_coord = y_coord self.num_points = len(self.x_coord) assert ( self.num_points >= 2 ), "Need at least two points for linear interpolation." self.tol = tol def __call__(self, x: float): return self.linear_interpolation(x)
Apache License 2.0
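A worked example of the interpolation record above, using only numpy and the import path given by the function_path.

```python
import numpy as np

from gluonts.support.util import LinearInterpolation

interp = LinearInterpolation(
    x_coord=[0.0, 1.0, 2.0],
    y_coord=[np.array([0.0]), np.array([10.0]), np.array([20.0])],
)

print(interp(0.5))    # ~[5.], halfway between the first two points
print(interp(-1.0))   # [0.], clamped to the smallest x
print(interp(5.0))    # [20.], clamped to the largest x
```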
universaldevicesinc/polyglot
polyglot/element_manager/isy/__init__.py
get_config
python
def get_config(): return {'address': ADDRESS, 'https': HTTPS == 'https', 'password': PASSWORD, 'username': USERNAME, 'port': PORT, 'version': VERSION}
Returns the element's configuration.
https://github.com/universaldevicesinc/polyglot/blob/fdce13cc0d86c8fccede06543db3dd7ab99003a1/polyglot/element_manager/isy/__init__.py#L70-L74
import logging from polyglot.element_manager import http from . import incoming import xml.etree.ElementTree as ET import os import requests import time import threading try: from urllib import quote, urlencode except ImportError: from urllib.parse import quote, urlencode DEFAULT_CONFIG = {'address': '192.168.10.100', 'https': False, 'password': 'admin', 'username': 'admin', 'port': 80, 'version':'0.0.0'} _TIMEOUT = 25.0 SESSION = None ADDRESS = None HTTPS = None PASSWORD = None PORT = None USERNAME = None VERSION = '0.0.0' _LOGGER = logging.getLogger(__name__) SLOCK = threading.Lock() STATS = {'ntotal': 0, 'rtotal': 0, 'oktotal': 0, 'ertotal': 0, 'ettotal': 0.0, 'ethigh': 0.0, 'etlow': 0.0} def load(pglot, user_config): config = dict(DEFAULT_CONFIG) config.update(user_config) set_config(config) http.register(incoming.HANDLERS, parent_dir='ns') incoming.PGLOT = pglot _LOGGER.info('Loaded ISY element') def unload(): _LOGGER.info('Unloaded ISY element')
MIT License
tlc-pack/tenset
tests/python/contrib/test_arm_compute_lib/test_dense.py
_get_qnn_params
python
def _get_qnn_params(input_zp, input_sc, kernel_zp, kernel_sc, kernel_h, kernel_w): input_max = input_sc * (255 - input_zp) input_min = -input_sc * input_zp kernel_max = kernel_sc * (255 - kernel_zp) kernel_min = -kernel_sc * kernel_zp output_limits = [ kernel_max * kernel_h * kernel_w * input_max, kernel_min * kernel_h * kernel_w * input_max, kernel_min * kernel_h * kernel_w * input_min, kernel_max * kernel_h * kernel_w * input_min, ] output_max = max(output_limits) output_min = min(output_limits) output_sc = (output_max - output_min) / 255 output_zp = -int(output_min / output_sc) return output_zp, output_sc
Get output qnn parameters given input and kernel parameters.
https://github.com/tlc-pack/tenset/blob/3f7ed0291df47331d43f43a064fffacdc2914b47/tests/python/contrib/test_arm_compute_lib/test_dense.py#L49-L65
import numpy as np import tvm from tvm import relay from tvm import testing from test_arm_compute_lib.infrastructure import ( Device, skip_runtime_test, skip_codegen_test, build_and_run, verify, verify_codegen, ) def _get_model(shape, weight_shape, units, dtype, var_names, has_bias=False): a = relay.var(next(var_names), shape=shape, dtype=dtype) w = tvm.nd.array(np.random.uniform(-128, 127, weight_shape).astype(dtype)) weights = relay.const(w, dtype) out = relay.nn.dense(a, weights, units=units, out_dtype=dtype) params = {"w": w} if has_bias: b = tvm.nd.array(np.random.randint(-128, 127, weight_shape[0]).astype(dtype)) biasc = relay.const(b, dtype) out = relay.nn.bias_add(out, biasc) params["b"] = b return out, params
Apache License 2.0
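A worked call of the helper above. It lives in TVM's test suite rather than the library API, so the sketch assumes the function definition from the test file is available in scope; the quantization parameters are arbitrary.

```python
# assumes _get_qnn_params from the test module above is defined in this scope
output_zp, output_sc = _get_qnn_params(
    input_zp=128, input_sc=0.0784,
    kernel_zp=128, kernel_sc=0.0784,
    kernel_h=1, kernel_w=2048,   # for a dense layer the reduction size plays the kernel role
)
print(output_zp, output_sc)      # zero point / scale sized to cover the worst-case output range
```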
shinya7y/universenet
mmdet/models/losses/iou_loss.py
iou_loss
python
def iou_loss(pred, target, linear=False, mode='log', eps=1e-6): assert mode in ['linear', 'square', 'log'] if linear: mode = 'linear' warnings.warn('DeprecationWarning: Setting "linear=True" in ' 'iou_loss is deprecated, please use "mode=`linear`" ' 'instead.') ious = bbox_overlaps(pred, target, is_aligned=True).clamp(min=eps) if mode == 'linear': loss = 1 - ious elif mode == 'square': loss = 1 - ious**2 elif mode == 'log': loss = -ious.log() else: raise NotImplementedError return loss
IoU loss. Computing the IoU loss between a set of predicted bboxes and target bboxes. The loss is calculated as negative log of IoU. Args: pred (torch.Tensor): Predicted bboxes of format (x1, y1, x2, y2), shape (n, 4). target (torch.Tensor): Corresponding gt bboxes, shape (n, 4). linear (bool, optional): If True, use linear scale of loss instead of log scale. Default: False. mode (str): Loss scaling mode, including "linear", "square", and "log". Default: 'log' eps (float): Eps to avoid log(0). Return: torch.Tensor: Loss tensor.
https://github.com/shinya7y/universenet/blob/9bf72a34737fea4f2f4080e90a72fa18c776b6e6/mmdet/models/losses/iou_loss.py#L16-L50
import math import warnings import mmcv import torch import torch.nn as nn from mmdet.core import bbox_overlaps from ..builder import LOSSES from .utils import weighted_loss @mmcv.jit(derivate=True, coderize=True) @weighted_loss
Apache License 2.0
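A tiny sketch of the loss above. Boxes are (x1, y1, x2, y2); because the function is wrapped by @weighted_loss, a reduction keyword is assumed to be available (mmdet's usual decorator behaviour), and reduction='none' is passed to show per-box values.

```python
import torch

from mmdet.models.losses.iou_loss import iou_loss

pred = torch.tensor([[0.0, 0.0, 10.0, 10.0],
                     [0.0, 0.0, 10.0, 10.0]])
target = torch.tensor([[0.0, 0.0, 10.0, 10.0],     # identical box   -> IoU = 1.0
                       [5.0, 5.0, 15.0, 15.0]])    # quarter overlap -> IoU ~ 0.143

print(iou_loss(pred, target, mode='linear', reduction='none'))  # ~[0.000, 0.857]
print(iou_loss(pred, target, mode='log'))                       # mean of -log(IoU)
```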
friedererdmann/blender_menus
blender_menu_structure/__init__.py
an_example
python
def an_example(): print("An Example!")
A simple example method to call in our example menu.
https://github.com/friedererdmann/blender_menus/blob/1896dcf5c07676a1dccc3c252980e9a26f4a3c0b/blender_menu_structure/__init__.py#L164-L168
bl_info = { "name": "Dynamically created Blender Menu", "author": "Frieder Erdmann, Boris Ignjatovic", "blender": (2, 80, 3), "location": "Menu > Submenus", "description": "A test to create dynamic menus", "warning": "", "wiki_url": "", "category": "UI" } import os from functools import partial import bpy from bpy.utils import previews imageloader = None class ImageLoader(): def __init__(self): self.preview_handler = previews.new() def blender_icon(self, icon="NONE"): if icon.lower().endswith(".png"): icon = self.load_icon(icon) return self.preview_handler[icon].icon_id def load_icon(self, filename="image.png"): script_dir = os.path.dirname(__file__) icon_dir = os.path.join(script_dir, "icons") filepath = os.path.join(icon_dir, filename) name = os.path.splitext(filename)[0] self.preview_handler.load(name, filepath, 'IMAGE') return name def unregister(self): previews.remove(self.preview_handler) self.preview_handler = None def add_menu(name, icon_value=0, parent_name="TOPBAR_MT_editor_menus"): def draw(self, context): pass def menu_draw(self, context): self.layout.menu(my_menu_class.bl_idname, icon_value=icon_value) bl_idname = "MENU_MT_{0}".format(name.replace(" ", "")) full_parent_name = "bpy.types.{0}".format(parent_name) parent = eval(full_parent_name) my_menu_class = type( "DynamicMenu{0}".format(name), (bpy.types.Menu,), { "bl_idname": bl_idname, "bl_label": name, "draw": draw }) bpy.utils.register_class(my_menu_class) parent.append(menu_draw) return bl_idname def add_operator(name, callback, icon_value=0, tooltip='Dynamic Operator', parent_name="TOPBAR_MT_editor_menus"): def execute(self, context): my_operator_class.func() return {"FINISHED"} def operatator_draw(self, context): self.layout.operator(my_operator_class.bl_idname, icon_value=icon_value) bl_idname = "menuentry.{0}".format(name.replace(" ", "").lower()) full_parent_name = "bpy.types.{0}".format(parent_name) parent = eval(full_parent_name) my_operator_class = type( "DynamicOperator{0}".format(name), (bpy.types.Operator,), { "bl_idname": bl_idname, "bl_label": name, "func": callback, "execute": execute, "__doc__": tooltip }) bpy.utils.register_class(my_operator_class) parent.append(operatator_draw) return bl_idname def add_separator(parent_name="TOPBAR_MT_editor_menus"): def draw_separator(self, context): self.layout.row().separator() full_parent_name = "bpy.types.{0}".format(parent_name) parent = eval(full_parent_name) parent.append(draw_separator) def build_menus(menu_dict, parent_name="TOPBAR_MT_editor_menus"): for key in menu_dict: if key.count("-") == len(key): add_separator(parent_name) entry = menu_dict[key] icon = 0 tooltip = "" if "icon" in entry: icon = imageloader.blender_icon(entry["icon"]) if "tooltip" in entry: tooltip = entry["tooltip"] if "menu" in entry: bl_idname = add_menu( name=key, icon_value=icon, parent_name=parent_name) build_menus(entry["menu"], bl_idname) elif "operator" in entry: add_operator( name=key, callback=entry["operator"], icon_value=icon, tooltip=tooltip, parent_name=parent_name)
MIT License
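A sketch of the nested dict that build_menus above consumes. Keys made only of dashes become separators, "menu" nests a submenu, "operator" binds a callable; this only has an effect inside Blender, since registration needs bpy and the module-level imageloader.

```python
# illustrative structure; an_example is the callback defined above
menu_structure = {
    "My Tools": {
        "menu": {
            "Say Hello": {
                "operator": an_example,
                "tooltip": "Prints a line to the console",
            },
            "----": {},                      # all-dash key -> separator
            "Export": {
                "menu": {
                    "Quick Export": {"operator": an_example},
                },
            },
        },
    },
}

# inside register(), after `imageloader = ImageLoader()`:
# build_menus(menu_structure)
```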
machinezone/cobra
cobras/bavarde/runner/client.py
client
python
def client( url, role, secret, channel, position, username, password, stream_sql, verbose, dev ): if os.getenv('DEBUG') is not None or verbose: pprint.pprint(locals()) try: import readline except ImportError: pass if dev: url = DEFAULT_LOCAL_URL loop = asyncio.new_event_loop() inputs: asyncio.Queue[str] = asyncio.Queue(loop=loop) stop: asyncio.Future[None] = loop.create_future() asyncio.ensure_future( runClient( url, role, secret, channel, position, stream_sql, verbose, username, password, loop, inputs, stop, ), loop=loop, ) thread = threading.Thread(target=loop.run_forever) thread.start() try: while True: message = input('> ') sys.stdout.write( "\N{ESC}[A" "\N{CARRIAGE RETURN}" "\N{ESC}[K" ) loop.call_soon_threadsafe(inputs.put_nowait, message) except (KeyboardInterrupt, EOFError): loop.call_soon_threadsafe(stop.set_result, None) thread.join()
bavarde chat cli
https://github.com/machinezone/cobra/blob/a97bcf070128f2b1dcc51fa635e8257c2d14a95d/cobras/bavarde/runner/client.py#L39-L107
import asyncio import getpass import os import pprint import threading import sys import click from cobras.bavarde.client.client import runClient from cobras.bavarde.client.client import ( DEFAULT_LOCAL_URL, DEFAULT_URL, DEFAULT_ROLE, DEFAULT_SECRET, ) @click.command() @click.option('--url', default=DEFAULT_URL) @click.option('--role', envvar='BAVARDE_DEFAULT_ROLE', default=DEFAULT_ROLE) @click.option( '--secret', envvar='BAVARDE_DEFAULT_SECRET', default=DEFAULT_SECRET, required=True ) @click.option('--channel', envvar='BAVARDE_DEFAULT_CHANNEL', default='lobby') @click.option('--position', default='0-0') @click.option('--username', default=getpass.getuser()) @click.option('--password') @click.option('--verbose', '-v', count=True) @click.option('--dev', '-v', count=True) @click.option('--stream_sql')
BSD 3-Clause New or Revised License
aerosoul94/fatx-tools
fatx/analysis/metadata_analyzer.py
FatXAnalyzer.get_orphanage
python
def get_orphanage(self): return self.orphanage
Orphanage contains the list of orphaned (recovered) dirents.
https://github.com/aerosoul94/fatx-tools/blob/efdae5a2b416f30b7ee45f574f43fd5778922438/fatx/analysis/metadata_analyzer.py#L28-L30
from .orphan import FatXOrphan import logging import time import json LOG = logging.getLogger('FATX.Analyzer') class FatXAnalyzer: def __init__(self, volume, full_scan=False): self.volume = volume self.roots = [] self.orphanage = [] self.current_block = 0
MIT License
rajammanabrolu/worldgeneration
evennia-engine/evennia/evennia/accounts/manager.py
AccountDBManager.get_account_from_name
python
def get_account_from_name(self, uname): try: return self.get(username__iexact=uname) except self.model.DoesNotExist: return None
Get account object based on name. Args: uname (str): The Account name to search for. Returns: account (Account): The found account.
https://github.com/rajammanabrolu/worldgeneration/blob/5e97df013399e1a401d0a7ec184c4b9eb3100edd/evennia-engine/evennia/evennia/accounts/manager.py#L127-L141
import datetime from django.utils import timezone from django.contrib.auth.models import UserManager from evennia.typeclasses.managers import TypedObjectManager, TypeclassManager __all__ = ("AccountManager",) class AccountDBManager(TypedObjectManager, UserManager): def num_total_accounts(self): return self.count() def get_connected_accounts(self): return self.filter(db_is_connected=True) def get_recently_created_accounts(self, days=7): end_date = timezone.now() tdelta = datetime.timedelta(days) start_date = end_date - tdelta return self.filter(date_joined__range=(start_date, end_date)) def get_recently_connected_accounts(self, days=7): end_date = timezone.now() tdelta = datetime.timedelta(days) start_date = end_date - tdelta return self.filter(last_login__range=(start_date, end_date)).order_by("-last_login") def get_account_from_email(self, uemail): return self.filter(email__iexact=uemail) def get_account_from_uid(self, uid): try: return self.get(id=uid) except self.model.DoesNotExist: return None
MIT License
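A hedged sketch of calling the manager method above; it assumes, as is conventional in Evennia, that this manager is exposed as `objects` on the AccountDB model, whose import location is not shown in the snippet.

```python
from evennia.accounts.models import AccountDB  # assumed model location

account = AccountDB.objects.get_account_from_name("Anna")  # case-insensitive lookup
if account is None:
    print("No account named 'Anna'")
```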
onicagroup/runway
runway/cfngin/utils.py
Extractor.__init__
python
def __init__(self, archive: Optional[Path] = None) -> None: self.archive = archive
Instantiate class. Args: archive (Optional[Path]): Archive path.
https://github.com/onicagroup/runway/blob/6e0d543512325a92265c140f386c163f6be410b4/runway/cfngin/utils.py#L521-L528
from __future__ import annotations import copy import logging import os import re import shutil import subprocess import sys import tarfile import tempfile import uuid import zipfile from pathlib import Path from typing import ( TYPE_CHECKING, Any, ClassVar, Dict, Iterator, List, Optional, OrderedDict, Type, Union, cast, ) import botocore.client import botocore.exceptions import dateutil import yaml from yaml.constructor import ConstructorError from .awscli_yamlhelper import yaml_parse from .session_cache import get_session if TYPE_CHECKING: from mypy_boto3_route53.client import Route53Client from mypy_boto3_route53.type_defs import ResourceRecordSetTypeDef from mypy_boto3_s3.client import S3Client from ..config.models.cfngin import ( CfnginPackageSourcesDefinitionModel, GitCfnginPackageSourceDefinitionModel, LocalCfnginPackageSourceDefinitionModel, S3CfnginPackageSourceDefinitionModel, ) from .blueprints.base import Blueprint LOGGER = logging.getLogger(__name__) def camel_to_snake(name: str) -> str: sub_str_1 = re.sub("(.)([A-Z][a-z]+)", r"\1_\2", name) return re.sub("([a-z0-9])([A-Z])", r"\1_\2", sub_str_1).lower() def convert_class_name(kls: type) -> str: return camel_to_snake(kls.__name__) def parse_zone_id(full_zone_id: str) -> str: return full_zone_id.split("/")[2] def get_hosted_zone_by_name(client: Route53Client, zone_name: str) -> Optional[str]: paginator = client.get_paginator("list_hosted_zones") for page in paginator.paginate(): for zone in page["HostedZones"]: if zone["Name"] == zone_name: return parse_zone_id(zone["Id"]) return None def get_or_create_hosted_zone(client: Route53Client, zone_name: str) -> str: zone_id = get_hosted_zone_by_name(client, zone_name) if zone_id: return zone_id LOGGER.debug("zone %s does not exist; creating", zone_name) reference = uuid.uuid4().hex response = client.create_hosted_zone(Name=zone_name, CallerReference=reference) return parse_zone_id(response["HostedZone"]["Id"]) class SOARecordText: def __init__(self, record_text: str) -> None: ( self.nameserver, self.contact, self.serial, self.refresh, self.retry, self.expire, self.min_ttl, ) = record_text.split() def __str__(self) -> str: return " ".join( [ self.nameserver, self.contact, self.serial, self.refresh, self.retry, self.expire, self.min_ttl, ] ) class SOARecord: def __init__(self, record: ResourceRecordSetTypeDef) -> None: self.name = record["Name"] self.text = SOARecordText(record.get("ResourceRecords", [{}])[0]["Value"]) self.ttl = record.get("TTL", 0) def get_soa_record(client: Route53Client, zone_id: str, zone_name: str) -> SOARecord: response = client.list_resource_record_sets( HostedZoneId=zone_id, StartRecordName=zone_name, StartRecordType="SOA", MaxItems="1", ) return SOARecord(response["ResourceRecordSets"][0]) def create_route53_zone(client: Route53Client, zone_name: str) -> str: if not zone_name.endswith("."): zone_name += "." 
zone_id = get_or_create_hosted_zone(client, zone_name) old_soa = get_soa_record(client, zone_id, zone_name) if old_soa.text.min_ttl == "300": return zone_id new_soa = copy.deepcopy(old_soa) LOGGER.debug("updating negative caching value on zone %s to 300", zone_name) new_soa.text.min_ttl = "300" client.change_resource_record_sets( HostedZoneId=zone_id, ChangeBatch={ "Comment": "Update SOA min_ttl to 300.", "Changes": [ { "Action": "UPSERT", "ResourceRecordSet": { "Name": zone_name, "Type": "SOA", "TTL": old_soa.ttl, "ResourceRecords": [{"Value": str(new_soa.text)}], }, }, ], }, ) return zone_id def yaml_to_ordered_dict( stream: str, loader: Union[Type[yaml.Loader], Type[yaml.SafeLoader]] = yaml.SafeLoader, ) -> OrderedDict[str, Any]: class OrderedUniqueLoader(loader): NO_DUPE_SIBLINGS = ["stacks", "class_path"] NO_DUPE_CHILDREN = ["stacks"] @staticmethod def _error_mapping_on_dupe( node: Union[yaml.MappingNode, yaml.ScalarNode, yaml.SequenceNode], node_name: str, ) -> None: if isinstance(node, yaml.MappingNode): mapping: Dict[str, Any] = {} for val in node.value: a = val[0] b = mapping.get(a.value, None) if b: raise ConstructorError( f"{node_name} mapping cannot have duplicate keys " f"{b.start_mark} {a.start_mark}" ) mapping[a.value] = a def _validate_mapping( self, node: Union[yaml.MappingNode, yaml.ScalarNode, yaml.SequenceNode], deep: bool = False, ) -> OrderedDict[Any, Any]: if not isinstance(node, yaml.MappingNode): raise ConstructorError( None, None, f"expected a mapping node, but found {node.id}", node.start_mark, ) mapping: OrderedDict[Any, Any] = OrderedDict() for key_node, value_node in node.value: key = cast(object, self.construct_object(key_node, deep=deep)) try: hash(key) except TypeError as exc: raise ConstructorError( "while constructing a mapping", node.start_mark, f"found unhashable key ({exc})", key_node.start_mark, ) from exc if key in mapping and key in self.NO_DUPE_SIBLINGS: raise ConstructorError( f"{key} key cannot have duplicate siblings " f"{node.start_mark} {key_node.start_mark}" ) if key in self.NO_DUPE_CHILDREN: self._error_mapping_on_dupe(value_node, key_node.value) value = cast(object, self.construct_object(value_node, deep=deep)) mapping[key] = value return mapping def construct_mapping( self, node: Union[yaml.MappingNode, yaml.ScalarNode, yaml.SequenceNode], deep: bool = False, ) -> OrderedDict[Any, Any]: if isinstance(node, yaml.MappingNode): self.flatten_mapping(node) return self._validate_mapping(node, deep=deep) def construct_yaml_map( self, node: Union[yaml.MappingNode, yaml.ScalarNode, yaml.SequenceNode] ) -> Iterator[OrderedDict[Any, Any]]: data: OrderedDict[Any, Any] = OrderedDict() yield data value: OrderedDict[Any, Any] = self.construct_mapping(node) data.update(value) OrderedUniqueLoader.add_constructor( "tag:yaml.org,2002:map", OrderedUniqueLoader.construct_yaml_map ) return yaml.load(stream, OrderedUniqueLoader) def uppercase_first_letter(string_: str) -> str: return string_[0].upper() + string_[1:] def cf_safe_name(name: str) -> str: alphanumeric = r"[a-zA-Z0-9]+" parts = re.findall(alphanumeric, name) return "".join(uppercase_first_letter(part) for part in parts) def read_value_from_path(value: str, *, root_path: Optional[Path] = None) -> str: if value.startswith("file://"): path = value.split("file://", 1)[1] if os.path.isabs(path): read_path = Path(path) else: root_path = root_path or Path.cwd() if root_path.is_dir(): read_path = root_path / path else: read_path = root_path.parent / path if read_path.is_file(): return read_path.read_text() 
if read_path.is_dir(): raise ValueError( f"path must lead to a file not directory: {read_path.absolute()}" ) raise ValueError(f"path does not exist: {read_path.absolute()}") return value def get_client_region(client: Any) -> str: return client._client_config.region_name def get_s3_endpoint(client: Any) -> str: return client._endpoint.host def s3_bucket_location_constraint(region: Optional[str]) -> Optional[str]: if region == "us-east-1": return "" return region def ensure_s3_bucket( s3_client: S3Client, bucket_name: str, bucket_region: Optional[str] = None, *, create: bool = True, persist_graph: bool = False, ) -> None: try: s3_client.head_bucket(Bucket=bucket_name) if persist_graph: response = s3_client.get_bucket_versioning(Bucket=bucket_name) state = response.get("Status", "disabled").lower() if state != "enabled": LOGGER.warning( 'versioning is %s on bucket "%s"; it is recommended to ' "enable versioning when using persistent graphs", state, bucket_name, ) if response.get("MFADelete", "Disabled") != "Disabled": LOGGER.warning( 'MFADelete must be disabled on bucket "%s" when using ' "persistent graphs to allow for propper management of " "the graphs", bucket_name, ) except botocore.exceptions.ClientError as err: if err.response["Error"]["Message"] == "Not Found" and create: LOGGER.debug("creating bucket %s", bucket_name) create_args: Dict[str, Any] = {"Bucket": bucket_name} location_constraint = s3_bucket_location_constraint(bucket_region) if location_constraint: create_args["CreateBucketConfiguration"] = { "LocationConstraint": location_constraint } s3_client.create_bucket(**create_args) if persist_graph: s3_client.put_bucket_versioning( Bucket=bucket_name, VersioningConfiguration={"Status": "Enabled"} ) return if err.response["Error"]["Message"] == "Forbidden": LOGGER.exception( "Access denied for bucket %s. Did you remember " "to use a globally unique name?", bucket_name, ) elif err.response["Error"]["Message"] != "Not Found": LOGGER.exception('error creating bucket "%s"', bucket_name) raise def parse_cloudformation_template(template: str) -> Dict[str, Any]: return yaml_parse(template) class Extractor: extension: ClassVar[str] = ""
Apache License 2.0
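The context above bundles a large utility module, but the pure string helpers near its top are small enough to exercise directly. A self-contained sketch (the two helpers are re-stated here rather than imported, since this record does not name the package they ship in):

import re

def camel_to_snake(name):
    # same two-pass regex conversion as the helper shown in the context
    sub_str_1 = re.sub("(.)([A-Z][a-z]+)", r"\1_\2", name)
    return re.sub("([a-z0-9])([A-Z])", r"\1_\2", sub_str_1).lower()

def cf_safe_name(name):
    # same alphanumeric-split-and-capitalise logic as the helper shown in the context
    parts = re.findall(r"[a-zA-Z0-9]+", name)
    return "".join(part[0].upper() + part[1:] for part in parts)

print(camel_to_snake("MyStackName"))  # -> my_stack_name
print(cf_safe_name("my-stack.name"))  # -> MyStackName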
triton-inference-server/model_analyzer
model_analyzer/config/input/objects/config_model_profile_spec.py
ConfigModelProfileSpec.set_cpu_only
python
def set_cpu_only(self, cpu_only): self._cpu_only = cpu_only
Parameters ---------- cpu_only: bool Set whether this model is run on CPU only
https://github.com/triton-inference-server/model_analyzer/blob/3151792403b4e3257dd5188fe745bbdf68e521e9/model_analyzer/config/input/objects/config_model_profile_spec.py#L159-L167
class ConfigModelProfileSpec: def __init__(self, model_name, cpu_only=False, objectives=None, constraints=None, parameters=None, model_config_parameters=None, perf_analyzer_flags=None, triton_server_flags=None, triton_server_environment=None): self._model_name = model_name self._cpu_only = cpu_only self._objectives = objectives self._constraints = constraints self._parameters = parameters self._model_config_parameters = model_config_parameters self._perf_analyzer_flags = perf_analyzer_flags self._triton_server_flags = triton_server_flags self._triton_server_environment = triton_server_environment def cpu_only(self): return self._cpu_only def objectives(self): return self._objectives def constraints(self): return self._constraints def parameters(self): return self._parameters def model_config_parameters(self): return self._model_config_parameters def model_name(self): return self._model_name def perf_analyzer_flags(self): return self._perf_analyzer_flags def triton_server_flags(self): return self._triton_server_flags def triton_server_environment(self): return self._triton_server_environment
Apache License 2.0
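A minimal usage sketch for the setter above, assuming model_analyzer is installed and importable along the function_path shown; the model name is a placeholder:

from model_analyzer.config.input.objects.config_model_profile_spec import (
    ConfigModelProfileSpec,
)

spec = ConfigModelProfileSpec(model_name="resnet50_onnx")  # placeholder model name
spec.set_cpu_only(True)         # mark this model as CPU-only
assert spec.cpu_only() is True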
rapid7/vm-console-client-python
rapid7vmconsole/models/sonar_query.py
SonarQuery.id
python
def id(self, id): self._id = id
Sets the id of this SonarQuery. The identifier of the Sonar query. # noqa: E501 :param id: The id of this SonarQuery. # noqa: E501 :type: int
https://github.com/rapid7/vm-console-client-python/blob/55e1f573967bce27cc9a2d10c12a949b1142c2b3/rapid7vmconsole/models/sonar_query.py#L98-L107
import pprint import re import six class SonarQuery(object): """ Attributes: swagger_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ swagger_types = { 'criteria': 'SonarCriteria', 'id': 'int', 'links': 'list[Link]', 'name': 'str' } attribute_map = { 'criteria': 'criteria', 'id': 'id', 'links': 'links', 'name': 'name' } def __init__(self, criteria=None, id=None, links=None, name=None): self._criteria = None self._id = None self._links = None self._name = None self.discriminator = None if criteria is not None: self.criteria = criteria if id is not None: self.id = id if links is not None: self.links = links if name is not None: self.name = name @property def criteria(self): return self._criteria @criteria.setter def criteria(self, criteria): self._criteria = criteria @property def id(self): return self._id @id.setter
MIT License
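A short sketch of the property setter above, assuming the generated client is importable along the function_path shown; the query name is a placeholder:

from rapid7vmconsole.models.sonar_query import SonarQuery

query = SonarQuery(name="expiring-certs")  # placeholder query name
query.id = 42                              # invokes the setter shown above
print(query.id)                            # -> 42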
stypr/clubhouse-py
clubhouse/clubhouse.py
Clubhouse.get_channel
python
def get_channel(self, channel, channel_id=None): data = { "channel": channel, "channel_id": channel_id } req = requests.post(f"{self.API_URL}/get_channel", headers=self.HEADERS, json=data) return req.json()
(Clubhouse, str, int) -> dict Get information of the given channel
https://github.com/stypr/clubhouse-py/blob/a0aad17a42a4f391fc40eebc36e5535e629bdd9a/clubhouse/clubhouse.py#L788-L798
import uuid import random import secrets import functools import requests class Clubhouse: API_URL = "https://www.clubhouseapi.com/api" API_BUILD_ID_IOS = "434" API_BUILD_VERSION = "0.1.40" API_BUILD_ID_ANDROID = "3389" API_BUILD_VERSION_ANDROID= "1.0.1" API_UA_IOS = f"clubhouse/{API_BUILD_ID_IOS} (iPhone; iOS 14.4; Scale/2.00)" API_UA_ANDROID = f"clubhouse/android/{API_BUILD_ID_ANDROID}" API_UA_STATIC = f"Clubhouse/{API_BUILD_ID_IOS} CFNetwork/1220.1 Darwin/20.3.0" PUBNUB_PUB_KEY = "pub-c-6878d382-5ae6-4494-9099-f930f938868b" PUBNUB_SUB_KEY = "sub-c-a4abea84-9ca3-11ea-8e71-f2b83ac9263d" PUBNUB_API_URL = "https://clubhouse.pubnubapi.com/v2" SENTRY_URL = "63d2d71e7f424c41a2ede9ad3d703960@o325556.ingest.sentry.io/5245095" TWITTER_ID = "NyJhARWVYU1X3qJZtC2154xSI" TWITTER_SECRET = "ylFImLBFaOE362uwr4jut8S8gXGWh93S1TUKbkfh7jDIPse02o" INSTAGRAM_ID = "1352866981588597" INSTAGRAM_CALLBACK = "https://www.joinclubhouse.com/callback/instagram" AGORA_KEY = "938de3e8055e42b281bb8c6f69c21f78" INSTABUG_KEY = "4e53155da9b00728caa5249f2e35d6b3" AMPLITUDE_KEY = "9098a21a950e7cb0933fb5b30affe5be" STRIPE_PUBLISH_KEY = "63d2d71e7f424c41a2ede9ad3d703960@o325556.ingest.sentry.io/5245095" ANDROID_API_KEY = "AIzaSyDGJ877BvgHAg2Bed1sgFjZ4wJmh2RfEfU" ANDROID_API_ID = "1:1096237342636:android:c800b1b9e5ee70d1f8a409" ANDROID_RECAPTCHA_KEY = "LcNAMYaAAAAAKDxm-jPPMrJvh_VTiWyWy4D9jp3" IOS_API_ID = "co.alphaexploration.clubhouse:16.0.3" IOS_RECAPTCHA_KEY = "6LeWyKUaAAAAAA7XsHRe-JWuI1qLwoZn5p3seyoW" HEADERS = { "CH-Languages": "en-JP,ja-JP", "CH-Locale": "en_JP", "Accept": "application/json", "Accept-Language": "en-JP;q=1, ja-JP;q=0.9", "Accept-Encoding": "gzip, deflate", "ch-keyboards": "en_US", "CH-AppBuild": f"{API_BUILD_ID_ANDROID}", "CH-AppVersion": f"{API_BUILD_VERSION_ANDROID}", "User-Agent": f"{API_UA_ANDROID}", "Connection": "close", "Content-Type": "application/json; charset=utf-8", "Cookie": f"__cfduid={secrets.token_hex(21)}{random.randint(1, 9)}" } def require_authentication(func): @functools.wraps(func) def wrap(self, *args, **kwargs): if not (self.HEADERS.get("CH-UserID") and self.HEADERS.get("CH-DeviceId") and self.HEADERS.get("Authorization")): raise Exception('Not Authenticated') return func(self, *args, **kwargs) return wrap def unstable_endpoint(func): @functools.wraps(func) def wrap(self, *args, **kwargs): print("[!] This endpoint is NEVER TESTED and MAY BE UNSTABLE. 
BE CAREFUL!") return func(self, *args, **kwargs) return wrap def __init__(self, user_id='', user_token='', user_device='', headers=None): self.HEADERS = dict(self.HEADERS) if isinstance(headers, dict): self.HEADERS.update(headers) self.HEADERS['CH-UserID'] = user_id if user_id else "(null)" if user_token: self.HEADERS['Authorization'] = f"Token {user_token}" self.HEADERS['CH-DeviceId'] = user_device.upper() if user_device else str(uuid.uuid4()).upper() def __str__(self): return "Clubhouse(user_Id={}, user_token={}, user_device={})".format( self.HEADERS.get('CH-UserID'), self.HEADERS.get('Authorization'), self.HEADERS.get('CH-DeviceId') ) def start_phone_number_auth(self, phone_number): if self.HEADERS.get("Authorization"): raise Exception('Already Authenticatied') data = { "phone_number": phone_number } req = requests.post(f"{self.API_URL}/start_phone_number_auth", headers=self.HEADERS, json=data) return req.json() @unstable_endpoint def call_phone_number_auth(self, phone_number): if self.HEADERS.get("Authorization"): raise Exception('Already Authenticatied') data = { "phone_number": phone_number } req = requests.post(f"{self.API_URL}/call_phone_number_auth", headers=self.HEADERS, json=data) return req.json() @unstable_endpoint def resend_phone_number_auth(self, phone_number): if self.HEADERS.get("Authorization"): raise Exception('Already Authenticatied') data = { "phone_number": phone_number } req = requests.post(f"{self.API_URL}/resend_phone_number_auth", headers=self.HEADERS, json=data) return req.json() def complete_phone_number_auth(self, phone_number, verification_code, rc_token=None, safety_net_nonce=None, safety_net_response=None): if self.HEADERS.get("Authorization"): raise Exception('Already Authenticatied') data = { "device_token": None, "rc_token": rc_token, "safety_net_nonce": safety_net_nonce, "safety_net_response": safety_net_response, "phone_number": phone_number, "verification_code": verification_code } req = requests.post(f"{self.API_URL}/complete_phone_number_auth", headers=self.HEADERS, json=data) return req.json() def check_for_update(self, is_testflight=False): query = f"is_testflight={int(is_testflight)}" req = requests.get(f"{self.API_URL}/check_for_update?{query}", headers=self.HEADERS) return req.json() @require_authentication def logout(self): data = {} req = requests.post(f"{self.API_URL}/logout", headers=self.HEADERS, json=data) return req.json() @require_authentication def get_release_notes(self): req = requests.post(f"{self.API_URL}/get_release_notes", headers=self.HEADERS) return req.json() @require_authentication def check_waitlist_status(self): req = requests.post(f"{self.API_URL}/check_waitlist_status", headers=self.HEADERS) return req.json() @require_authentication def add_email(self, email): data = { "email": email } req = requests.post(f"{self.API_URL}/add_email", headers=self.HEADERS, json=data) return req.json() @require_authentication def update_photo(self, photo_filename): files = { "file": ("image.jpg", open(photo_filename, "rb"), "image/jpeg"), } tmp = self.HEADERS['Content-Type'] self.HEADERS.pop("Content-Type") req = requests.post(f"{self.API_URL}/update_photo", headers=self.HEADERS, files=files) self.HEADERS['Content-Type'] = tmp return req.json() @require_authentication def follow(self, user_id, user_ids=None, source=4, source_topic_id=None): data = { "source_topic_id": source_topic_id, "user_ids": user_ids, "user_id": int(user_id), "source": source } req = requests.post(f"{self.API_URL}/follow", headers=self.HEADERS, json=data) return 
req.json() @require_authentication def unfollow(self, user_id): data = { "user_id": int(user_id) } req = requests.post(f"{self.API_URL}/unfollow", headers=self.HEADERS, json=data) return req.json() @require_authentication def block(self, user_id): data = { "user_id": int(user_id) } req = requests.post(f"{self.API_URL}/block", headers=self.HEADERS, json=data) return req.json() @require_authentication def unblock(self, user_id): data = { "user_id": int(user_id) } req = requests.post(f"{self.API_URL}/unblock", headers=self.HEADERS, json=data) return req.json() @require_authentication def follow_multiple(self, user_ids, user_id=None, source=7, source_topic_id=None): data = { "source_topic_id": source_topic_id, "user_ids": user_ids, "user_id": user_id, "source": source } req = requests.post(f"{self.API_URL}/follow_multiple", headers=self.HEADERS, json=data) return req.json() @require_authentication def follow_club(self, club_id, source_topic_id=None): data = { "club_id": int(club_id), "source_topic_id": source_topic_id } req = requests.post(f"{self.API_URL}/follow_club", headers=self.HEADERS, json=data) return req.json() @require_authentication def unfollow_club(self, club_id, source_topic_id=None): data = { "club_id": int(club_id), "source_topic_id": source_topic_id } req = requests.post(f"{self.API_URL}/unfollow_club", headers=self.HEADERS, json=data) return req.json() @require_authentication def update_follow_notifications(self, user_id, notification_type=2): data = { "user_id": int(user_id), "notification_type": int(notification_type) } req = requests.post(f"{self.API_URL}/update_follow_notifications", headers=self.HEADERS, json=data) return req.json() @require_authentication def get_suggested_follows_similar(self, user_id='', username=''): data = { "user_id": int(user_id) if user_id else None, "username": username if username else None, "query_id": None, "query_result_position": None, } req = requests.post(f"{self.API_URL}/get_suggested_follows_similar", headers=self.HEADERS, json=data) return req.json() @require_authentication def get_suggested_follows_friends_only(self, club_id=None, upload_contacts=True, contacts=()): data = { "club_id": club_id, "upload_contacts": upload_contacts, "contacts": contacts } req = requests.post(f"{self.API_URL}/get_suggested_follows_friends_only", headers=self.HEADERS, json=data) return req.json() @require_authentication def get_suggested_follows_all(self, in_onboarding=True, page_size=50, page=1): query = "in_onboarding={}&page_size={}&page={}".format( "true" if in_onboarding else "false", page_size, page ) req = requests.get(f"{self.API_URL}/get_suggested_follows_all?{query}", headers=self.HEADERS) return req.json() @require_authentication def ignore_suggested_follow(self, user_id): data = { "user_id": int(user_id) } req = requests.post(f"{self.API_URL}/user_id", headers=self.HEADERS, json=data) return req.json() @require_authentication def get_event(self, event_id=None, user_ids=None, club_id=None, is_member_only=False, event_hashid=None, description=None, time_start_epoch=None, name=None): data = { "user_ids": user_ids, "club_id": club_id, "is_member_only": is_member_only, "event_id": int(event_id) if event_id else None, "event_hashid": event_hashid, "description": description, "time_start_epoch": time_start_epoch, "name": name } req = requests.post(f"{self.API_URL}/get_event", headers=self.HEADERS, json=data) return req.json() @require_authentication def create_event(self, name, time_start_epoch, description, event_id=None, user_ids=(), club_id=None, 
is_member_only=False, event_hashid=None): data = { "user_ids": user_ids, "club_id": club_id, "is_member_only": is_member_only, "event_id": int(event_id) if event_id else None, "event_hashid": event_hashid, "description": description, "time_start_epoch": time_start_epoch, "name": name } req = requests.post(f"{self.API_URL}/edit_event", headers=self.HEADERS, json=data) return req.json() @require_authentication def edit_event(self, name, time_start_epoch, description, event_id=None, user_ids=(), club_id=None, is_member_only=False, event_hashid=None): data = { "user_ids": user_ids, "club_id": club_id, "is_member_only": is_member_only, "event_id": int(event_id) if event_id else None, "event_hashid": event_hashid, "description": description, "time_start_epoch": time_start_epoch, "name": name } req = requests.post(f"{self.API_URL}/edit_event", headers=self.HEADERS, json=data) return req.json() @require_authentication def delete_event(self, event_id, user_ids=None, club_id=None, is_member_only=False, event_hashid=None, description=None, time_start_epoch=None, name=None): data = { "user_ids": user_ids, "club_id": club_id, "is_member_only": is_member_only, "event_id": int(event_id) if event_id else None, "event_hashid": event_hashid, "description": description, "time_start_epoch": time_start_epoch, "name": name } req = requests.post(f"{self.API_URL}/delete_event", headers=self.HEADERS, json=data) return req.json() @require_authentication def get_events(self, is_filtered=True, page_size=25, page=1): _is_filtered = "true" if is_filtered else "false" query = "is_filtered={}&page_size={}&page={}".format( "true" if is_filtered else "false", page_size, page ) req = requests.get(f"{self.API_URL}/get_events?{query}", headers=self.HEADERS) return req.json() @require_authentication def get_club(self, club_id, source_topic_id=None): data = { "club_id": int(club_id), "source_topic_id": source_topic_id, "query_id": None, "query_result_position": None, "slug": None, } req = requests.post(f"{self.API_URL}/get_club", headers=self.HEADERS, json=data) return req.json() @require_authentication def get_club_members(self, club_id, return_followers=False, return_members=True, page_size=50, page=1): query = "club_id={}&return_followers={}&return_members={}&page_size={}&page={}".format( club_id, int(return_followers), int(return_members), page_size, page ) req = requests.get(f"{self.API_URL}/get_club_members?{query}", headers=self.HEADERS) return req.json() @require_authentication def get_settings(self): req = requests.get(f"{self.API_URL}/get_settings", headers=self.HEADERS) return req.json() @require_authentication def get_welcome_channel(self): req = requests.get(f"{self.API_URL}/get_welcome_channel", headers=self.HEADERS) return req.json() @require_authentication def hide_channel(self, channel, hide=True): data = { "channel": channel, "hide": hide } req = requests.post(f"{self.API_URL}/hide_channel", headers=self.HEADERS, json=data) return req.json() @require_authentication def join_channel(self, channel, attribution_source="feed", attribution_details="eyJpc19leHBsb3JlIjpmYWxzZSwicmFuayI6MX0="): data = { "channel": channel, "attribution_source": attribution_source, "attribution_details": attribution_details, } req = requests.post(f"{self.API_URL}/join_channel", headers=self.HEADERS, json=data) return req.json() @require_authentication def leave_channel(self, channel): data = { "channel": channel } req = requests.post(f"{self.API_URL}/leave_channel", headers=self.HEADERS, json=data) return req.json() 
@require_authentication def make_channel_public(self, channel, channel_id=None): data = { "channel": channel, "channel_id": channel_id } req = requests.post(f"{self.API_URL}/make_channel_public", headers=self.HEADERS, json=data) return req.json() @require_authentication def make_channel_social(self, channel, channel_id=None): data = { "channel": channel, "channel_id": channel_id } req = requests.post(f"{self.API_URL}/make_channel_social", headers=self.HEADERS, json=data) return req.json() @require_authentication def end_channel(self, channel, channel_id=None): data = { "channel": channel, "channel_id": channel_id } req = requests.post(f"{self.API_URL}/end_channel", headers=self.HEADERS, json=data) return req.json() @require_authentication def make_moderator(self, channel, user_id): data = { "channel": channel, "user_id": int(user_id) } req = requests.post(f"{self.API_URL}/make_moderator", headers=self.HEADERS, json=data) return req.json() @require_authentication def block_from_channel(self, channel, user_id): data = { "channel": channel, "user_id": int(user_id) } req = requests.post(f"{self.API_URL}/block_from_channel", headers=self.HEADERS, json=data) return req.json() @require_authentication def get_profile(self, user_id='', username=''): data = { "query_id": None, "query_result_position": 0, "user_id": int(user_id) if user_id else None, "username": username if username else None } req = requests.post(f"{self.API_URL}/get_profile", headers=self.HEADERS, json=data) return req.json() @require_authentication def me(self, return_blocked_ids=False, timezone_identifier="Asia/Tokyo", return_following_ids=False): data = { "return_blocked_ids": return_blocked_ids, "timezone_identifier": timezone_identifier, "return_following_ids": return_following_ids } req = requests.post(f"{self.API_URL}/me", headers=self.HEADERS, json=data) return req.json() @require_authentication def get_following(self, user_id, page_size=50, page=1): query = "user_id={}&page_size={}&page={}".format( user_id, page_size, page ) req = requests.get(f"{self.API_URL}/get_following?{query}", headers=self.HEADERS) return req.json() @require_authentication def get_followers(self, user_id, page_size=50, page=1): query = "user_id={}&page_size={}&page={}".format( user_id, page_size, page ) req = requests.get(f"{self.API_URL}/get_followers?{query}", headers=self.HEADERS) return req.json() @require_authentication def get_mutual_follows(self, user_id, page_size=50, page=1): query = "user_id={}&page_size={}&page={}".format( user_id, page_size, page ) req = requests.get(f"{self.API_URL}/get_mutual_follows?{query}", headers=self.HEADERS) return req.json() @require_authentication def get_all_topics(self): req = requests.get(f"{self.API_URL}/get_all_topics", headers=self.HEADERS) return req.json() @require_authentication def get_feed(self): req = requests.get(f"{self.API_URL}/get_feed?", headers=self.HEADERS) return req.json() @require_authentication def get_channels(self): req = requests.get(f"{self.API_URL}/get_channels", headers=self.HEADERS) return req.json() @require_authentication
MIT License
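A hedged usage sketch for get_channel; the import path is inferred from the function_path, the credentials and channel code are placeholders, and the call performs a live request against the Clubhouse API:

from clubhouse.clubhouse import Clubhouse

client = Clubhouse(
    user_id="1234567",            # placeholder, from a completed phone auth flow
    user_token="<token>",         # placeholder
    user_device="<device-uuid>",  # placeholder
)
info = client.get_channel("abc123xyz")  # placeholder channel code
print(info.get("success"), info.get("topic"))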
youssefsharief/arabic-tacotron-tts
models/attention.py
_compute_attention
python
def _compute_attention(attention_mechanism, cell_output, attention_state, attention_layer): alignments, next_attention_state = attention_mechanism( cell_output, state=attention_state) expanded_alignments = array_ops.expand_dims(alignments, 1) context = math_ops.matmul(expanded_alignments, attention_mechanism.values) context = array_ops.squeeze(context, [1]) if attention_layer is not None: attention = attention_layer(array_ops.concat([cell_output, context], 1)) else: attention = context return attention, alignments, next_attention_state
Computes the attention and alignments for a given attention_mechanism.
https://github.com/youssefsharief/arabic-tacotron-tts/blob/fab3b4ef928e8e15b63c4b08cca1872a08104eb9/models/attention.py#L10-L34
import tensorflow as tf from tensorflow.contrib.seq2seq.python.ops.attention_wrapper import BahdanauAttention from tensorflow.python.layers import core as layers_core from tensorflow.python.ops import array_ops, math_ops, nn_ops, variable_scope
MIT License
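A shape-level sketch (NumPy stand-in, not the TensorFlow graph) of the context computation inside _compute_attention: the alignments are expanded to (batch, 1, time), matmul'd against the memory values, then squeezed back to (batch, depth):

import numpy as np

batch, time_steps, depth = 2, 5, 8
alignments = np.random.rand(batch, time_steps)        # attention weights per encoder step
alignments /= alignments.sum(axis=1, keepdims=True)   # normalise each example
values = np.random.rand(batch, time_steps, depth)     # attention_mechanism.values

expanded = alignments[:, np.newaxis, :]               # (batch, 1, time)
context = np.matmul(expanded, values).squeeze(1)      # (batch, depth)
assert context.shape == (batch, depth)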
seaglass-project/seaglass
common/lib/gps_python3/fake.py
DaemonInstance.add_device
python
def add_device(self, path): if self.__get_control_socket(): self.sock.sendall("+%s\r\n\x00" % path) self.sock.recv(12) self.sock.close()
Add a device to the daemon's internal search list.
https://github.com/seaglass-project/seaglass/blob/04ae18807d188b211167acdb329050a173cba0ba/common/lib/gps_python3/fake.py#L480-L485
import os, sys, time, signal, pty, termios import threading, socket, select import gps import packet as sniffer if sys.platform.startswith("linux"): WRITE_PAD = 0.0 CLOSE_DELAY = 0.1 elif sys.platform.startswith("freebsd"): WRITE_PAD = 0.001 CLOSE_DELAY = 0.4 else: WRITE_PAD = 0.004 CLOSE_DELAY = 0.8 class TestLoadError(Exception): def __init__(self, msg): super.__init__(self) self.msg = msg class TestLoad: def __init__(self, logfp, predump=False): self.sentences = [] if type(logfp) == type(""): logfp = open(logfp, "r") self.name = logfp.name self.logfp = logfp self.predump = predump self.type = None self.sourcetype = "pty" self.serial = None self.delay = WRITE_PAD self.delimiter = None text = logfp.read() logfp = open(logfp.name) getter = sniffer.new() type_latch = None commentlen = 0 while True: (plen, ptype, packet, _counter) = getter.get(logfp.fileno()) if plen <= 0: break elif ptype == sniffer.COMMENT_PACKET: commentlen += len(packet) if "Serial:" in packet: packet = packet[1:].strip() try: (_xx, baud, params) = packet.split() baud = int(baud) if params[0] in ('7', '8'): databits = int(params[0]) else: raise ValueError if params[1] in ('N', 'O', 'E'): parity = params[1] else: raise ValueError if params[2] in ('1', '2'): stopbits = int(params[2]) else: raise ValueError except (ValueError, IndexError): raise TestLoadError("bad serial-parameter spec in %s"% self.name) self.serial = (baud, databits, parity, stopbits) elif "Transport: UDP" in packet: self.sourcetype = "UDP" elif "Transport: TCP" in packet: self.sourcetype = "TCP" elif "Delay-Cookie:" in packet: if packet.startswith("#"): packet = packet[1:] try: (_dummy, self.delimiter, delay) = packet.strip().split() self.delay = float(delay) except ValueError: raise TestLoadError("bad Delay-Cookie line in %s"% self.name) self.resplit = True else: if type_latch is None: type_latch = ptype if self.predump: print(repr(packet)) if not packet: raise TestLoadError("zero-length packet from %s"% self.name) self.sentences.append(packet) self.textual = (type_latch == sniffer.NMEA_PACKET) if self.textual: self.legend = "gpsfake: line %d: " else: self.legend = "gpsfake: packet %d" if self.delimiter is not None: self.sentences = text[commentlen:].split(self.delimiter) class PacketError(exceptions.Exception): def __init__(self, msg): exceptions.Exception.__init__(self) self.msg = msg class FakeGPS: def __init__(self, testload, progress=None): self.testload = testload self.progress = progress self.go_predicate = lambda: True self.readers = 0 self.index = 0 self.progress("gpsfake: %s provides %d sentences\n" % (self.testload.name, len(self.testload.sentences))) def write(self, line): raise ValueError(line) def feed(self): line = self.testload.sentences[self.index % len(self.testload.sentences)] if "%Delay:" in line: delay = line.split()[1] time.sleep(int(delay)) self.write(line) if self.progress: self.progress("gpsfake: %s feeds %d=%s\n" % (self.testload.name, len(line), repr(line))) time.sleep(self.testload.delay) self.index += 1 class FakePTY(FakeGPS): def __init__(self, testload, speed=4800, databits=8, parity='N', stopbits=1, progress=None): FakeGPS.__init__(self, testload, progress) if self.testload.serial: (speed, databits, parity, stopbits) = self.testload.serial self.speed = speed baudrates = { 0: termios.B0, 50: termios.B50, 75: termios.B75, 110: termios.B110, 134: termios.B134, 150: termios.B150, 200: termios.B200, 300: termios.B300, 600: termios.B600, 1200: termios.B1200, 1800: termios.B1800, 2400: termios.B2400, 4800: termios.B4800, 9600: 
termios.B9600, 19200: termios.B19200, 38400: termios.B38400, 57600: termios.B57600, 115200: termios.B115200, 230400: termios.B230400, } (self.fd, self.slave_fd) = pty.openpty() self.byname = os.ttyname(self.slave_fd) (iflag, oflag, cflag, lflag, ispeed, ospeed, cc) = termios.tcgetattr(self.slave_fd) cc[termios.VMIN] = 1 cflag &= ~(termios.PARENB | termios.PARODD | termios.CRTSCTS) cflag |= termios.CREAD | termios.CLOCAL iflag = oflag = lflag = 0 iflag &=~ (termios.PARMRK | termios.INPCK) cflag &=~ (termios.CSIZE | termios.CSTOPB | termios.PARENB | termios.PARODD) if databits == 7: cflag |= termios.CS7 else: cflag |= termios.CS8 if stopbits == 2: cflag |= termios.CSTOPB if parity == 'E': iflag |= termios.INPCK cflag |= termios.PARENB elif parity == 'O': iflag |= termios.INPCK cflag |= termios.PARENB | termios.PARODD ispeed = ospeed = baudrates[speed] try: termios.tcsetattr(self.slave_fd, termios.TCSANOW, [iflag, oflag, cflag, lflag, ispeed, ospeed, cc]) except termios.error: raise TestLoadError("error attempting to set serial mode to %s %s%s%s" % (speed, databits, parity, stopbits)) def read(self): termios.tcflush(self.fd, termios.TCIFLUSH) def write(self, line): os.write(self.fd, line) def drain(self): termios.tcdrain(self.fd) class FakeTCP(FakeGPS): def __init__(self, testload, host, port, progress=None): FakeGPS.__init__(self, testload, progress) self.host = host self.port = int(port) self.byname = "tcp://" + host + ":" + str(port) self.dispatcher = socket.socket(socket.AF_INET, socket.SOCK_STREAM) self.dispatcher.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) self.dispatcher.bind((self.host, self.port)) self.dispatcher.listen(5) self.readables = [self.dispatcher] def read(self): readable, _writable, _errored = select.select(self.readables, [], [], 0) for s in readable: if s == self.dispatcher: client_socket, _address = s.accept() self.readables = [client_socket] self.dispatcher.close() else: data = s.recv(1024) if not data: s.close() self.readables.remove(s) def write(self, line): for s in self.readables: if s != self.dispatcher: s.send(line) def drain(self): for s in self.readables: if s != self.dispatcher: s.shutdown(socket.SHUT_RDWR) class FakeUDP(FakeGPS): def __init__(self, testload, ipaddr, port, progress=None): FakeGPS.__init__(self, testload, progress) self.ipaddr = ipaddr self.port = port self.byname = "udp://" + ipaddr + ":" + str(port) self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) def read(self): pass def write(self, line): self.sock.sendto(line, (self.ipaddr, int(self.port))) def drain(self): pass class DaemonError(exceptions.Exception): def __init__(self, msg): exceptions.Exception.__init__(self) self.msg = msg def __str__(self): return repr(self.msg) class DaemonInstance: def __init__(self, control_socket=None): self.sockfile = None self.pid = None self.tmpdir = os.environ.get('TMPDIR', '/tmp') if control_socket: self.control_socket = control_socket else: self.control_socket = "%s/gpsfake-%d.sock" % (self.tmpdir, os.getpid()) self.pidfile = "%s/gpsfake-%d.pid" % (self.tmpdir, os.getpid()) def spawn(self, options, port, background=False, prefix=""): self.spawncmd = None if os.environ.get('GPSD_HOME'): for path in os.environ['GPSD_HOME'].split(':'): _spawncmd = "%s/gpsd" % path if os.path.isfile(_spawncmd) and os.access(_spawncmd, os.X_OK): self.spawncmd = _spawncmd break if not self.spawncmd: if not '/usr/sbin' in os.environ['PATH']: os.environ['PATH']=os.environ['PATH'] + ":/usr/sbin" for path in os.environ['PATH'].split(':'): _spawncmd = "%s/gpsd" % 
path if os.path.isfile(_spawncmd) and os.access(_spawncmd, os.X_OK): self.spawncmd = _spawncmd break if not self.spawncmd: raise DaemonError("Cannot execute gpsd: executable not found. Set GPSD_HOME env variable") self.spawncmd += " -b -N -S %s -F %s -P %s %s" % (port, self.control_socket, self.pidfile, options) if prefix: self.spawncmd = prefix + " " + self.spawncmd.strip() if background: self.spawncmd += " &" status = os.system(self.spawncmd) if os.WIFSIGNALED(status) or os.WEXITSTATUS(status): raise DaemonError("daemon exited with status %d" % status) def wait_pid(self): while True: try: fp = open(self.pidfile) except IOError: time.sleep(0.1) continue try: fp.seek(0) pidstr = fp.read() self.pid = int(pidstr) except ValueError: time.sleep(0.5) continue fp.close() break def __get_control_socket(self): if not os.path.exists(self.control_socket): return None try: self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM, 0) self.sock.connect(self.control_socket) except socket.error: if self.sock: self.sock.close() self.sock = None return self.sock def is_alive(self): try: os.kill(self.pid, 0) return True except OSError: return False
BSD 3-Clause New or Revised License
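A hedged sketch of driving add_device; the import name is assumed (the module lives at common/lib/gps_python3/fake.py), gpsd must be installed for spawn() to work, and the device path and port are placeholders:

import fake  # assumed module name for common/lib/gps_python3/fake.py

daemon = fake.DaemonInstance()
daemon.spawn(options="-n", port=2947)  # 2947 is the conventional gpsd port
daemon.wait_pid()
daemon.add_device("/dev/pts/3")        # placeholder pty path for a simulated GPS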
deepchem/torchchem
torchchem/models/model.py
Model.__init__
python
def __init__(self, model_instance=None, model_dir=None, verbose=True, **kwargs): self.model_dir_is_temp = False if model_dir is not None: if not os.path.exists(model_dir): os.makedirs(model_dir) else: model_dir = tempfile.mkdtemp() self.model_dir_is_temp = True self.model_dir = model_dir self.model_instance = model_instance self.model_class = model_instance.__class__ self.verbose = verbose
Abstract class for all models. Parameters: ----------- model_instance: object Wrapper around ScikitLearn/Keras/Tensorflow model object. model_dir: str Path to directory where model will be stored.
https://github.com/deepchem/torchchem/blob/b4cee54088c2d1d52c349c3ed67126bc86940ba8/torchchem/models/model.py#L18-L42
import shutil import torch import time import numpy as np import tempfile from torchchem.utils import log class Model(object):
MIT License
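A minimal sketch of constructing the wrapper above around an arbitrary torch module; the import path is inferred from the function_path, and omitting model_dir exercises the temporary-directory branch:

import torch
from torchchem.models.model import Model

net = torch.nn.Linear(10, 1)               # any torch module works as a stand-in
model = Model(model_instance=net)          # no model_dir -> a temp dir is created
print(model.model_dir, model.model_class)  # e.g. /tmp/tmpXXXX <class 'torch.nn.Linear'>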
res2net/res2net-detectron2
projects/TensorMask/tensormask/arch.py
TensorMaskHead.forward
python
def forward(self, features): pred_logits = [self.cls_score(self.cls_subnet(x)) for x in features] pred_deltas = [self.bbox_pred(self.bbox_subnet(x)) for x in features] pred_masks = None if self.mask_on: mask_feats = [self.mask_subnet(x) for x in features] if self.bipyramid_on: mask_feat_high_res = mask_feats[0] H, W = mask_feat_high_res.shape[-2:] mask_feats_up = [] for lvl, mask_feat in enumerate(mask_feats): lambda_val = 2.0 ** lvl mask_feat_up = mask_feat if lvl > 0: mask_feat_up = F.interpolate( mask_feat, scale_factor=lambda_val, mode="bilinear", align_corners=False ) mask_feats_up.append( self.mask_fuse(mask_feat_up[:, :, :H, :W] + mask_feat_high_res) ) mask_feats = mask_feats_up pred_masks = [] for lvl, mask_feat in enumerate(mask_feats): cur_masks = [] for mask_size in self.mask_sizes: cur_mask_module = getattr(self, "mask_pred_%02d" % mask_size) cur_mask = cur_mask_module(mask_feat) if self.align_on: if self.bipyramid_on: cur_mask_module = getattr(self, "align2nat_%02d" % lvl) cur_mask = cur_mask_module(cur_mask) else: cur_mask = self.align2nat(cur_mask) cur_masks.append(cur_mask) pred_masks.append(cur_masks) return pred_logits, pred_deltas, pred_masks
Arguments: features (list[Tensor]): FPN feature map tensors in high to low resolution. Each tensor in the list corresponds to a different feature level. Returns: pred_logits (list[Tensor]): #lvl tensors, each has shape (N, AxK, Hi, Wi). The tensor predicts the classification probability at each spatial position for each of the A anchors and K object classes. pred_deltas (list[Tensor]): #lvl tensors, each has shape (N, Ax4, Hi, Wi). The tensor predicts 4-vector (dx,dy,dw,dh) box regression values for every anchor. These values are the relative offset between the anchor and the ground truth box. pred_masks (list(list[Tensor])): #lvl list of tensors, each is a list of A tensors of shape (N, M_{i,a}, Hi, Wi). The tensor predicts a dense set of M_ixM_i masks at every location.
https://github.com/res2net/res2net-detectron2/blob/3677895d5d23635b67837e64a79370b9ee117c27/projects/TensorMask/tensormask/arch.py#L848-L904
import copy import logging import math from typing import List import torch import torch.nn.functional as F from fvcore.nn import sigmoid_focal_loss_star_jit, smooth_l1_loss from torch import nn from detectron2.layers import ShapeSpec, batched_nms, cat, paste_masks_in_image from detectron2.modeling.anchor_generator import DefaultAnchorGenerator from detectron2.modeling.backbone import build_backbone from detectron2.modeling.box_regression import Box2BoxTransform from detectron2.modeling.meta_arch.build import META_ARCH_REGISTRY from detectron2.modeling.meta_arch.retinanet import ( permute_all_cls_and_box_to_N_HWA_K_and_concat, permute_to_N_HWA_K, ) from detectron2.structures import Boxes, ImageList, Instances from detectron2.utils.logger import log_first_n from tensormask.layers import SwapAlign2Nat __all__ = ["TensorMask"] def _assignment_rule( gt_boxes, anchor_boxes, unit_lengths, min_anchor_size, scale_thresh=2.0, spatial_thresh=1.0, uniqueness_on=True, ): gt_boxes, anchor_boxes = gt_boxes.tensor, anchor_boxes.tensor N = gt_boxes.shape[0] M = anchor_boxes.shape[0] if N == 0 or M == 0: return ( gt_boxes.new_full((N,), 0, dtype=torch.int64), gt_boxes.new_full((N,), -1, dtype=torch.int8), ) lt = torch.min(gt_boxes[:, None, :2], anchor_boxes[:, :2]) rb = torch.max(gt_boxes[:, None, 2:], anchor_boxes[:, 2:]) union = cat([lt, rb], dim=2) dummy_gt_boxes = torch.zeros_like(gt_boxes) anchor = dummy_gt_boxes[:, None, :] + anchor_boxes[:, :] contain_matrix = torch.all(union == anchor, dim=2) gt_size_lower = torch.max(gt_boxes[:, 2:] - gt_boxes[:, :2], dim=1)[0] gt_size_upper = gt_size_lower * scale_thresh gt_size_upper[gt_size_upper < min_anchor_size] = min_anchor_size anchor_size = ( torch.max(anchor_boxes[:, 2:] - anchor_boxes[:, :2], dim=1)[0] - unit_lengths ) size_diff_upper = gt_size_upper[:, None] - anchor_size scale_matrix = size_diff_upper >= 0 gt_center = (gt_boxes[:, 2:] + gt_boxes[:, :2]) / 2 anchor_center = (anchor_boxes[:, 2:] + anchor_boxes[:, :2]) / 2 offset_center = gt_center[:, None, :] - anchor_center[:, :] offset_center /= unit_lengths[:, None] spatial_square = spatial_thresh * spatial_thresh spatial_matrix = torch.sum(offset_center * offset_center, dim=2) <= spatial_square assign_matrix = (contain_matrix & scale_matrix & spatial_matrix).int() matched_vals, matches = assign_matrix.max(dim=0) match_labels = matches.new_full(matches.size(), 1, dtype=torch.int8) match_labels[matched_vals == 0] = 0 match_labels[matched_vals == 1] = 1 not_unique_idxs = assign_matrix.sum(dim=0) > 1 if uniqueness_on: match_labels[not_unique_idxs] = 0 else: match_labels[not_unique_idxs] = -1 return matches, match_labels def _paste_mask_lists_in_image(masks, boxes, image_shape, threshold=0.5): if len(masks) == 0: return torch.empty((0, 1) + image_shape, dtype=torch.uint8) img_masks = [] ind_masks = [] mask_sizes = torch.tensor([m.shape[-1] for m in masks]) unique_sizes = torch.unique(mask_sizes) for msize in unique_sizes.tolist(): cur_ind = torch.where(mask_sizes == msize)[0] ind_masks.append(cur_ind) cur_masks = cat([masks[i] for i in cur_ind]) cur_boxes = boxes[cur_ind] img_masks.append(paste_masks_in_image(cur_masks, cur_boxes, image_shape, threshold)) img_masks = cat(img_masks) ind_masks = cat(ind_masks) img_masks_out = torch.empty_like(img_masks) img_masks_out[ind_masks, :, :] = img_masks return img_masks_out def _postprocess(results, result_mask_info, output_height, output_width, mask_threshold=0.5): scale_x, scale_y = (output_width / results.image_size[1], output_height / results.image_size[0]) 
results = Instances((output_height, output_width), **results.get_fields()) output_boxes = results.pred_boxes output_boxes.tensor[:, 0::2] *= scale_x output_boxes.tensor[:, 1::2] *= scale_y output_boxes.clip(results.image_size) inds_nonempty = output_boxes.nonempty() results = results[inds_nonempty] result_masks, result_anchors = result_mask_info if result_masks: result_anchors.tensor[:, 0::2] *= scale_x result_anchors.tensor[:, 1::2] *= scale_y result_masks = [x for (i, x) in zip(inds_nonempty.tolist(), result_masks) if i] results.pred_masks = _paste_mask_lists_in_image( result_masks, result_anchors[inds_nonempty], results.image_size, threshold=mask_threshold, ) return results class TensorMaskAnchorGenerator(DefaultAnchorGenerator): def grid_anchors_with_unit_lengths_and_indexes(self, grid_sizes): anchors = [] unit_lengths = [] indexes = [] for lvl, (size, stride, base_anchors) in enumerate( zip(grid_sizes, self.strides, self.cell_anchors) ): grid_height, grid_width = size device = base_anchors.device shifts_x = torch.arange( 0, grid_width * stride, step=stride, dtype=torch.float32, device=device ) shifts_y = torch.arange( 0, grid_height * stride, step=stride, dtype=torch.float32, device=device ) shift_y, shift_x = torch.meshgrid(shifts_y, shifts_x) shifts = torch.stack((shift_x, shift_y, shift_x, shift_y), dim=2) cur_anchor = (shifts[:, :, None, :] + base_anchors.view(1, 1, -1, 4)).view(-1, 4) anchors.append(cur_anchor) unit_lengths.append( torch.full((cur_anchor.shape[0],), stride, dtype=torch.float32, device=device) ) shifts_l = torch.full((1,), lvl, dtype=torch.int64, device=device) shifts_i = torch.zeros((1,), dtype=torch.int64, device=device) shifts_h = torch.arange(0, grid_height, dtype=torch.int64, device=device) shifts_w = torch.arange(0, grid_width, dtype=torch.int64, device=device) shifts_a = torch.arange(0, base_anchors.shape[0], dtype=torch.int64, device=device) grids = torch.meshgrid(shifts_l, shifts_i, shifts_h, shifts_w, shifts_a) indexes.append(torch.stack(grids, dim=5).view(-1, 5)) return anchors, unit_lengths, indexes def forward(self, features): num_images = len(features[0]) grid_sizes = [feature_map.shape[-2:] for feature_map in features] anchors_list, lengths_list, indexes_list = self.grid_anchors_with_unit_lengths_and_indexes( grid_sizes ) anchors_per_im = [Boxes(x) for x in anchors_list] anchors = [copy.deepcopy(anchors_per_im) for _ in range(num_images)] unit_lengths = [copy.deepcopy(lengths_list) for _ in range(num_images)] indexes = [copy.deepcopy(indexes_list) for _ in range(num_images)] return anchors, unit_lengths, indexes @META_ARCH_REGISTRY.register() class TensorMask(nn.Module): def __init__(self, cfg): super().__init__() self.num_classes = cfg.MODEL.TENSOR_MASK.NUM_CLASSES self.in_features = cfg.MODEL.TENSOR_MASK.IN_FEATURES self.anchor_sizes = cfg.MODEL.ANCHOR_GENERATOR.SIZES self.num_levels = len(cfg.MODEL.ANCHOR_GENERATOR.SIZES) self.focal_loss_alpha = cfg.MODEL.TENSOR_MASK.FOCAL_LOSS_ALPHA self.focal_loss_gamma = cfg.MODEL.TENSOR_MASK.FOCAL_LOSS_GAMMA self.score_threshold = cfg.MODEL.TENSOR_MASK.SCORE_THRESH_TEST self.topk_candidates = cfg.MODEL.TENSOR_MASK.TOPK_CANDIDATES_TEST self.nms_threshold = cfg.MODEL.TENSOR_MASK.NMS_THRESH_TEST self.detections_im = cfg.TEST.DETECTIONS_PER_IMAGE self.mask_on = cfg.MODEL.MASK_ON self.mask_loss_weight = cfg.MODEL.TENSOR_MASK.MASK_LOSS_WEIGHT self.mask_pos_weight = torch.tensor(cfg.MODEL.TENSOR_MASK.POSITIVE_WEIGHT, dtype=torch.float32) self.bipyramid_on = cfg.MODEL.TENSOR_MASK.BIPYRAMID_ON self.backbone = 
build_backbone(cfg) backbone_shape = self.backbone.output_shape() feature_shapes = [backbone_shape[f] for f in self.in_features] feature_strides = [x.stride for x in feature_shapes] self.anchor_generator = TensorMaskAnchorGenerator(cfg, feature_shapes) self.num_anchors = self.anchor_generator.num_cell_anchors[0] anchors_min_level = cfg.MODEL.ANCHOR_GENERATOR.SIZES[0] self.mask_sizes = [size // feature_strides[0] for size in anchors_min_level] self.min_anchor_size = min(anchors_min_level) - feature_strides[0] self.head = TensorMaskHead( cfg, self.num_levels, self.num_anchors, self.mask_sizes, feature_shapes ) self.box2box_transform = Box2BoxTransform(weights=cfg.MODEL.TENSOR_MASK.BBOX_REG_WEIGHTS) self.register_buffer("pixel_mean", torch.Tensor(cfg.MODEL.PIXEL_MEAN).view(-1, 1, 1)) self.register_buffer("pixel_std", torch.Tensor(cfg.MODEL.PIXEL_STD).view(-1, 1, 1)) @property def device(self): return self.pixel_mean.device def forward(self, batched_inputs): images = self.preprocess_image(batched_inputs) if "instances" in batched_inputs[0]: gt_instances = [x["instances"].to(self.device) for x in batched_inputs] elif "targets" in batched_inputs[0]: log_first_n( logging.WARN, "'targets' in the model inputs is now renamed to 'instances'!", n=10 ) gt_instances = [x["targets"].to(self.device) for x in batched_inputs] else: gt_instances = None features = self.backbone(images.tensor) features = [features[f] for f in self.in_features] pred_logits, pred_deltas, pred_masks = self.head(features) anchors, unit_lengths, indexes = self.anchor_generator(features) if self.training: gt_class_info, gt_delta_info, gt_mask_info, num_fg = self.get_ground_truth( anchors, unit_lengths, indexes, gt_instances ) return self.losses( gt_class_info, gt_delta_info, gt_mask_info, num_fg, pred_logits, pred_deltas, pred_masks, ) else: results = self.inference(pred_logits, pred_deltas, pred_masks, anchors, indexes, images) processed_results = [] for results_im, input_im, image_size in zip( results, batched_inputs, images.image_sizes ): height = input_im.get("height", image_size[0]) width = input_im.get("width", image_size[1]) result_box, result_mask = results_im r = _postprocess(result_box, result_mask, height, width) processed_results.append({"instances": r}) return processed_results def losses( self, gt_class_info, gt_delta_info, gt_mask_info, num_fg, pred_logits, pred_deltas, pred_masks, ): gt_classes_target, gt_valid_inds = gt_class_info gt_deltas, gt_fg_inds = gt_delta_info gt_masks, gt_mask_inds = gt_mask_info loss_normalizer = torch.tensor(max(1, num_fg), dtype=torch.float32, device=self.device) pred_logits, pred_deltas = permute_all_cls_and_box_to_N_HWA_K_and_concat( pred_logits, pred_deltas, self.num_classes ) loss_cls = ( sigmoid_focal_loss_star_jit( pred_logits[gt_valid_inds], gt_classes_target[gt_valid_inds], alpha=self.focal_loss_alpha, gamma=self.focal_loss_gamma, reduction="sum", ) / loss_normalizer ) if num_fg == 0: loss_box_reg = pred_deltas.sum() * 0 else: loss_box_reg = ( smooth_l1_loss(pred_deltas[gt_fg_inds], gt_deltas, beta=0.0, reduction="sum") / loss_normalizer ) losses = {"loss_cls": loss_cls, "loss_box_reg": loss_box_reg} if self.mask_on: loss_mask = 0 for lvl in range(self.num_levels): cur_level_factor = 2 ** lvl if self.bipyramid_on else 1 for anc in range(self.num_anchors): cur_gt_mask_inds = gt_mask_inds[lvl][anc] if cur_gt_mask_inds is None: loss_mask += pred_masks[lvl][anc][0, 0, 0, 0] * 0 else: cur_mask_size = self.mask_sizes[anc] * cur_level_factor cur_size_divider = torch.tensor( 
self.mask_loss_weight / (cur_mask_size ** 2), dtype=torch.float32, device=self.device, ) cur_pred_masks = pred_masks[lvl][anc][ cur_gt_mask_inds[:, 0], :, cur_gt_mask_inds[:, 1], cur_gt_mask_inds[:, 2], ] loss_mask += F.binary_cross_entropy_with_logits( cur_pred_masks.view(-1, cur_mask_size, cur_mask_size), gt_masks[lvl][anc].to(dtype=torch.float32), reduction="sum", weight=cur_size_divider, pos_weight=self.mask_pos_weight, ) losses["loss_mask"] = loss_mask / loss_normalizer return losses @torch.no_grad() def get_ground_truth(self, anchors, unit_lengths, indexes, targets): gt_classes = [] gt_deltas = [] gt_masks = [[[] for _ in range(self.num_anchors)] for _ in range(self.num_levels)] gt_mask_inds = [[[] for _ in range(self.num_anchors)] for _ in range(self.num_levels)] anchors = [Boxes.cat(anchors_i) for anchors_i in anchors] unit_lengths = [cat(unit_lengths_i) for unit_lengths_i in unit_lengths] indexes = [cat(indexes_i) for indexes_i in indexes] num_fg = 0 for i, (anchors_im, unit_lengths_im, indexes_im, targets_im) in enumerate( zip(anchors, unit_lengths, indexes, targets) ): gt_classes_i = torch.full_like( unit_lengths_im, self.num_classes, dtype=torch.int64, device=self.device ) has_gt = len(targets_im) > 0 if has_gt: gt_matched_inds, anchor_labels = _assignment_rule( targets_im.gt_boxes, anchors_im, unit_lengths_im, self.min_anchor_size ) fg_inds = anchor_labels == 1 fg_anchors = anchors_im[fg_inds] num_fg += len(fg_anchors) gt_fg_matched_inds = gt_matched_inds[fg_inds] gt_classes_i[fg_inds] = targets_im.gt_classes[gt_fg_matched_inds] gt_classes_i[anchor_labels == -1] = -1 matched_gt_boxes = targets_im[gt_fg_matched_inds].gt_boxes gt_deltas_i = self.box2box_transform.get_deltas( fg_anchors.tensor, matched_gt_boxes.tensor ) gt_deltas.append(gt_deltas_i) if self.mask_on: matched_indexes = indexes_im[fg_inds, :] for lvl in range(self.num_levels): ids_lvl = matched_indexes[:, 0] == lvl if torch.any(ids_lvl): cur_level_factor = 2 ** lvl if self.bipyramid_on else 1 for anc in range(self.num_anchors): ids_lvl_anchor = ids_lvl & (matched_indexes[:, 4] == anc) if torch.any(ids_lvl_anchor): gt_masks[lvl][anc].append( targets_im[ gt_fg_matched_inds[ids_lvl_anchor] ].gt_masks.crop_and_resize( fg_anchors[ids_lvl_anchor].tensor, self.mask_sizes[anc] * cur_level_factor, ) ) gt_mask_inds_lvl_anc = matched_indexes[ids_lvl_anchor, 1:4] gt_mask_inds_lvl_anc[:, 0] = i gt_mask_inds[lvl][anc].append(gt_mask_inds_lvl_anc) gt_classes.append(gt_classes_i) gt_classes = cat(gt_classes) gt_valid_inds = gt_classes >= 0 gt_fg_inds = gt_valid_inds & (gt_classes < self.num_classes) gt_classes_target = torch.zeros( (gt_classes.shape[0], self.num_classes), dtype=torch.float32, device=self.device ) gt_classes_target[gt_fg_inds, gt_classes[gt_fg_inds]] = 1 gt_deltas = cat(gt_deltas) if gt_deltas else None gt_masks = [[cat(mla) if mla else None for mla in ml] for ml in gt_masks] gt_mask_inds = [[cat(ila) if ila else None for ila in il] for il in gt_mask_inds] return ( (gt_classes_target, gt_valid_inds), (gt_deltas, gt_fg_inds), (gt_masks, gt_mask_inds), num_fg, ) def inference(self, pred_logits, pred_deltas, pred_masks, anchors, indexes, images): assert len(anchors) == len(images) results = [] pred_logits = [permute_to_N_HWA_K(x, self.num_classes) for x in pred_logits] pred_deltas = [permute_to_N_HWA_K(x, 4) for x in pred_deltas] pred_logits = cat(pred_logits, dim=1) pred_deltas = cat(pred_deltas, dim=1) for img_idx, (anchors_im, indexes_im) in enumerate(zip(anchors, indexes)): image_size = images.image_sizes[img_idx] 
logits_im = pred_logits[img_idx] deltas_im = pred_deltas[img_idx] if self.mask_on: masks_im = [[mla[img_idx] for mla in ml] for ml in pred_masks] else: masks_im = [None] * self.num_levels results_im = self.inference_single_image( logits_im, deltas_im, masks_im, Boxes.cat(anchors_im), cat(indexes_im), tuple(image_size), ) results.append(results_im) return results def inference_single_image( self, pred_logits, pred_deltas, pred_masks, anchors, indexes, image_size ): pred_logits = pred_logits.flatten().sigmoid_() logits_top_idxs = torch.where(pred_logits > self.score_threshold)[0] num_topk = min(self.topk_candidates, logits_top_idxs.shape[0]) pred_prob, topk_idxs = pred_logits[logits_top_idxs].sort(descending=True) pred_prob = pred_prob[:num_topk] top_idxs = logits_top_idxs[topk_idxs[:num_topk]] cls_idxs = top_idxs % self.num_classes top_idxs //= self.num_classes pred_boxes = self.box2box_transform.apply_deltas( pred_deltas[top_idxs], anchors[top_idxs].tensor ) keep = batched_nms(pred_boxes, pred_prob, cls_idxs, self.nms_threshold) keep = keep[: self.detections_im] results = Instances(image_size) results.pred_boxes = Boxes(pred_boxes[keep]) results.scores = pred_prob[keep] results.pred_classes = cls_idxs[keep] result_masks, result_anchors = [], None if self.mask_on: top_indexes = indexes[top_idxs] top_anchors = anchors[top_idxs] result_indexes = top_indexes[keep] result_anchors = top_anchors[keep] for lvl, _, h, w, anc in result_indexes.tolist(): cur_size = self.mask_sizes[anc] * (2 ** lvl if self.bipyramid_on else 1) result_masks.append( torch.sigmoid(pred_masks[lvl][anc][:, h, w].view(1, cur_size, cur_size)) ) return results, (result_masks, result_anchors) def preprocess_image(self, batched_inputs): images = [x["image"].to(self.device) for x in batched_inputs] images = [(x - self.pixel_mean) / self.pixel_std for x in images] images = ImageList.from_tensors(images, self.backbone.size_divisibility) return images class TensorMaskHead(nn.Module): def __init__(self, cfg, num_levels, num_anchors, mask_sizes, input_shape: List[ShapeSpec]): super().__init__() self.in_features = cfg.MODEL.TENSOR_MASK.IN_FEATURES in_channels = input_shape[0].channels num_classes = cfg.MODEL.TENSOR_MASK.NUM_CLASSES cls_channels = cfg.MODEL.TENSOR_MASK.CLS_CHANNELS num_convs = cfg.MODEL.TENSOR_MASK.NUM_CONVS bbox_channels = cfg.MODEL.TENSOR_MASK.BBOX_CHANNELS self.mask_on = cfg.MODEL.MASK_ON self.mask_sizes = mask_sizes mask_channels = cfg.MODEL.TENSOR_MASK.MASK_CHANNELS self.align_on = cfg.MODEL.TENSOR_MASK.ALIGNED_ON self.bipyramid_on = cfg.MODEL.TENSOR_MASK.BIPYRAMID_ON cls_subnet = [] cur_channels = in_channels for _ in range(num_convs): cls_subnet.append( nn.Conv2d(cur_channels, cls_channels, kernel_size=3, stride=1, padding=1) ) cur_channels = cls_channels cls_subnet.append(nn.ReLU()) self.cls_subnet = nn.Sequential(*cls_subnet) self.cls_score = nn.Conv2d( cur_channels, num_anchors * num_classes, kernel_size=3, stride=1, padding=1 ) modules_list = [self.cls_subnet, self.cls_score] bbox_subnet = [] cur_channels = in_channels for _ in range(num_convs): bbox_subnet.append( nn.Conv2d(cur_channels, bbox_channels, kernel_size=3, stride=1, padding=1) ) cur_channels = bbox_channels bbox_subnet.append(nn.ReLU()) self.bbox_subnet = nn.Sequential(*bbox_subnet) self.bbox_pred = nn.Conv2d( cur_channels, num_anchors * 4, kernel_size=3, stride=1, padding=1 ) modules_list.extend([self.bbox_subnet, self.bbox_pred]) if self.mask_on: mask_subnet = [] cur_channels = in_channels for _ in range(num_convs): mask_subnet.append( 
nn.Conv2d(cur_channels, mask_channels, kernel_size=3, stride=1, padding=1) ) cur_channels = mask_channels mask_subnet.append(nn.ReLU()) self.mask_subnet = nn.Sequential(*mask_subnet) modules_list.append(self.mask_subnet) for mask_size in self.mask_sizes: cur_mask_module = "mask_pred_%02d" % mask_size self.add_module( cur_mask_module, nn.Conv2d( cur_channels, mask_size * mask_size, kernel_size=1, stride=1, padding=0 ), ) modules_list.append(getattr(self, cur_mask_module)) if self.align_on: if self.bipyramid_on: for lvl in range(num_levels): cur_mask_module = "align2nat_%02d" % lvl lambda_val = 2 ** lvl setattr(self, cur_mask_module, SwapAlign2Nat(lambda_val)) mask_fuse = [ nn.Conv2d(cur_channels, cur_channels, kernel_size=3, stride=1, padding=1), nn.ReLU(), ] self.mask_fuse = nn.Sequential(*mask_fuse) modules_list.append(self.mask_fuse) else: self.align2nat = SwapAlign2Nat(1) for modules in modules_list: for layer in modules.modules(): if isinstance(layer, nn.Conv2d): torch.nn.init.normal_(layer.weight, mean=0, std=0.01) torch.nn.init.constant_(layer.bias, 0) bias_value = -math.log((1 - 0.01) / 0.01) torch.nn.init.constant_(self.cls_score.bias, bias_value)
Apache License 2.0
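The docstring above describes per-level outputs of shape (N, AxK, Hi, Wi). A stand-alone plain-torch sketch (no detectron2 config needed) of the (N, A*K, Hi, Wi) -> (N, Hi*Wi*A, K) reshuffle that the loss code applies to each such tensor, mirroring the permute_to_N_HWA_K helper imported in the context:

import torch

N, A, K, H, W = 2, 3, 80, 16, 16
pred_logits_lvl = torch.randn(N, A * K, H, W)  # one level of head output

x = pred_logits_lvl.view(N, A, K, H, W)
x = x.permute(0, 3, 4, 1, 2)                   # (N, H, W, A, K)
x = x.reshape(N, H * W * A, K)                 # one row per anchor location
assert x.shape == (N, H * W * A, K)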
microsoft/seismic-deeplearning
interpretation/deepseismic_interpretation/azureml_pipelines/base_pipeline.py
DeepSeismicAzMLPipeline._setup_datareference
python
def _setup_datareference(self, name, path): input_data = DataReference(datastore=self.blob_ds, data_reference_name=name, path_on_datastore=path) return input_data
helper function to set up a DataReference object in AzureML. :param str name: [required] name of the data reference :param str path: [required] path on the datastore where the data lives. :returns: input_data :rtype: DataReference
https://github.com/microsoft/seismic-deeplearning/blob/3f74face5d087a3947419a698a6181733d8be8fd/interpretation/deepseismic_interpretation/azureml_pipelines/base_pipeline.py#L173-L183
from azureml.core import Datastore, Workspace, RunConfiguration from azureml.core.model import Model from azureml.core.compute import AmlCompute, ComputeTarget from azureml.core.dataset import Dataset from azureml.core.experiment import Experiment from azureml.pipeline.steps import PythonScriptStep, MpiStep from azureml.pipeline.core import Pipeline, PipelineData, StepSequence from azureml.contrib.pipeline.steps import ParallelRunStep, ParallelRunConfig from azureml.core.runconfig import DEFAULT_GPU_IMAGE from azureml.core.conda_dependencies import CondaDependencies from msrest.exceptions import HttpOperationError from azureml.data.data_reference import DataReference from azureml.core import Environment from dotenv import load_dotenv import os import re from abc import ABC, abstractmethod import json class DeepSeismicAzMLPipeline(ABC): def __init__(self, pipeline_config, ws_config=None): self.ws = Workspace.from_config(path=ws_config) self._load_environment() self._load_config(pipeline_config) self.steps = [] self.pipeline_tags = None self.last_output_data = None def _load_config(self, config_path): try: with open(config_path, "r") as f: self.config = json.load(f) except Exception as e: raise Exception("Was unable to load pipeline config file. {}".format(e)) @abstractmethod def construct_pipeline(self): raise NotImplementedError("construct_pipeline is not implemented") @abstractmethod def _setup_steps(self): raise NotImplementedError("setup_steps is not implemented") def _load_environment(self): load_dotenv() self.account_name = os.getenv("BLOB_ACCOUNT_NAME") self.container_name = os.getenv("BLOB_CONTAINER_NAME") self.account_key = os.getenv("BLOB_ACCOUNT_KEY") self.blob_sub_id = os.getenv("BLOB_SUB_ID") self.comp_name = os.environ.get("AML_COMPUTE_CLUSTER_NAME") self.comp_min_nodes = os.environ.get("AML_COMPUTE_CLUSTER_MIN_NODES") self.comp_max_nodes = os.environ.get("AML_COMPUTE_CLUSTER_MAX_NODES") self.comp_vm_size = os.environ.get("AML_COMPUTE_CLUSTER_SKU") def _setup_model(self, model_name, model_path=None): models = Model.list(self.ws, name=model_name) for model in models: if model.name == model_name: self.model = model print("Found model: " + self.model.name) break if model_path is not None: self.model = Model.register(model_path=model_path, model_name=model_name, workspace=self.ws) if self.model is None: raise Exception( """no model was found or registered. 
Ensure that you have a model registered in this workspace or that you passed the path of a local model""" ) def _setup_datastore(self, blob_dataset_name, output_path=None): try: self.blob_ds = Datastore.get(self.ws, blob_dataset_name) print("Found Blob Datastore with name: %s" % blob_dataset_name) except HttpOperationError: self.blob_ds = Datastore.register_azure_blob_container( workspace=self.ws, datastore_name=blob_dataset_name, account_name=self.account_name, container_name=self.container_name, account_key=self.account_key, subscription_id=self.blob_sub_id, ) print("Registered blob datastore with name: %s" % blob_dataset_name) if output_path is not None: self.output_dir = PipelineData( name="output", datastore=self.ws.get_default_datastore(), output_path_on_compute=output_path ) def _setup_dataset(self, ds_name, data_paths): self.named_ds = [] count = 1 for data_path in data_paths: curr_name = ds_name + str(count) path_on_datastore = self.blob_ds.path(data_path) input_ds = Dataset.File.from_files(path=path_on_datastore, validate=False) try: registered_ds = input_ds.register(workspace=self.ws, name=curr_name, create_new_version=True) except Exception as e: n, v = self._parse_exception(e) registered_ds = Dataset.get_by_name(self.ws, name=n, version=v) self.named_ds.append(registered_ds.as_named_input(curr_name)) count = count + 1
MIT License
tehkillerbee/mopidy-tidal
mopidy_tidal/lru_cache.py
SearchKey.fix_query
python
def fix_query(query):
    query.pop("track_no", None)
    return query
Removes query parameters that would otherwise lead to a cache miss,
e.g. 'track_no', since we can't query TIDAL for a specific album track.

:param query: query dictionary
:return: sanitized query dictionary
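A minimal usage sketch (added for illustration, not part of the original record); the query values below are made up, and fix_query is called as the static method the context shows it to be:

# Hypothetical Mopidy search query containing a 'track_no' key.
query = {"artist": ["Radiohead"], "track_no": [7]}
sanitized = SearchKey.fix_query(dict(query))
print(sanitized)  # {'artist': ['Radiohead']} -- 'track_no' has been removed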
https://github.com/tehkillerbee/mopidy-tidal/blob/83ad5c4363c3c578dc7c67a9ff8ac49bec212443/mopidy_tidal/lru_cache.py#L83-L91
from __future__ import unicode_literals

import logging
from collections import OrderedDict

logger = logging.getLogger(__name__)


class LruCache(OrderedDict):
    def __init__(self, max_size=1024):
        if max_size <= 0:
            raise ValueError('Invalid size')
        OrderedDict.__init__(self)
        self._max_size = max_size
        self._check_limit()

    def get_max_size(self):
        return self._max_size

    def hit(self, key):
        if key in self:
            val = self[key]
            self[key] = val
            return val
        return None

    def __setitem__(self, key, value):
        if key in self:
            del self[key]
        OrderedDict.__setitem__(self, key, value)
        self._check_limit()

    def _check_limit(self):
        while len(self) > self._max_size:
            k = list(self)[0]
            del self[k]


class SearchCache(LruCache):
    def __init__(self, func):
        super(SearchCache, self).__init__()
        self._func = func

    def __call__(self, *args, **kwargs):
        key = SearchKey(**kwargs)
        cached_result = self.hit(key)
        logger.info("Search cache miss" if cached_result is None else "Search cache hit")
        if cached_result is None:
            cached_result = self._func(*args, **kwargs)
            self[key] = cached_result
        return cached_result


class SearchKey(object):
    def __init__(self, **kwargs):
        fixed_query = self.fix_query(kwargs["query"])
        self._query = tuple(sorted(fixed_query.items()))
        self._exact = kwargs["exact"]
        self._hash = None

    def __hash__(self):
        if self._hash is None:
            self._hash = hash(self._exact)
            self._hash ^= hash(repr(self._query))
        return self._hash

    def __eq__(self, other):
        if not isinstance(other, SearchKey):
            return False
        return self._exact == other._exact and self._query == other._query

    @staticmethod
Apache License 2.0
3ll3d00d/beqdesigner
src/main/python/acoustics/smooth.py
exact_center_frequency
python
def exact_center_frequency(frequency=None, fraction=1, n=None, ref=REFERENCE_FREQUENCY):
    if frequency is not None:
        n = index_of_frequency(frequency, fraction=fraction, ref=ref)
    return iec_61260_1_2014.exact_center_frequency(n, fraction=fraction, ref=ref)
Exact center frequency.

:param frequency: Frequency within the band.
:param fraction: Band designator.
:param n: Index of band.
:param ref: Reference frequency.
:return: Exact center frequency for the given frequency or band index.

.. seealso:: :func:`iec_61260_1_2014.exact_center_frequency`

.. seealso:: :func:`iec_61260_1_2014.index_of_frequency`
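A short usage sketch (added here, not from the source repo). The import path is assumed from the record's function_path, and the input frequency is arbitrary:

# Find the exact centre frequency of the 1/3-octave band containing 1000 Hz.
from acoustics.smooth import exact_center_frequency  # assumed import path per the record

fc = exact_center_frequency(frequency=1000.0, fraction=3)
print(fc)  # expected to be 1000 Hz here, since 1000 Hz is the IEC reference frequency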
https://github.com/3ll3d00d/beqdesigner/blob/f0901533a2394a1f9890cd8a0768e90daf53dc2d/src/main/python/acoustics/smooth.py#L9-L24
import numpy as np

from acoustics.standards.iec_61260_1_2014 import index_of_frequency, REFERENCE_FREQUENCY
from acoustics.standards import iec_61260_1_2014

REFERENCE_PRESSURE = 2.0e-5
MIT License
unofficial-memsource/memsource-cli-client
memsource_cli/models/edit_project_security_settings_dto.py
EditProjectSecuritySettingsDto.users_may_set_auto_propagation
python
def users_may_set_auto_propagation(self):
    return self._users_may_set_auto_propagation
Gets the users_may_set_auto_propagation of this EditProjectSecuritySettingsDto.  # noqa: E501

Default: true  # noqa: E501

:return: The users_may_set_auto_propagation of this EditProjectSecuritySettingsDto.  # noqa: E501
:rtype: bool
https://github.com/unofficial-memsource/memsource-cli-client/blob/a6639506b74e95476da87f4375953448b76ea90c/memsource_cli/models/edit_project_security_settings_dto.py#L275-L283
import pprint import re import six class EditProjectSecuritySettingsDto(object): """ Attributes: swagger_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ swagger_types = { 'translation_memory_download_enabled': 'bool', 'web_editor_enabled_for_linguists': 'bool', 'show_user_data_to_linguists': 'bool', 'email_notifications': 'bool', 'strict_workflow_finish': 'bool', 'use_vendors': 'bool', 'linguists_may_edit_locked_segments': 'bool', 'linguists_may_set_auto_propagation': 'bool', 'users_may_set_auto_propagation': 'bool' } attribute_map = { 'translation_memory_download_enabled': 'translationMemoryDownloadEnabled', 'web_editor_enabled_for_linguists': 'webEditorEnabledForLinguists', 'show_user_data_to_linguists': 'showUserDataToLinguists', 'email_notifications': 'emailNotifications', 'strict_workflow_finish': 'strictWorkflowFinish', 'use_vendors': 'useVendors', 'linguists_may_edit_locked_segments': 'linguistsMayEditLockedSegments', 'linguists_may_set_auto_propagation': 'linguistsMaySetAutoPropagation', 'users_may_set_auto_propagation': 'usersMaySetAutoPropagation' } def __init__(self, translation_memory_download_enabled=None, web_editor_enabled_for_linguists=None, show_user_data_to_linguists=None, email_notifications=None, strict_workflow_finish=None, use_vendors=None, linguists_may_edit_locked_segments=None, linguists_may_set_auto_propagation=None, users_may_set_auto_propagation=None): self._translation_memory_download_enabled = None self._web_editor_enabled_for_linguists = None self._show_user_data_to_linguists = None self._email_notifications = None self._strict_workflow_finish = None self._use_vendors = None self._linguists_may_edit_locked_segments = None self._linguists_may_set_auto_propagation = None self._users_may_set_auto_propagation = None self.discriminator = None if translation_memory_download_enabled is not None: self.translation_memory_download_enabled = translation_memory_download_enabled if web_editor_enabled_for_linguists is not None: self.web_editor_enabled_for_linguists = web_editor_enabled_for_linguists if show_user_data_to_linguists is not None: self.show_user_data_to_linguists = show_user_data_to_linguists if email_notifications is not None: self.email_notifications = email_notifications if strict_workflow_finish is not None: self.strict_workflow_finish = strict_workflow_finish if use_vendors is not None: self.use_vendors = use_vendors if linguists_may_edit_locked_segments is not None: self.linguists_may_edit_locked_segments = linguists_may_edit_locked_segments if linguists_may_set_auto_propagation is not None: self.linguists_may_set_auto_propagation = linguists_may_set_auto_propagation if users_may_set_auto_propagation is not None: self.users_may_set_auto_propagation = users_may_set_auto_propagation @property def translation_memory_download_enabled(self): return self._translation_memory_download_enabled @translation_memory_download_enabled.setter def translation_memory_download_enabled(self, translation_memory_download_enabled): self._translation_memory_download_enabled = translation_memory_download_enabled @property def web_editor_enabled_for_linguists(self): return self._web_editor_enabled_for_linguists @web_editor_enabled_for_linguists.setter def web_editor_enabled_for_linguists(self, web_editor_enabled_for_linguists): self._web_editor_enabled_for_linguists = web_editor_enabled_for_linguists @property def show_user_data_to_linguists(self): return 
self._show_user_data_to_linguists @show_user_data_to_linguists.setter def show_user_data_to_linguists(self, show_user_data_to_linguists): self._show_user_data_to_linguists = show_user_data_to_linguists @property def email_notifications(self): return self._email_notifications @email_notifications.setter def email_notifications(self, email_notifications): self._email_notifications = email_notifications @property def strict_workflow_finish(self): return self._strict_workflow_finish @strict_workflow_finish.setter def strict_workflow_finish(self, strict_workflow_finish): self._strict_workflow_finish = strict_workflow_finish @property def use_vendors(self): return self._use_vendors @use_vendors.setter def use_vendors(self, use_vendors): self._use_vendors = use_vendors @property def linguists_may_edit_locked_segments(self): return self._linguists_may_edit_locked_segments @linguists_may_edit_locked_segments.setter def linguists_may_edit_locked_segments(self, linguists_may_edit_locked_segments): self._linguists_may_edit_locked_segments = linguists_may_edit_locked_segments @property def linguists_may_set_auto_propagation(self): return self._linguists_may_set_auto_propagation @linguists_may_set_auto_propagation.setter def linguists_may_set_auto_propagation(self, linguists_may_set_auto_propagation): self._linguists_may_set_auto_propagation = linguists_may_set_auto_propagation @property
Apache License 2.0
mariocj89/dothub
dothub/utils.py
decode_permissions
python
def decode_permissions(permissions_dict):
    if permissions_dict.get("admin"):
        return "admin"
    elif permissions_dict.get("push"):
        return "push"
    elif permissions_dict.get("pull"):
        return "pull"
    else:
        raise ValueError("Unexpected permission options: {}"
                         .format(permissions_dict))
Given a permissions dict, returns the highest permission
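A quick usage sketch (illustrative, not from the repo), using permission dicts shaped the way the function expects:

# decode_permissions picks the highest of admin > push > pull.
print(decode_permissions({"admin": True, "push": True, "pull": True}))   # -> "admin"
print(decode_permissions({"admin": False, "push": True, "pull": True}))  # -> "push"
print(decode_permissions({"pull": True}))                                # -> "pull"
# An empty dict raises ValueError("Unexpected permission options: {}")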
https://github.com/mariocj89/dothub/blob/bcfdcc5a076e48a73c4e0827c56431522e4cc4ba/dothub/utils.py#L23-L33
import logging

import yaml
import git
import re
from deepdiff import DeepDiff
import click
import copy

from yaml import Loader, SafeLoader

LOG = logging.getLogger(__name__)


def construct_yaml_str(self, node):
    return self.construct_scalar(node)


Loader.add_constructor(u'tag:yaml.org,2002:str', construct_yaml_str)
SafeLoader.add_constructor(u'tag:yaml.org,2002:str', construct_yaml_str)
MIT License
mattvonrocketstein/smash
smashlib/ipy3x/parallel/engine/engine.py
EngineFactory.register
python
def register(self):
    self.log.info("Registering with controller at %s" % self.url)
    ctx = self.context
    connect, maybe_tunnel = self.init_connector()
    reg = ctx.socket(zmq.DEALER)
    reg.setsockopt(zmq.IDENTITY, self.bident)
    connect(reg, self.url)

    self.registrar = zmqstream.ZMQStream(reg, self.loop)
    content = dict(uuid=self.ident)
    self.registrar.on_recv(
        lambda msg: self.complete_registration(msg, connect, maybe_tunnel))
    self.session.send(
        self.registrar, "registration_request", content=content)
send the registration_request
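At its core the registration is a DEALER socket with an explicit identity; a stripped-down sketch of just that socket setup with plain pyzmq (endpoint and identity made up for illustration) might be:

import zmq

ctx = zmq.Context.instance()
reg = ctx.socket(zmq.DEALER)
reg.setsockopt(zmq.IDENTITY, b"engine-0001")   # hypothetical engine identity
reg.connect("tcp://127.0.0.1:55555")           # hypothetical controller registration URL

# In the real EngineFactory this socket is wrapped in a ZMQStream and the
# "registration_request" message is sent through the IPython Session machinery.
reg.close()
ctx.term()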
https://github.com/mattvonrocketstein/smash/blob/98acdc27ab72ca80d9a7f63a54c0d52f126a8009/smashlib/ipy3x/parallel/engine/engine.py#L138-L154
from __future__ import print_function import sys import time from getpass import getpass import zmq from zmq.eventloop import ioloop, zmqstream from IPython.utils.localinterfaces import localhost from IPython.utils.traitlets import ( Instance, Dict, Integer, Type, Float, Unicode, CBytes, Bool ) from IPython.utils.py3compat import cast_bytes from IPython.parallel.controller.heartmonitor import Heart from IPython.parallel.factory import RegistrationFactory from IPython.parallel.util import disambiguate_url from IPython.kernel.zmq.ipkernel import IPythonKernel as Kernel from IPython.kernel.zmq.kernelapp import IPKernelApp class EngineFactory(RegistrationFactory): out_stream_factory = Type('IPython.kernel.zmq.iostream.OutStream', config=True, help="""The OutStream for handling stdout/err. Typically 'IPython.kernel.zmq.iostream.OutStream'""") display_hook_factory = Type('IPython.kernel.zmq.displayhook.ZMQDisplayHook', config=True, help="""The class for handling displayhook. Typically 'IPython.kernel.zmq.displayhook.ZMQDisplayHook'""") location = Unicode(config=True, help="""The location (an IP address) of the controller. This is used for disambiguating URLs, to determine whether loopback should be used to connect or the public address.""") timeout = Float(5.0, config=True, help="""The time (in seconds) to wait for the Controller to respond to registration requests before giving up.""") max_heartbeat_misses = Integer(50, config=True, help="""The maximum number of times a check for the heartbeat ping of a controller can be missed before shutting down the engine. If set to 0, the check is disabled.""") sshserver = Unicode(config=True, help="""The SSH server to use for tunneling connections to the Controller.""") sshkey = Unicode(config=True, help="""The SSH private key file to use when tunneling connections to the Controller.""") paramiko = Bool(sys.platform == 'win32', config=True, help="""Whether to use paramiko instead of openssh for tunnels.""") @property def tunnel_mod(self): from zmq.ssh import tunnel return tunnel connection_info = Dict() user_ns = Dict() id = Integer(allow_none=True) registrar = Instance('zmq.eventloop.zmqstream.ZMQStream') kernel = Instance(Kernel) hb_check_period = Integer() _hb_last_pinged = 0.0 _hb_last_monitored = 0.0 _hb_missed_beats = 0 _hb_listener = None bident = CBytes() ident = Unicode() def _ident_changed(self, name, old, new): self.bident = cast_bytes(new) using_ssh = Bool(False) def __init__(self, **kwargs): super(EngineFactory, self).__init__(**kwargs) self.ident = self.session.session def init_connector(self): self.using_ssh = bool(self.sshkey or self.sshserver) if self.sshkey and not self.sshserver: self.sshserver = self.url.split('://')[1].split(':')[0] if self.using_ssh: if self.tunnel_mod.try_passwordless_ssh(self.sshserver, self.sshkey, self.paramiko): password = False else: password = getpass("SSH Password for %s: " % self.sshserver) else: password = False def connect(s, url): url = disambiguate_url(url, self.location) if self.using_ssh: self.log.debug( "Tunneling connection to %s via %s", url, self.sshserver) return self.tunnel_mod.tunnel_connection(s, url, self.sshserver, keyfile=self.sshkey, paramiko=self.paramiko, password=password, ) else: return s.connect(url) def maybe_tunnel(url): url = disambiguate_url(url, self.location) if self.using_ssh: self.log.debug( "Tunneling connection to %s via %s", url, self.sshserver) url, tunnelobj = self.tunnel_mod.open_tunnel(url, self.sshserver, keyfile=self.sshkey, paramiko=self.paramiko, password=password, ) 
return str(url) return connect, maybe_tunnel
MIT License
google/dl_bounds
dl_bounds/src/models.py
Model.create_optimizer
python
def create_optimizer(self, learning_rate, name="sgd", momentum=0, weight_decay=0):
    if name == "sgd":
        if momentum == 0:
            tf.logging.info("Creating SGD optimizer, lr = %s", learning_rate)
            self.opt = tf.train.GradientDescentOptimizer(learning_rate)
        else:
            tf.logging.info("Creating SGD optimizer, lr = %s, momentum = %s",
                            learning_rate, momentum)
            self.opt = tf.train.MomentumOptimizer(learning_rate, momentum)
    elif name == "adam":
        tf.logging.info("Creating Adam optimizer, lr = %s", learning_rate)
        self.opt = tf.train.AdamOptimizer(learning_rate)

    total_loss = self.mean_loss
    if weight_decay > 0:
        tf.logging.info("Using weight decay = %s", weight_decay)
        self.weight_decay_op = tf.multiply(
            tf.nn.l2_loss(self.weights), weight_decay, name="L2_regularizer")
        total_loss += self.weight_decay_op

    self.minimizer = self.opt.minimize(loss=total_loss)
    self.grad = tf.gradients(total_loss, self.weights)[0]
Creates optimizer for the model.

Args:
  learning_rate: learning rate.
  name: type of optimizer. Supported: sgd, adam.
  momentum: Momentum.
  weight_decay: Weight decay.
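The method dispatches on a name string and optionally adds an L2 weight-decay term. A self-contained sketch of the same pattern, with a hypothetical loss and weight vector and assuming the TensorFlow 1.x API used throughout this record:

import tensorflow as tf  # assumes TensorFlow 1.x, as in the surrounding code

def make_optimizer(name, learning_rate, momentum=0.0):
    # Same name-based dispatch as Model.create_optimizer.
    if name == "sgd":
        if momentum == 0.0:
            return tf.train.GradientDescentOptimizer(learning_rate)
        return tf.train.MomentumOptimizer(learning_rate, momentum)
    elif name == "adam":
        return tf.train.AdamOptimizer(learning_rate)
    raise ValueError("Unsupported optimizer: %s" % name)

weights = tf.Variable(tf.zeros([10]))           # hypothetical flat weight vector
mean_loss = tf.reduce_mean(tf.square(weights))  # hypothetical loss
weight_decay = 1e-4

total_loss = mean_loss + tf.multiply(tf.nn.l2_loss(weights), weight_decay)
opt = make_optimizer("sgd", learning_rate=0.01, momentum=0.9)
minimizer = opt.minimize(loss=total_loss)
grad = tf.gradients(total_loss, weights)[0]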
https://github.com/google/dl_bounds/blob/b38fbd73f30d2fd8d1b57ad8706c07a223689365/dl_bounds/src/models.py#L363-L399
from __future__ import absolute_import from __future__ import division from __future__ import print_function from copy import copy from dl_bounds.src.metrics import compute_spectral_complexity from dl_bounds.src.metrics import emp_entropy from dl_bounds.src.metrics import PathL2Norm import numpy as np from scipy.spatial.distance import pdist import tensorflow as tf class Model(object): def __init__(self): self.reset() def reset(self): self.net_in = None self.net_out = None self.label_placeholder = None self.get_weights = None self.pooling_divisor = 1.0 self.input_shape = None self.weight_cursor = 0 self.layer_coords = [] self.layer_inputs = [] self.pflug_diagnostic = False self.pflug_diagnostic_track = [] self.pflug_cos_diagnostic_track = [] self.weight_decay_op = None def add_input(self, input_shape): self.input_shape = input_shape self.net_in = tf.placeholder(tf.float32, shape=[None, np.prod(input_shape)]) with tf.name_scope("input_reshape"): self.net_out = tf.reshape(self.net_in, [-1] + input_shape) self.layer_inputs = [self.net_in] def add_output(self, num_outputs): self.label_placeholder = tf.placeholder( tf.float32, shape=[None, num_outputs]) self.add_fc("output", num_outputs) self.layer_inputs.append(self.net_out) def add_conv2d(self, name, ksize, kernels, stride=1): with tf.name_scope(name): chan = int(self.net_out.shape[-1]) shape = [ksize, ksize, chan, kernels] weights_conv = self.get_weights(shape) b_conv = self.get_weights([shape[-1]]) h_conv = tf.nn.conv2d( self.net_out, weights_conv, strides=[1, stride, stride, 1], padding="SAME") self.net_out = h_conv + b_conv def add_pool(self, name, size, stride=2): with tf.name_scope(name): self.net_out = tf.nn.max_pool( self.net_out, ksize=[1, size, size, 1], strides=[1, stride, stride, 1], padding="SAME") self.pooling_divisor *= stride def add_relu(self): self.net_out = tf.nn.relu(self.net_out) self.layer_inputs.append(self.net_out) def flatten(self): prev_num_kernels = int(self.net_out.shape[-1]) shrunk_input_width = int(self.input_shape[0] / self.pooling_divisor) shrunk_input_height = int(self.input_shape[1] / self.pooling_divisor) self.net_out = tf.reshape(self.net_out, [-1, (shrunk_input_width * shrunk_input_height * prev_num_kernels)]) def add_fc(self, name, size): with tf.name_scope(name): prev_dim = int(self.net_out.shape[-1]) self.layer_coords.append((self.weight_cursor, prev_dim, size)) weights_fc = self.get_weights([prev_dim, size]) b_fc = self.get_weights([size]) self.net_out = tf.matmul(self.net_out, weights_fc) + b_fc def get_fc_weights(self): weights = self.weights.eval() layers = [] for (cursor, d1, d2) in self.layer_coords: layer_weights = weights[cursor:cursor + d1 * d2] layers.append(layer_weights.reshape((d1, d2))) return layers def path_l2_norm(self): layers = self.get_fc_weights() norm = PathL2Norm() return norm(layers) def spectral_complexity(self): layers = self.get_fc_weights() return compute_spectral_complexity(layers) def prod_of_frob_layers(self): layers = self.get_fc_weights() return np.prod([np.linalg.norm(weights, "fro") for weights in layers]) def compute_layer_distortions(self, instances, y, metric="euclidean"): feed_dict = {self.net_in: instances, self.label_placeholder: y} x_dists = pdist(instances, metric=metric) x_mean = np.mean(x_dists) results = dict( ratio_of_mean_shallow_embeddings=[], ratio_of_mean_deep_embeddings=[], mean_shallow_distortion=[], std_shallow_distortion=[], mean_deep_distortion=[], std_deep_distortion=[]) for i in range(len(self.layer_inputs) - 1): embedding_inputs = 
self.layer_inputs[i].eval(feed_dict=feed_dict) embedding = self.layer_inputs[i + 1].eval(feed_dict=feed_dict) embedding_dists = pdist(embedding, metric=metric) embedding_mean = np.mean(embedding_dists) embedding_inputs_dists = pdist(embedding_inputs, metric=metric) embedding_inputs_mean = np.mean(embedding_inputs_dists) shallow_distortions = embedding_dists / embedding_inputs_dists deep_distortions = embedding_dists / x_dists results["ratio_of_mean_shallow_embeddings"].append( embedding_mean / embedding_inputs_mean) results["ratio_of_mean_deep_embeddings"].append(embedding_mean / x_mean) results["mean_shallow_distortion"].append(np.mean(shallow_distortions)) results["std_shallow_distortion"].append(np.std(shallow_distortions)) results["mean_deep_distortion"].append(np.mean(deep_distortions)) results["std_deep_distortion"].append(np.std(deep_distortions)) return results def sharpness(self, dataset, batch_size, learning_rate, init_stddev, passes, optimizer, alpha): model = copy(self) memo_weights = self.weights.eval() abs_memo_weights = np.abs(memo_weights) w = tf.Variable(memo_weights) v = tf.Variable(tf.zeros(len(memo_weights))) def _allocate_weights(_): model.weights = w + v model.get_weights = model.get_weight_chunk model.weight_cursor = 0 model.allocate_weights = _allocate_weights model.initialize( init_stddev=init_stddev, learning_rate=learning_rate, seed=1, optimizer=optimizer, pflug_diagnostic=False) tf.get_default_session().run([w.initializer, v.initializer]) opt = tf.train.GradientDescentOptimizer(learning_rate).minimize( loss=tf.negative(model.mean_loss), var_list=[v]) if dataset.size < batch_size: batch_size = dataset.size for i_pass in range(passes): dataset.reset_and_reshuffle(i_pass) for _ in range(int(dataset.size / batch_size)): x_mb, y_mb = dataset.read_next(batch_size) feed_dict = {model.net_in: x_mb, model.label_placeholder: y_mb} opt.run(feed_dict=feed_dict) v_np = v.eval() trunc_ix = np.abs(v_np) > alpha * (abs_memo_weights + 1) if trunc_ix.any(): v_np[trunc_ix] = alpha * (memo_weights[trunc_ix] + 1) v.load(v_np) emp_risk_adv = model.mean_loss_on_dataset(dataset) emp_risk = self.mean_loss_on_dataset(dataset) sharpness = (emp_risk_adv - emp_risk) / (1.0 + emp_risk) return sharpness def weight_variance(self): weights = self.weights.eval() return np.var(weights) def weight_entropy(self): weights = self.weights.eval() return emp_entropy(weights - weights.min()) def build(self): raise NotImplementedError def count_parameters(self): def _get_dummy_weights(shape): return tf.Variable(tf.zeros(shape)) get_weights = self.get_weights self.get_weights = _get_dummy_weights g = tf.Graph() with g.as_default(): self.build() self.reset() total_params = 0 for var in tf.trainable_variables(): total_params += int(np.prod(var.shape)) self.get_weights = get_weights return total_params def get_weight_chunk(self, chunk_shape): n = np.prod(chunk_shape) if (self.weight_cursor + n) <= self.weights.get_shape()[0]: w = tf.reshape( tf.slice(self.weights, begin=[self.weight_cursor], size=[n]), chunk_shape) self.weight_cursor += n return w else: raise Exception("Used up all allocated weights.") def allocate_weights(self, init_stddev): self.num_params = self.count_parameters() tf.logging.info("Allocating weights [%s] ~ truncated_normal(stddev=%s)", self.num_params, init_stddev) init = tf.truncated_normal([self.num_params], stddev=init_stddev) self.weights = tf.Variable(init) self.get_weights = self.get_weight_chunk self.weight_cursor = 0 def allocate_weights_from_array(self, array): self.num_params = 
self.count_parameters() tf.logging.info("Allocating weights [%s] provided array", self.num_params) init = tf.constant(array) self.weights = tf.Variable(init) self.get_weights = self.get_weight_chunk self.weight_cursor = 0 def create_losses(self): tf.logging.info("Creating mean cross entropy loss") self.mean_loss = tf.reduce_mean( tf.nn.softmax_cross_entropy_with_logits( labels=self.label_placeholder, logits=self.net_out)) correct_pred = tf.equal( tf.argmax(self.net_out, 1), tf.argmax(self.label_placeholder, 1)) self.accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32)) self.multi_loss = tf.losses.softmax_cross_entropy( onehot_labels=self.label_placeholder, logits=self.net_out, reduction=tf.losses.Reduction.NONE)
Apache License 2.0
flyteorg/flytekit
flytekit/clis/flyte_cli/main.py
_get_io_string
python
def _get_io_string(literal_map, verbose=False):
    value_dict = _type_helpers.unpack_literal_map_to_sdk_object(literal_map)
    if value_dict:
        return "\n" + "\n".join(
            "{:30}: {}".format(
                k,
                _prefix_lines(
                    "{:30} ".format(""),
                    v.verbose_string() if verbose else v.short_string(),
                ),
            )
            for k, v in _six.iteritems(value_dict)
        )
    else:
        return "(None)"
:param flytekit.models.literals.LiteralMap literal_map:
:param bool verbose:
:rtype: Text
https://github.com/flyteorg/flytekit/blob/6c032035563ae645b0b93558b3fe3362080057ea/flytekit/clis/flyte_cli/main.py#L104-L123
import configparser as _configparser import importlib as _importlib import os import os as _os import stat as _stat import sys as _sys from typing import Callable, Dict, List, Tuple, Union import click as _click import requests as _requests import six as _six from flyteidl.admin import launch_plan_pb2 as _launch_plan_pb2 from flyteidl.admin import task_pb2 as _task_pb2 from flyteidl.admin import workflow_pb2 as _workflow_pb2 from flyteidl.core import identifier_pb2 as _identifier_pb2 from flyteidl.core import literals_pb2 as _literals_pb2 from flyteidl.core import tasks_pb2 as _core_tasks_pb2 from flyteidl.core import workflow_pb2 as _core_workflow_pb2 from google.protobuf.json_format import MessageToJson from google.protobuf.pyext.cpp_message import GeneratedProtocolMessageType as _GeneratedProtocolMessageType from flytekit import __version__ from flytekit.clients import friendly as _friendly_client from flytekit.clis.helpers import construct_literal_map_from_parameter_map as _construct_literal_map_from_parameter_map from flytekit.clis.helpers import construct_literal_map_from_variable_map as _construct_literal_map_from_variable_map from flytekit.clis.helpers import hydrate_registration_parameters from flytekit.clis.helpers import parse_args_into_dict as _parse_args_into_dict from flytekit.common import launch_plan as _launch_plan_common from flytekit.common import utils as _utils from flytekit.common import workflow_execution as _workflow_execution_common from flytekit.common.core import identifier as _identifier from flytekit.common.exceptions import user as _user_exceptions from flytekit.common.tasks import task as _tasks_common from flytekit.common.types import helpers as _type_helpers from flytekit.common.utils import load_proto_from_file as _load_proto_from_file from flytekit.configuration import auth as _auth_config from flytekit.configuration import platform as _platform_config from flytekit.configuration import set_flyte_config_file from flytekit.interfaces.data import data_proxy as _data_proxy from flytekit.interfaces.data.data_proxy import Data from flytekit.models import common as _common_models from flytekit.models import filters as _filters from flytekit.models import launch_plan as _launch_plan from flytekit.models import literals as _literals from flytekit.models import named_entity as _named_entity from flytekit.models.admin import common as _admin_common from flytekit.models.common import AuthRole as _AuthRole from flytekit.models.common import RawOutputDataConfig as _RawOutputDataConfig from flytekit.models.core import execution as _core_execution_models from flytekit.models.core import identifier as _core_identifier from flytekit.models.execution import ExecutionMetadata as _ExecutionMetadata from flytekit.models.execution import ExecutionSpec as _ExecutionSpec from flytekit.models.matchable_resource import ClusterResourceAttributes as _ClusterResourceAttributes from flytekit.models.matchable_resource import ExecutionClusterLabel as _ExecutionClusterLabel from flytekit.models.matchable_resource import ExecutionQueueAttributes as _ExecutionQueueAttributes from flytekit.models.matchable_resource import MatchableResource as _MatchableResource from flytekit.models.matchable_resource import MatchingAttributes as _MatchingAttributes from flytekit.models.matchable_resource import PluginOverride as _PluginOverride from flytekit.models.matchable_resource import PluginOverrides as _PluginOverrides from flytekit.models.project import Project as _Project from 
flytekit.models.schedule import Schedule as _Schedule from flytekit.tools.fast_registration import get_additional_distribution_loc as _get_additional_distribution_loc try: import urllib.parse as _urlparse except ImportError: import urlparse as _urlparse _tt = _six.text_type _default_config_file_dir = ".flyte" _default_config_file_name = "config" def _welcome_message(): _click.secho("Welcome to Flyte CLI! Version: {}".format(_tt(__version__)), bold=True) def _get_user_filepath_home(): return _os.path.expanduser("~") def _get_config_file_path(): home = _get_user_filepath_home() return _os.path.join(home, _default_config_file_dir, _default_config_file_name) def _detect_default_config_file(): config_file = _get_config_file_path() if _get_user_filepath_home() and _os.path.exists(config_file): _click.secho("Using default config file at {}".format(_tt(config_file)), fg="blue") set_flyte_config_file(config_file_path=config_file) else: _click.secho( """Config file not found at default location, relying on environment variables instead. To setup your config file run 'flyte-cli setup-config'""", fg="blue", )
Apache License 2.0
polixir/zoopt
zoopt/solution.py
Solution.__init__
python
def __init__(self, x=[], value=nan, resample_value=None, attach=None, post_attach=None,
             is_in_possible_solution=False, no=None):
    self.__x = x
    self.__value = value
    self.__resample_value = resample_value
    self.__attach = attach
    self.__post_attach = post_attach
    self.__is_in_possible_solution = is_in_possible_solution
    self.__no = no
    return
Initialization.

:param x: a list
:param value: objective value
:param resample_value: re-evaluated value. Only meaningful when using the SSRACOS algorithm, which records the noise-reduction result in this parameter.
:param attach: attached structure. self.set_attach() is called after a solution is constructed. The behavior can be customized by overriding the Objective.__inherit function (it does nothing by default). See Objective.set_inherit_func() for details.
:param post_attach: the attachment to the solution. self.set_post_attach() is called after a solution is evaluated. The behavior can be customized by overriding the Objective.__post_inherit function (it does nothing by default). See Objective.set_post_inherit_func() for details.
:param is_in_possible_solution: only meaningful when using the SSRACOS algorithm, where a solution is added to the "possible solution list" after being re-sampled. This flag marks whether the solution has already been added to that list.
:param no: serial number, used by ASRacos.
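A minimal construction sketch (added for illustration, not from the repo); the top-level zoopt import is an assumption about the package's public namespace:

from zoopt import Solution  # assumed public import; the class lives in zoopt/solution.py

# A hypothetical 2-dimensional solution with a known objective value.
sol = Solution(x=[0.3, -1.2], value=0.75)
# The remaining fields keep their defaults (resample_value=None, attach=None, ...).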
https://github.com/polixir/zoopt/blob/050f56a8b288c5ee33d7eabadaf8d35726504e37/zoopt/solution.py#L23-L54
import copy
import numpy as np
from zoopt.utils.tool_function import ToolFunction
from zoopt.utils.zoo_global import pos_inf, neg_inf, nan, gl


class Solution:
MIT License
mairas/hysen
custom_components/hysen/climate.py
HASS_Hysen_Climate_Device.max_temp
python
def max_temp(self):
    return self._max_temp
Return the maximum settable temperature.
https://github.com/mairas/hysen/blob/538e191710efd4cb5e9fbb43a909bf5d66786bc4/custom_components/hysen/climate.py#L639-L641
DEFAULT_NAME = 'Hysen Thermostat Controller' VERSION = '2.1.9' import asyncio import logging import binascii import voluptuous as vol import homeassistant.helpers.config_validation as cv import socket import datetime import time from datetime import timedelta from homeassistant import util try: from homeassistant.components.climate import ClimateEntity, PLATFORM_SCHEMA, ENTITY_ID_FORMAT except ImportError: from homeassistant.components.climate import ClimateDevice as ClimateEntity, PLATFORM_SCHEMA, ENTITY_ID_FORMAT from homeassistant.const import ( ATTR_TEMPERATURE, ATTR_ENTITY_ID, ATTR_UNIT_OF_MEASUREMENT, CONF_NAME, CONF_HOST, CONF_MAC, CONF_TIMEOUT, CONF_CUSTOMIZE, STATE_UNAVAILABLE) from homeassistant.components.climate.const import ( DOMAIN, SUPPORT_TARGET_TEMPERATURE, SUPPORT_PRESET_MODE, HVAC_MODE_OFF, HVAC_MODE_HEAT, HVAC_MODE_AUTO, PRESET_AWAY, PRESET_NONE, CURRENT_HVAC_HEAT, CURRENT_HVAC_IDLE) from homeassistant.helpers.entity import async_generate_entity_id _LOGGER = logging.getLogger(__name__) SUPPORT_FLAGS = SUPPORT_PRESET_MODE | SUPPORT_TARGET_TEMPERATURE SUPPORT_HVAC = [HVAC_MODE_AUTO, HVAC_MODE_HEAT, HVAC_MODE_OFF] SUPPORT_PRESET = [PRESET_NONE, PRESET_AWAY] DEFAULT_OPERATIONS_LIST = [HVAC_MODE_OFF, HVAC_MODE_HEAT, HVAC_MODE_AUTO] SCAN_INTERVAL = timedelta(seconds=20) MIN_TIME_BETWEEN_SCANS = SCAN_INTERVAL MIN_TIME_BETWEEN_FORCED_SCANS = timedelta(milliseconds=100) DEFAULT_TIMEOUT = 5 UPDATE_RETRY_BEFORE_ERROR = 3 CONF_WIFI_SSID = "ssid" CONF_WIFI_PASSWORD ="password" CONF_WIFI_SECTYPE = "sectype" CONF_WIFI_TIMEOUT = "timeout" SERVICE_SET_WIFI = "hysen_config_wifi" SET_WIFI_SCHEMA = vol.Schema({ vol.Optional(ATTR_ENTITY_ID,default="all"): cv.comp_entity_ids, vol.Required(CONF_WIFI_SSID): cv.string, vol.Required(CONF_WIFI_PASSWORD): cv.string, vol.Required(CONF_WIFI_SECTYPE): vol.Range(min=0, max=4), vol.Optional(CONF_WIFI_TIMEOUT,default=DEFAULT_TIMEOUT): vol.Range(min=0, max=99), }) DEFAULT_LOOPMODE = 0 DEFAULT_SENSORMODE = 0 DEFAULT_MINTEMP = 5 DEFAULT_MAXTEMP = 35 DEFAULT_ROOMTEMPOFFSET=0 DEFAULT_ANTIFREEZE = 1 DEFAULT_POWERONMEM = 1 DEFAULT_EXTERNALSENSORTEMPRANGE = 42 DEFAULT_DEADZONESENSORTEMPRANGE = 1 CONFIG_ADVANCED_LOOPMODE = "loop_mode" CONFIG_ADVANCED_SENSORMODE = "sensor_mode" CONFIG_ADVANCED_MINTEMP="min_temp" CONFIG_ADVANCED_MAXTEMP="max_temp" CONFIG_ADVANCED_ROOMTEMPOFFSET="roomtemp_offset" CONFIG_ADVANCED_ANTIFREEZE="anti_freeze_function" CONFIG_ADVANCED_POWERONMEM="poweron_mem" CONFIG_ADVANCED_EXTERNALSENSORTEMPRANGE = "external_sensor_temprange" CONFIG_ADVANCED_DEADZONESENSORTEMPRANGE = "deadzone_sensor_temprange" SERVICE_SET_ADVANCED = "hysen_set_advanced" SET_ADVANCED_SCHEMA = vol.Schema({ vol.Required(ATTR_ENTITY_ID): cv.comp_entity_ids, vol.Optional(CONFIG_ADVANCED_LOOPMODE,default=DEFAULT_LOOPMODE): vol.Range(min=0, max=2), vol.Optional(CONFIG_ADVANCED_SENSORMODE,default=DEFAULT_SENSORMODE): vol.Range(min=0, max=2), vol.Optional(CONFIG_ADVANCED_MINTEMP,default=DEFAULT_MINTEMP): vol.Range(min=5, max=99), vol.Optional(CONFIG_ADVANCED_MAXTEMP,default=DEFAULT_MAXTEMP): vol.Range(min=5, max=99), vol.Optional(CONFIG_ADVANCED_ROOMTEMPOFFSET,default=DEFAULT_ROOMTEMPOFFSET): vol.Coerce(float), vol.Optional(CONFIG_ADVANCED_ANTIFREEZE,default=DEFAULT_ANTIFREEZE): vol.Range(min=0, max=1), vol.Optional(CONFIG_ADVANCED_POWERONMEM,default=DEFAULT_POWERONMEM): vol.Range(min=0, max=1), vol.Optional(CONFIG_ADVANCED_EXTERNALSENSORTEMPRANGE,default=DEFAULT_EXTERNALSENSORTEMPRANGE): vol.Range(min=5, max=99), 
vol.Optional(CONFIG_ADVANCED_DEADZONESENSORTEMPRANGE,default=DEFAULT_DEADZONESENSORTEMPRANGE): vol.Range(min=1, max=99), }) CONFIG_REMOTELOCK = "remotelock" SERVICE_SET_REMOTELOCK = "hysen_set_remotelock" SET_REMOTELOCK_SCHEMA = vol.Schema({ vol.Required(ATTR_ENTITY_ID): cv.comp_entity_ids, vol.Required(CONFIG_REMOTELOCK): vol.Range(min=0, max=1), }) CONFIG_WEEK_PERIOD1_START = 'week_period1_start' CONFIG_WEEK_PERIOD1_TEMP = 'week_period1_temp' CONFIG_WEEK_PERIOD2_START = 'week_period2_start' CONFIG_WEEK_PERIOD2_TEMP = 'week_period2_temp' CONFIG_WEEK_PERIOD3_START = 'week_period3_start' CONFIG_WEEK_PERIOD3_TEMP = 'week_period3_temp' CONFIG_WEEK_PERIOD4_START = 'week_period4_start' CONFIG_WEEK_PERIOD4_TEMP = 'week_period4_temp' CONFIG_WEEK_PERIOD5_START = 'week_period5_start' CONFIG_WEEK_PERIOD5_TEMP = 'week_period5_temp' CONFIG_WEEK_PERIOD6_START = 'week_period6_start' CONFIG_WEEK_PERIOD6_TEMP = 'week_period6_temp' CONFIG_WEEKEND_PERIOD1_START = 'weekend_period1_start' CONFIG_WEEKEND_PERIOD1_TEMP = 'weekend_period1_temp' CONFIG_WEEKEND_PERIOD2_START = 'weekend_period2_start' CONFIG_WEEKEND_PERIOD2_TEMP = 'weekend_period2_temp' SERVICE_SET_TIME_SCHEDULE = "hysen_set_timeschedule" SET_TIME_SCHEDULE_SCHEMA = vol.Schema({ vol.Required(ATTR_ENTITY_ID): cv.comp_entity_ids, vol.Required(CONFIG_WEEK_PERIOD1_START): cv.time, vol.Required(CONFIG_WEEK_PERIOD1_TEMP): vol.Coerce(float), vol.Required(CONFIG_WEEK_PERIOD2_START): cv.time, vol.Required(CONFIG_WEEK_PERIOD2_TEMP): vol.Coerce(float), vol.Required(CONFIG_WEEK_PERIOD3_START): cv.time, vol.Required(CONFIG_WEEK_PERIOD3_TEMP): vol.Coerce(float), vol.Required(CONFIG_WEEK_PERIOD4_START): cv.time, vol.Required(CONFIG_WEEK_PERIOD4_TEMP): vol.Coerce(float), vol.Required(CONFIG_WEEK_PERIOD5_START): cv.time, vol.Required(CONFIG_WEEK_PERIOD5_TEMP): vol.Coerce(float), vol.Required(CONFIG_WEEK_PERIOD6_START): cv.time, vol.Required(CONFIG_WEEK_PERIOD6_TEMP): vol.Coerce(float), vol.Required(CONFIG_WEEKEND_PERIOD1_START): cv.time, vol.Required(CONFIG_WEEKEND_PERIOD1_TEMP): vol.Coerce(float), vol.Required(CONFIG_WEEKEND_PERIOD2_START): cv.time, vol.Required(CONFIG_WEEKEND_PERIOD2_TEMP): vol.Coerce(float), }) HYSEN_POWERON = 1 HYSEN_POWEROFF = 0 HYSEN_MANUALMODE = 0 HYSEN_AUTOMODE = 1 DEFAULT_TARGET_TEMP = 20 DEFAULT_TARGET_TEMP_STEP = 1 DEFAULT_CONF_SYNC_CLOCK_TIME_ONCE_PER_DAY = False DEAFULT_CONF_USE_HA_FOR_HYSTERSIS = False DEAFULT_HA_FOR_HYSTERSIS_SAMPLE_COUNT = 2 DEAFULT_CONF_USE_HA_FOR_HYSTERSISA_BAIS = 0.5 CONF_DEVICES = 'devices' CONF_TARGET_TEMP = 'target_temp_default' CONF_TARGET_TEMP_STEP = 'target_temp_step' CONF_TIMEOUT = 'update_timeout' CONF_SYNC_CLOCK_TIME_ONCE_PER_DAY = 'sync_clock_time_per_day' CONF_GETCURERNTTEMP_FROM_SENSOR = "current_temp_from_sensor_override" CONF_USE_HA_FOR_HYSTERSIS="use_HA_for_hysteresis" CONF_DNSHOST = 'host_dns' CONF_HOST_PORT = 'host_port' PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({ vol.Optional(CONF_DEVICES, default={}): { cv.string: vol.Schema({ vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string, vol.Optional(CONF_DNSHOST): cv.string, vol.Optional(CONF_HOST): vol.Match(r"^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}$"), vol.Optional(CONF_HOST_PORT, default=80): vol.Range(min=1, max=65535), vol.Required(CONF_MAC): vol.Match("(?:[0-9a-fA-F]:?){12}"), vol.Optional(CONF_TIMEOUT, default=DEFAULT_TIMEOUT): vol.Range(min=0, max=99), vol.Optional(CONF_TARGET_TEMP, default=DEFAULT_TARGET_TEMP): vol.Range(min=5, max=99), vol.Optional(CONF_TARGET_TEMP_STEP, default=DEFAULT_TARGET_TEMP_STEP): vol.Coerce(float), 
vol.Optional(CONF_SYNC_CLOCK_TIME_ONCE_PER_DAY, default=DEFAULT_CONF_SYNC_CLOCK_TIME_ONCE_PER_DAY): cv.boolean, vol.Optional(CONF_GETCURERNTTEMP_FROM_SENSOR, default=-1): vol.Range(min=-1, max=1), vol.Optional(CONF_USE_HA_FOR_HYSTERSIS, default=DEAFULT_CONF_USE_HA_FOR_HYSTERSIS): cv.boolean, }) }, }) async def devices_from_config(domain_config, hass): hass_devices = [] for device_id, config in domain_config[CONF_DEVICES].items(): name = config.get(CONF_NAME) dns_name = config.get(CONF_DNSHOST) ip_addr = config.get(CONF_HOST) ip_port = config.get(CONF_HOST_PORT) mac_addr = config.get(CONF_MAC) timeout = config.get(CONF_TIMEOUT) use_HA_for_hysteresis = config.get(CONF_USE_HA_FOR_HYSTERSIS) HA_hysteresis_bais = DEAFULT_CONF_USE_HA_FOR_HYSTERSISA_BAIS HA_hysteresis_sample_count_target = DEAFULT_HA_FOR_HYSTERSIS_SAMPLE_COUNT if (dns_name != None and ip_addr == None): try: ip_addr = socket.gethostbyname(dns_name) _LOGGER.warning("Discovered Broadlink Hysen Climate device address: %s, from name %s",ip_addr,dns_name) except Exception as error: _LOGGER.error("Failed resolve DNS name to IP for Broadlink Hysen Climate device:%s, error:%s",dns_name,error) operation_list = DEFAULT_OPERATIONS_LIST target_temp_default = config.get(CONF_TARGET_TEMP) target_temp_step = config.get(CONF_TARGET_TEMP_STEP) sync_clock_time_per_day = config.get(CONF_SYNC_CLOCK_TIME_ONCE_PER_DAY) get_current_temp_from_sensor_override = config.get(CONF_GETCURERNTTEMP_FROM_SENSOR) try: if (ip_addr != None): blmac_addr = binascii.unhexlify(mac_addr.encode().replace(b':', b'')) newhassdevice = await create_hysen_device(device_id, hass, name, broadlink_hysen_climate_device((ip_addr, ip_port), blmac_addr,timeout), target_temp_default, target_temp_step, operation_list, sync_clock_time_per_day, get_current_temp_from_sensor_override,use_HA_for_hysteresis,HA_hysteresis_bais,HA_hysteresis_sample_count_target) if (newhassdevice is not None): hass_devices.append(newhassdevice) else: _LOGGER.error("Failed to add Broadlink Hysen Climate device:%s @%s, %s , to HA",device_id, ip_addr, mac_addr.upper()) else: hysen_devices = broadlink_hysen_climate_device_discover(timeout) hysen_devicecount = len(hysen_devices) if hysen_devicecount > 0 : for hysen_device in hysen_devices: devicemac = ':'.join(format(x, '02x') for x in hysen_device.mac) devicemac = devicemac.upper() if (devicemac == mac_addr.upper()): newhassdevice = await create_hysen_device(device_id, hass, name, broadlink_hysen_climate_device((hysen_device.host[0], hysen_device.host[1]), devicemac,timeout), target_temp_default, target_temp_step, operation_list, sync_clock_time_per_day, get_current_temp_from_sensor_override,use_HA_for_hysteresis,HA_hysteresis_bais,HA_hysteresis_sample_count_target) if (newhassdevice is not None): hass_devices.append(newhassdevice) _LOGGER.warning("Discovered Broadlink Hysen Climate device : %s, at %s",devicemac,hysen_device.host[0]) else: _LOGGER.error("Failed to add Broadlink Hysen Climate device:%s @%s, %s , to HA",device_id, ip_addr, devicemac) else: _LOGGER.error("Broadlink Hysen Climate device MAC:%s not found.",mac_addr) else: _LOGGER.error("No Broadlink Hysen Climate device(s) found.") return [] except Exception as error: _LOGGER.error("Failed to connect to Broadlink Hysen Climate device MAC:%s, IP:%s, Error:%s", mac_addr,ip_addr, error) return hass_devices async def create_hysen_device(device_id,hass,name, broadlink_hysen_climate_device, target_temp_default,target_temp_step,operation_list, 
sync_clock_time_per_day,get_current_temp_from_sensor_override,use_HA_for_hysteresis,HA_hysteresis_bais,HA_hysteresis_sample_count_target): newhassdevice = None entity_id = async_generate_entity_id(ENTITY_ID_FORMAT, device_id, hass=hass) try: if (broadlink_hysen_climate_device.auth() == False): raise Exception('broadlink_response_error:','Inital auth failed for device') newhassdevice = HASS_Hysen_Climate_Device(entity_id, hass, name, broadlink_hysen_climate_device, target_temp_default,target_temp_step,operation_list, sync_clock_time_per_day,get_current_temp_from_sensor_override,use_HA_for_hysteresis,HA_hysteresis_bais,HA_hysteresis_sample_count_target) except Exception as error: _LOGGER.error("Failed to Authenticate with Broadlink Hysen Climate device:%s , %s ",entity_id, error) return newhassdevice async def async_setup_platform(hass, config, async_add_devices, discovery_info=None): """ ssid: yoursid password: yourpassword sectype: 4 timeout: 10 """ async def async_hysen_set_wifi(thermostat,service): ssid = service.data.get(CONF_WIFI_SSID) password = service.data.get(CONF_WIFI_PASSWORD) sectype = service.data.get(CONF_WIFI_SECTYPE) timeout = service.data.get(CONF_WIFI_TIMEOUT) try: broadlink_hysen_climate_device_setup(ssid, password, sectype) except Exception as error: _LOGGER.error("Failed to send Wifi setup to Broadlink Hysen Climate device(s):%s",error) return False _LOGGER.warning("Wifi setup to Broadlink Hysen Climate device(s) sent.") try: hysen_devices = broadlink_hysen_climate_device_discover(timeout) hysen_devicecount = len(hysen_devices) if hysen_devicecount > 0 : for hysen_device in hysen_devices: devicemac = ':'.join(format(x, '02x') for x in hysen_device.mac) _LOGGER.warning("Discovered Broadlink Hysen Climate device : %s, at %s, named: %s",devicemac.upper(),hysen_device.host[0],hysen_device.name) else: _LOGGER.warning("No Broadlink Hysen Climate device(s) found.") except Exception as error: _LOGGER.error("Failed to discover Broadlink Hysen Climate device(s):%s",error) return False return True """ entity_id: climate.house_thermostat poweron_mem: 1 """ async def async_hysen_set_advanced(thermostat,service): entity_id = service.data.get(ATTR_ENTITY_ID) if thermostat.entity_id not in entity_id: _LOGGER.error("Broadlink Hysen Climate device entity_id not found:%s",entity_id) return False loop_mode = service.data.get(CONFIG_ADVANCED_LOOPMODE) sensor_mode = service.data.get(CONFIG_ADVANCED_SENSORMODE) external_sensor_temprange = service.data.get(CONFIG_ADVANCED_EXTERNALSENSORTEMPRANGE) deadzone_sensor_temprange = service.data.get(CONFIG_ADVANCED_DEADZONESENSORTEMPRANGE) max_temp = service.data.get(CONFIG_ADVANCED_MAXTEMP) min_temp = service.data.get(CONFIG_ADVANCED_MINTEMP) roomtemp_offset = service.data.get(CONFIG_ADVANCED_ROOMTEMPOFFSET) anti_freeze_function = service.data.get(CONFIG_ADVANCED_ANTIFREEZE) poweron_mem = service.data.get(CONFIG_ADVANCED_POWERONMEM) try: thermostat.set_advanced(loop_mode, sensor_mode, external_sensor_temprange, deadzone_sensor_temprange, max_temp, min_temp, roomtemp_offset, anti_freeze_function, poweron_mem) except Exception as error: _LOGGER.error("Failed to send Advanced setup to Broadlink Hysen Climate device:%s,:",entity_id,error) return False _LOGGER.info("Advanced setup sent to Broadlink Hysen Climate device:%s",entity_id) return True """ entity_id: climate.house_thermostat week_period1_start: 06:30 week_period1_temp: 20.5 week_period2_start: 09:00 week_period2_temp: 17.0 week_period3_start: 13:00 week_period3_temp: 17.0 week_period4_start: 
13:00 week_period4_temp: 17.0 week_period5_start: 17:00 week_period5_temp: 20.5 week_period6_start: 22:00 week_period6_temp: 17.0 weekend_period1_start: 7:30 weekend_period1_temp: 20.5 weekend_period2_start: 22:30 weekend_period2_temp: 17.0 """ async def async_hysen_set_time_schedule(thermostat,service): entity_id = service.data.get(ATTR_ENTITY_ID) if thermostat.entity_id not in entity_id: _LOGGER.error("Broadlink Hysen Climate device entity_id not found:%s",entity_id) return False WEEK_PERIOD1_START = service.data.get(CONFIG_WEEK_PERIOD1_START) WEEK_PERIOD1_TEMP = service.data.get(CONFIG_WEEK_PERIOD1_TEMP) WEEK_PERIOD2_START = service.data.get(CONFIG_WEEK_PERIOD2_START) WEEK_PERIOD2_TEMP = service.data.get(CONFIG_WEEK_PERIOD2_TEMP) WEEK_PERIOD3_START = service.data.get(CONFIG_WEEK_PERIOD3_START) WEEK_PERIOD3_TEMP = service.data.get(CONFIG_WEEK_PERIOD3_TEMP) WEEK_PERIOD4_START = service.data.get(CONFIG_WEEK_PERIOD4_START) WEEK_PERIOD4_TEMP = service.data.get(CONFIG_WEEK_PERIOD4_TEMP) WEEK_PERIOD5_START = service.data.get(CONFIG_WEEK_PERIOD5_START) WEEK_PERIOD5_TEMP = service.data.get(CONFIG_WEEK_PERIOD5_TEMP) WEEK_PERIOD6_START = service.data.get(CONFIG_WEEK_PERIOD6_START) WEEK_PERIOD6_TEMP = service.data.get(CONFIG_WEEK_PERIOD6_TEMP) WEEKEND_PERIOD1_START = service.data.get(CONFIG_WEEKEND_PERIOD1_START) WEEKEND_PERIOD1_TEMP = service.data.get(CONFIG_WEEKEND_PERIOD1_TEMP) WEEKEND_PERIOD2_START = service.data.get(CONFIG_WEEKEND_PERIOD2_START) WEEKEND_PERIOD2_TEMP = service.data.get(CONFIG_WEEKEND_PERIOD2_TEMP) week_period_1 = dict() week_period_1["start_hour"] = int(WEEK_PERIOD1_START.strftime('%H')) week_period_1["start_minute"] = int(WEEK_PERIOD1_START.strftime('%M')) week_period_1["temp"] = float(WEEK_PERIOD1_TEMP) week_period_2 = dict() week_period_2["start_hour"] = int(WEEK_PERIOD2_START.strftime('%H')) week_period_2["start_minute"] = int(WEEK_PERIOD2_START.strftime('%M')) week_period_2["temp"] = float(WEEK_PERIOD2_TEMP) week_period_3 = dict() week_period_3["start_hour"] = int(WEEK_PERIOD3_START.strftime('%H')) week_period_3["start_minute"] = int(WEEK_PERIOD3_START.strftime('%M')) week_period_3["temp"] = float(WEEK_PERIOD3_TEMP) week_period_4 = dict() week_period_4["start_hour"] = int(WEEK_PERIOD4_START.strftime('%H')) week_period_4["start_minute"] = int(WEEK_PERIOD4_START.strftime('%M')) week_period_4["temp"] = float(WEEK_PERIOD4_TEMP) week_period_5 = dict() week_period_5["start_hour"] = int(WEEK_PERIOD5_START.strftime('%H')) week_period_5["start_minute"] = int(WEEK_PERIOD5_START.strftime('%M')) week_period_5["temp"] = float(WEEK_PERIOD5_TEMP) week_period_6 = dict() week_period_6["start_hour"] = int(WEEK_PERIOD6_START.strftime('%H')) week_period_6["start_minute"] = int(WEEK_PERIOD6_START.strftime('%M')) week_period_6["temp"] = float(WEEK_PERIOD6_TEMP) weekend_period_1 = dict() weekend_period_1["start_hour"] = int(WEEKEND_PERIOD1_START.strftime('%H')) weekend_period_1["start_minute"] = int(WEEKEND_PERIOD1_START.strftime('%M')) weekend_period_1["temp"] = float(WEEKEND_PERIOD1_TEMP) weekend_period_2 = dict() weekend_period_2["start_hour"] = int(WEEKEND_PERIOD2_START.strftime('%H')) weekend_period_2["start_minute"] = int(WEEKEND_PERIOD2_START.strftime('%M')) weekend_period_2["temp"] = float(WEEKEND_PERIOD2_TEMP) weekday = [week_period_1, week_period_2, week_period_3, week_period_4, week_period_5, week_period_6] weekend = [weekend_period_1, weekend_period_2] try: thermostat.set_schedule(weekday, weekend) except Exception as error: _LOGGER.error("Failed to send Time schedule setup to 
Broadlink Hysen Climate device:%s,:",entity_id,error) return False _LOGGER.info("Time schedule sent to Broadlink Hysen Climate device:%s",entity_id) return True """ entity_id: climate.house_thermostat remotelock: 1 """ async def async_hysen_set_remotelock(thermostat,service): entity_id = service.data.get(ATTR_ENTITY_ID) if thermostat.entity_id not in entity_id: _LOGGER.error("Broadlink Hysen Climate device entity_id not found:%s",entity_id) return False tamper_lock = service.data.get(CONFIG_REMOTELOCK) try: thermostat.set_lock(tamper_lock) except Exception as error: _LOGGER.error("Failed to send Tamper Lock setting to Broadlink Hysen Climate device:%s,:",entity_id,error) return False _LOGGER.info("Remote Lock setting sent to Broadlink Hysen Climate device:%s",entity_id) return True hass.data[DOMAIN].async_register_entity_service( SERVICE_SET_WIFI, SET_WIFI_SCHEMA, async_hysen_set_wifi ) hass.data[DOMAIN].async_register_entity_service( SERVICE_SET_ADVANCED, SET_ADVANCED_SCHEMA, async_hysen_set_advanced ) hass.data[DOMAIN].async_register_entity_service( SERVICE_SET_TIME_SCHEDULE, SET_TIME_SCHEDULE_SCHEMA, async_hysen_set_time_schedule ) hass.data[DOMAIN].async_register_entity_service( SERVICE_SET_REMOTELOCK, SET_REMOTELOCK_SCHEMA, async_hysen_set_remotelock ) hass_devices = await devices_from_config(config, hass) if hass_devices: async_add_devices(hass_devices) class HASS_Hysen_Climate_Device(ClimateEntity): def __init__(self, entity_id, hass, name, broadlink_hysen_climate_device, target_temp_default, target_temp_step, operation_list,sync_clock_time_per_day,get_current_temp_from_sensor_override,use_HA_for_hysteresis,HA_hysteresis_bais,HA_hysteresis_sample_count_target): self.entity_id = entity_id self._hass = hass self._name = name self._HysenData = [] self._broadlink_hysen_climate_device = broadlink_hysen_climate_device self._sync_clock_time_per_day = sync_clock_time_per_day self._use_HA_for_hysteresis = use_HA_for_hysteresis self._HA_hysteresis_bais = HA_hysteresis_bais self._HA_hysteresis_sample_count_target = HA_hysteresis_sample_count_target self._use_HA_for_hysteresis_sample_count = 0 self._current_day_of_week = 0 self._get_current_temp_from_sensor_override = get_current_temp_from_sensor_override self._target_temperature = target_temp_default self._target_temperature_step = target_temp_step self._unit_of_measurement = hass.config.units.temperature_unit self._power_state = HYSEN_POWEROFF self._auto_state = HYSEN_MANUALMODE self._current_operation = STATE_UNAVAILABLE self._operation_list = operation_list self._away_mode = False self._awaymodeLastState = HVAC_MODE_OFF self._is_heating_active = 0 self._auto_override = 0 self._remote_lock = 0 self._loop_mode = DEFAULT_LOOPMODE self._sensor_mode = DEFAULT_SENSORMODE self._min_temp = DEFAULT_MINTEMP self._max_temp = DEFAULT_MAXTEMP self._roomtemp_offset = DEFAULT_ROOMTEMPOFFSET self._anti_freeze_function = DEFAULT_ANTIFREEZE self._poweron_mem = DEFAULT_POWERONMEM self._external_sensor_temprange = DEFAULT_EXTERNALSENSORTEMPRANGE self._deadzone_sensor_temprange = DEFAULT_DEADZONESENSORTEMPRANGE self._room_temp = 0 self._external_temp = 0 self._clock_hour = 0 self._clock_min = 0 self._clock_sec = 0 self._day_of_week = 1 self._week_day = "" self._week_end = "" self._update_error_count = 0 self._available = True @property def name(self): return self._name @property def available(self) -> bool: return self._available @property def temperature_unit(self): return self._unit_of_measurement @property def current_temperature(self): if 
self._get_current_temp_from_sensor_override == 0: return self._room_temp elif self._get_current_temp_from_sensor_override == 1: return self._external_temp else: if self._sensor_mode == 1: return self._external_temp else: return self._room_temp @property def min_temp(self): return self._min_temp @property
Apache License 2.0
pyccel/psydac
psydac/fem/splines.py
SplineSpace.compute_interpolant
python
def compute_interpolant( self, values, field ):
    assert len( values ) == self.nbasis
    assert isinstance( field, FemField )
    assert field.space is self

    if not self._interpolation_ready:
        self.init_interpolation()

    n = self.nbasis
    c = field.coeffs
    c[0:n] = self._interpolator.solve( values )
    c.update_ghost_regions()
Compute field (i.e. update its spline coefficients) such that it interpolates
a certain function $f(x)$ at the Greville points.

Parameters
----------
values : array_like (nbasis,)
    Function values $f(x_i)$ at the 'nbasis' Greville points $x_i$, to be interpolated.

field : FemField
    Input/output argument: spline that has to interpolate the given values.
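A hedged usage sketch (not part of the record): it assumes a 1D SplineSpace built from a uniform grid and that a FemField can be constructed directly from that space; both the constructor call and the FemField(space) signature are assumptions for illustration:

import numpy as np
from psydac.fem.splines import SplineSpace
from psydac.fem.basic import FemField

f = np.sin  # function to interpolate (illustrative choice)

space = SplineSpace(degree=3, grid=np.linspace(0.0, 1.0, 11))  # assumed constructor usage
field = FemField(space)                                        # assumed FemField(space) signature

values = f(space.greville)  # samples at the Greville points, as the docstring requires
space.compute_interpolant(values, field)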
https://github.com/pyccel/psydac/blob/ddd3008a3f704814aa4e790853962243feae5d8a/psydac/fem/splines.py#L404-L431
import numpy as np from scipy.sparse import csc_matrix, csr_matrix, dia_matrix from sympde.topology.space import BasicFunctionSpace from psydac.linalg.stencil import StencilVectorSpace from psydac.linalg.direct_solvers import BandedSolver, SparseSolver from psydac.fem.basic import FemSpace, FemField from psydac.core.bsplines import ( find_span, basis_funs, collocation_matrix, histopolation_matrix, breakpoints, greville, make_knots, elevate_knots, basis_integrals, ) from psydac.utilities.quadratures import gauss_legendre from psydac.utilities.utils import unroll_edges __all__ = ['SplineSpace'] class SplineSpace( FemSpace ): def __init__( self, degree, knots=None, grid=None, multiplicity=None, parent_multiplicity=None, periodic=False, dirichlet=(False, False), basis='B', pads=None ): if basis not in ['B', 'M']: raise ValueError(" only options for basis functions are B or M ") if (knots is not None) and (grid is not None): raise ValueError( 'Cannot provide both grid and knots.' ) if (knots is None) and (grid is None): raise ValueError('Either knots or grid must be provided.') if (multiplicity is not None) and multiplicity<1: raise ValueError('multiplicity should be >=1') if (parent_multiplicity is not None) and parent_multiplicity<1: raise ValueError('parent_multiplicity should be >=1') if multiplicity is None:multiplicity = 1 if parent_multiplicity is None:parent_multiplicity = 1 assert parent_multiplicity >= multiplicity if knots is None: knots = make_knots( grid, degree, periodic, multiplicity ) if grid is None: grid = breakpoints(knots, degree) indices = np.where(np.diff(knots[degree+1:-degree-1])>1e-15)[0] if len(indices)>0: multiplicity = np.diff(indices).max() else: multiplicity = max(1,len(knots[degree+1:-degree-1])) if periodic: nbasis = len(knots) - 2*degree - 1 else: defect = 0 if dirichlet[0]: defect += 1 if dirichlet[1]: defect += 1 nbasis = len(knots) - degree - 1 - defect if basis == 'M': scaling_array = 1 / basis_integrals(knots, degree) else: scaling_array = None self._degree = degree self._pads = pads or degree self._knots = knots self._periodic = periodic self._multiplicity = multiplicity self._dirichlet = dirichlet self._basis = basis self._nbasis = nbasis self._breaks = grid self._ncells = len(grid) - 1 self._greville = greville(knots, degree, periodic) self._ext_greville = greville(elevate_knots(knots, degree, periodic), degree+1, periodic) self._scaling_array = scaling_array self._parent_multiplicity = parent_multiplicity self._histopolation_grid = unroll_edges(self.domain, self.ext_greville) self._vector_space = StencilVectorSpace([nbasis], [self._pads], [periodic]) self._interpolation_ready = False self._histopolation_ready = False self._symbolic_space = None @property def histopolation_grid(self): return self._histopolation_grid def init_interpolation( self ): imat = collocation_matrix( knots = self.knots, degree = self.degree, periodic = self.periodic, normalization = self.basis, xgrid = self.greville ) if self.periodic: self._interpolator = SparseSolver( csc_matrix( imat ) ) else: dmat = dia_matrix( imat ) l = abs( dmat.offsets.min() ) u = dmat.offsets.max() cmat = csr_matrix( dmat ) bmat = np.zeros( (1+u+2*l, cmat.shape[1]) ) for i,j in zip( *cmat.nonzero() ): bmat[u+l+i-j,j] = cmat[i,j] self._interpolator = BandedSolver( u, l, bmat ) self.imat = imat self._interpolation_ready = True def init_histopolation( self ): imat = histopolation_matrix( knots = self.knots, degree = self.degree, periodic = self.periodic, normalization = self.basis, xgrid = self.ext_greville 
) self.hmat= imat if self.periodic: self._histopolator = SparseSolver( csc_matrix( imat ) ) else: dmat = dia_matrix( imat ) l = abs( dmat.offsets.min() ) u = dmat.offsets.max() cmat = csr_matrix( dmat ) bmat = np.zeros( (1+u+2*l, cmat.shape[1]) ) for i,j in zip( *cmat.nonzero() ): bmat[u+l+i-j,j] = cmat[i,j] self._histopolator = BandedSolver( u, l, bmat ) self._histopolation_ready = True @property def ldim( self ): return 1 @property def periodic( self ): return self._periodic @property def pads( self ): return self._pads @property def mapping( self ): return None @property def vector_space( self ): return self._vector_space @property def is_product(self): return False @property def symbolic_space( self ): return self._symbolic_space @symbolic_space.setter def symbolic_space( self, symbolic_space ): assert isinstance(symbolic_space, BasicFunctionSpace) self._symbolic_space = symbolic_space def eval_field( self, field, *eta , weights=None): assert isinstance( field, FemField ) assert field.space is self assert len( eta ) == 1 span = find_span( self.knots, self.degree, eta[0] ) basis = basis_funs( self.knots, self.degree, eta[0], span ) index = slice(span-self.degree, span+1) if self.basis == 'M': basis *= self._scaling_array[index] coeffs = field.coeffs[index].copy() if weights: coeffs *= weights[index] return np.dot( coeffs, basis ) def eval_field_gradient( self, field, *eta , weights=None): assert isinstance( field, FemField ) assert field.space is self assert len( eta ) == 1 raise NotImplementedError() @property def is_scalar( self ): return True @property def basis( self ): return self._basis @property def interpolation_grid( self ): if self.basis == 'B': return self.greville elif self.basis == 'M': return self.ext_greville else: raise NotImplementedError() @property def nbasis( self ): return self._nbasis @property def degree( self ): return self._degree @property def ncells( self ): return self._ncells @property def dirichlet( self ): return self._dirichlet @property def knots( self ): return self._knots @property def multiplicity( self ): return self._multiplicity @property def parent_multiplicity( self ): return self._parent_multiplicity @property def breaks( self ): return self._breaks @property def domain( self ): breaks = self.breaks return breaks[0], breaks[-1] @property def greville( self ): return self._greville @property def ext_greville( self ): return self._ext_greville @property def scaling_array(self): return self._scaling_array
MIT License
tektoncd/experimental
sdk/python/tekton_pipeline/models/v1beta1_step.py
V1beta1Step.stdin
python
def stdin(self, stdin):
    self._stdin = stdin
Sets the stdin of this V1beta1Step. Whether this container should allocate a buffer for stdin in the container runtime. If this is not set, reads from stdin in the container will always result in EOF. Default is false. # noqa: E501 :param stdin: The stdin of this V1beta1Step. # noqa: E501 :type: bool
https://github.com/tektoncd/experimental/blob/0ba4e7a2b9d45ed4accaecbb34dac006d665796a/sdk/python/tekton_pipeline/models/v1beta1_step.py#L535-L544
import pprint import re import six from tekton_pipeline.configuration import Configuration class V1beta1Step(object): """ Attributes: openapi_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ openapi_types = { 'args': 'list[str]', 'command': 'list[str]', 'env': 'list[V1EnvVar]', 'env_from': 'list[V1EnvFromSource]', 'image': 'str', 'image_pull_policy': 'str', 'lifecycle': 'V1Lifecycle', 'liveness_probe': 'V1Probe', 'name': 'str', 'ports': 'list[V1ContainerPort]', 'readiness_probe': 'V1Probe', 'resources': 'V1ResourceRequirements', 'script': 'str', 'security_context': 'V1SecurityContext', 'startup_probe': 'V1Probe', 'stdin': 'bool', 'stdin_once': 'bool', 'termination_message_path': 'str', 'termination_message_policy': 'str', 'timeout': 'V1Duration', 'tty': 'bool', 'volume_devices': 'list[V1VolumeDevice]', 'volume_mounts': 'list[V1VolumeMount]', 'working_dir': 'str', 'workspaces': 'list[V1beta1WorkspaceUsage]' } attribute_map = { 'args': 'args', 'command': 'command', 'env': 'env', 'env_from': 'envFrom', 'image': 'image', 'image_pull_policy': 'imagePullPolicy', 'lifecycle': 'lifecycle', 'liveness_probe': 'livenessProbe', 'name': 'name', 'ports': 'ports', 'readiness_probe': 'readinessProbe', 'resources': 'resources', 'script': 'script', 'security_context': 'securityContext', 'startup_probe': 'startupProbe', 'stdin': 'stdin', 'stdin_once': 'stdinOnce', 'termination_message_path': 'terminationMessagePath', 'termination_message_policy': 'terminationMessagePolicy', 'timeout': 'timeout', 'tty': 'tty', 'volume_devices': 'volumeDevices', 'volume_mounts': 'volumeMounts', 'working_dir': 'workingDir', 'workspaces': 'workspaces' } def __init__(self, args=None, command=None, env=None, env_from=None, image=None, image_pull_policy=None, lifecycle=None, liveness_probe=None, name='', ports=None, readiness_probe=None, resources=None, script=None, security_context=None, startup_probe=None, stdin=None, stdin_once=None, termination_message_path=None, termination_message_policy=None, timeout=None, tty=None, volume_devices=None, volume_mounts=None, working_dir=None, workspaces=None, local_vars_configuration=None): if local_vars_configuration is None: local_vars_configuration = Configuration() self.local_vars_configuration = local_vars_configuration self._args = None self._command = None self._env = None self._env_from = None self._image = None self._image_pull_policy = None self._lifecycle = None self._liveness_probe = None self._name = None self._ports = None self._readiness_probe = None self._resources = None self._script = None self._security_context = None self._startup_probe = None self._stdin = None self._stdin_once = None self._termination_message_path = None self._termination_message_policy = None self._timeout = None self._tty = None self._volume_devices = None self._volume_mounts = None self._working_dir = None self._workspaces = None self.discriminator = None if args is not None: self.args = args if command is not None: self.command = command if env is not None: self.env = env if env_from is not None: self.env_from = env_from if image is not None: self.image = image if image_pull_policy is not None: self.image_pull_policy = image_pull_policy if lifecycle is not None: self.lifecycle = lifecycle if liveness_probe is not None: self.liveness_probe = liveness_probe self.name = name if ports is not None: self.ports = ports if readiness_probe is not None: self.readiness_probe = readiness_probe if 
resources is not None: self.resources = resources if script is not None: self.script = script if security_context is not None: self.security_context = security_context if startup_probe is not None: self.startup_probe = startup_probe if stdin is not None: self.stdin = stdin if stdin_once is not None: self.stdin_once = stdin_once if termination_message_path is not None: self.termination_message_path = termination_message_path if termination_message_policy is not None: self.termination_message_policy = termination_message_policy if timeout is not None: self.timeout = timeout if tty is not None: self.tty = tty if volume_devices is not None: self.volume_devices = volume_devices if volume_mounts is not None: self.volume_mounts = volume_mounts if working_dir is not None: self.working_dir = working_dir if workspaces is not None: self.workspaces = workspaces @property def args(self): return self._args @args.setter def args(self, args): self._args = args @property def command(self): return self._command @command.setter def command(self, command): self._command = command @property def env(self): return self._env @env.setter def env(self, env): self._env = env @property def env_from(self): return self._env_from @env_from.setter def env_from(self, env_from): self._env_from = env_from @property def image(self): return self._image @image.setter def image(self, image): self._image = image @property def image_pull_policy(self): return self._image_pull_policy @image_pull_policy.setter def image_pull_policy(self, image_pull_policy): self._image_pull_policy = image_pull_policy @property def lifecycle(self): return self._lifecycle @lifecycle.setter def lifecycle(self, lifecycle): self._lifecycle = lifecycle @property def liveness_probe(self): return self._liveness_probe @liveness_probe.setter def liveness_probe(self, liveness_probe): self._liveness_probe = liveness_probe @property def name(self): return self._name @name.setter def name(self, name): if self.local_vars_configuration.client_side_validation and name is None: raise ValueError("Invalid value for `name`, must not be `None`") self._name = name @property def ports(self): return self._ports @ports.setter def ports(self, ports): self._ports = ports @property def readiness_probe(self): return self._readiness_probe @readiness_probe.setter def readiness_probe(self, readiness_probe): self._readiness_probe = readiness_probe @property def resources(self): return self._resources @resources.setter def resources(self, resources): self._resources = resources @property def script(self): return self._script @script.setter def script(self, script): self._script = script @property def security_context(self): return self._security_context @security_context.setter def security_context(self, security_context): self._security_context = security_context @property def startup_probe(self): return self._startup_probe @startup_probe.setter def startup_probe(self, startup_probe): self._startup_probe = startup_probe @property def stdin(self): return self._stdin @stdin.setter
Apache License 2.0
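Illustrative usage sketch for the stdin property above (a minimal example, not taken from the repository; the import path is inferred from the record's file path and the step's field values are placeholders):

# Sketch only: exercise the stdin getter/setter shown in the record above.
from tekton_pipeline.models.v1beta1_step import V1beta1Step

step = V1beta1Step(name="build", image="alpine:3.18", script="echo hello")
step.stdin = True            # property setter; no validation is applied
assert step.stdin is True    # property getter returns the stored value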
nuagenetworks/vspk-python
vspk/v6/nuapplication.py
NUApplication.destination_port
python
def destination_port(self):
    return self._destination_port
Get destination_port value. Notes: value should be either * or single port number or maximum 5 ranges comma separated. This attribute is named `destinationPort` in VSD API.
https://github.com/nuagenetworks/vspk-python/blob/375cce10ae144ad6017104e57fcd3630898cc2a6/vspk/v6/nuapplication.py#L460-L470
from .fetchers import NUPermissionsFetcher from .fetchers import NUMetadatasFetcher from .fetchers import NUGlobalMetadatasFetcher from .fetchers import NUMonitorscopesFetcher from .fetchers import NUApplicationBindingsFetcher from bambou import NURESTObject class NUApplication(NURESTObject): __rest_name__ = "application" __resource_name__ = "applications" CONST_POST_CLASSIFICATION_PATH_ANY = "ANY" CONST_PROTOCOL_NONE = "NONE" CONST_PERFORMANCE_MONITOR_TYPE_FIRST_PACKET = "FIRST_PACKET" CONST_PRE_CLASSIFICATION_PATH_PRIMARY = "PRIMARY" CONST_ENTITY_SCOPE_GLOBAL = "GLOBAL" CONST_PRE_CLASSIFICATION_PATH_SECONDARY = "SECONDARY" CONST_PERFORMANCE_MONITOR_TYPE_CONTINUOUS = "CONTINUOUS" CONST_OPTIMIZE_PATH_SELECTION_PACKETLOSS = "PACKETLOSS" CONST_ENTITY_SCOPE_ENTERPRISE = "ENTERPRISE" CONST_OPTIMIZE_PATH_SELECTION_LATENCY = "LATENCY" CONST_OPTIMIZE_PATH_SELECTION_JITTER = "JITTER" CONST_PROTOCOL_UDP = "UDP" CONST_POST_CLASSIFICATION_PATH_PRIMARY = "PRIMARY" CONST_POST_CLASSIFICATION_PATH_SECONDARY = "SECONDARY" CONST_PERFORMANCE_MONITOR_TYPE_FIRST_PACKET_AND_CONTINUOUS = "FIRST_PACKET_AND_CONTINUOUS" CONST_PROTOCOL_TCP = "TCP" CONST_PRE_CLASSIFICATION_PATH_DEFAULT = "DEFAULT" def __init__(self, **kwargs): super(NUApplication, self).__init__() self._dscp = None self._name = None self._bandwidth = None self._last_updated_by = None self._last_updated_date = None self._read_only = None self._performance_monitor_type = None self._certificate_common_name = None self._description = None self._destination_ip = None self._destination_port = None self._network_symmetry = None self._embedded_metadata = None self._enable_pps = None self._one_way_delay = None self._one_way_jitter = None self._one_way_loss = None self._entity_scope = None self._post_classification_path = None self._source_ip = None self._source_port = None self._app_id = None self._optimize_path_selection = None self._pre_classification_path = None self._creation_date = None self._protocol = None self._associated_l7_application_signature_id = None self._ether_type = None self._owner = None self._external_id = None self._symmetry = None self.expose_attribute(local_name="dscp", remote_name="DSCP", attribute_type=str, is_required=False, is_unique=False) self.expose_attribute(local_name="name", remote_name="name", attribute_type=str, is_required=True, is_unique=False) self.expose_attribute(local_name="bandwidth", remote_name="bandwidth", attribute_type=int, is_required=False, is_unique=False) self.expose_attribute(local_name="last_updated_by", remote_name="lastUpdatedBy", attribute_type=str, is_required=False, is_unique=False) self.expose_attribute(local_name="last_updated_date", remote_name="lastUpdatedDate", attribute_type=str, is_required=False, is_unique=False) self.expose_attribute(local_name="read_only", remote_name="readOnly", attribute_type=bool, is_required=False, is_unique=False) self.expose_attribute(local_name="performance_monitor_type", remote_name="performanceMonitorType", attribute_type=str, is_required=False, is_unique=False, choices=[u'CONTINUOUS', u'FIRST_PACKET', u'FIRST_PACKET_AND_CONTINUOUS']) self.expose_attribute(local_name="certificate_common_name", remote_name="certificateCommonName", attribute_type=str, is_required=False, is_unique=False) self.expose_attribute(local_name="description", remote_name="description", attribute_type=str, is_required=False, is_unique=False) self.expose_attribute(local_name="destination_ip", remote_name="destinationIP", attribute_type=str, is_required=False, is_unique=False) 
self.expose_attribute(local_name="destination_port", remote_name="destinationPort", attribute_type=str, is_required=False, is_unique=False) self.expose_attribute(local_name="network_symmetry", remote_name="networkSymmetry", attribute_type=bool, is_required=False, is_unique=False) self.expose_attribute(local_name="embedded_metadata", remote_name="embeddedMetadata", attribute_type=list, is_required=False, is_unique=False) self.expose_attribute(local_name="enable_pps", remote_name="enablePPS", attribute_type=bool, is_required=False, is_unique=False) self.expose_attribute(local_name="one_way_delay", remote_name="oneWayDelay", attribute_type=int, is_required=False, is_unique=False) self.expose_attribute(local_name="one_way_jitter", remote_name="oneWayJitter", attribute_type=int, is_required=False, is_unique=False) self.expose_attribute(local_name="one_way_loss", remote_name="oneWayLoss", attribute_type=float, is_required=False, is_unique=False) self.expose_attribute(local_name="entity_scope", remote_name="entityScope", attribute_type=str, is_required=False, is_unique=False, choices=[u'ENTERPRISE', u'GLOBAL']) self.expose_attribute(local_name="post_classification_path", remote_name="postClassificationPath", attribute_type=str, is_required=False, is_unique=False, choices=[u'ANY', u'PRIMARY', u'SECONDARY']) self.expose_attribute(local_name="source_ip", remote_name="sourceIP", attribute_type=str, is_required=False, is_unique=False) self.expose_attribute(local_name="source_port", remote_name="sourcePort", attribute_type=str, is_required=False, is_unique=False) self.expose_attribute(local_name="app_id", remote_name="appId", attribute_type=int, is_required=False, is_unique=False) self.expose_attribute(local_name="optimize_path_selection", remote_name="optimizePathSelection", attribute_type=str, is_required=False, is_unique=False, choices=[u'JITTER', u'LATENCY', u'PACKETLOSS']) self.expose_attribute(local_name="pre_classification_path", remote_name="preClassificationPath", attribute_type=str, is_required=False, is_unique=False, choices=[u'DEFAULT', u'PRIMARY', u'SECONDARY']) self.expose_attribute(local_name="creation_date", remote_name="creationDate", attribute_type=str, is_required=False, is_unique=False) self.expose_attribute(local_name="protocol", remote_name="protocol", attribute_type=str, is_required=False, is_unique=False, choices=[u'NONE', u'TCP', u'UDP']) self.expose_attribute(local_name="associated_l7_application_signature_id", remote_name="associatedL7ApplicationSignatureID", attribute_type=str, is_required=False, is_unique=False) self.expose_attribute(local_name="ether_type", remote_name="etherType", attribute_type=str, is_required=False, is_unique=False) self.expose_attribute(local_name="owner", remote_name="owner", attribute_type=str, is_required=False, is_unique=False) self.expose_attribute(local_name="external_id", remote_name="externalID", attribute_type=str, is_required=False, is_unique=True) self.expose_attribute(local_name="symmetry", remote_name="symmetry", attribute_type=bool, is_required=False, is_unique=False) self.permissions = NUPermissionsFetcher.fetcher_with_object(parent_object=self, relationship="child") self.metadatas = NUMetadatasFetcher.fetcher_with_object(parent_object=self, relationship="child") self.global_metadatas = NUGlobalMetadatasFetcher.fetcher_with_object(parent_object=self, relationship="child") self.monitorscopes = NUMonitorscopesFetcher.fetcher_with_object(parent_object=self, relationship="child") self.application_bindings = 
NUApplicationBindingsFetcher.fetcher_with_object(parent_object=self, relationship="member") self._compute_args(**kwargs) @property def dscp(self): return self._dscp @dscp.setter def dscp(self, value): self._dscp = value @property def name(self): return self._name @name.setter def name(self, value): self._name = value @property def bandwidth(self): return self._bandwidth @bandwidth.setter def bandwidth(self, value): self._bandwidth = value @property def last_updated_by(self): return self._last_updated_by @last_updated_by.setter def last_updated_by(self, value): self._last_updated_by = value @property def last_updated_date(self): return self._last_updated_date @last_updated_date.setter def last_updated_date(self, value): self._last_updated_date = value @property def read_only(self): return self._read_only @read_only.setter def read_only(self, value): self._read_only = value @property def performance_monitor_type(self): return self._performance_monitor_type @performance_monitor_type.setter def performance_monitor_type(self, value): self._performance_monitor_type = value @property def certificate_common_name(self): return self._certificate_common_name @certificate_common_name.setter def certificate_common_name(self, value): self._certificate_common_name = value @property def description(self): return self._description @description.setter def description(self, value): self._description = value @property def destination_ip(self): return self._destination_ip @destination_ip.setter def destination_ip(self, value): self._destination_ip = value @property
BSD 3-Clause New or Revised License
olitheolix/aiokubernetes
aiokubernetes/models/v1_volume.py
V1Volume.vsphere_volume
python
def vsphere_volume(self):
    return self._vsphere_volume
Gets the vsphere_volume of this V1Volume. # noqa: E501 VsphereVolume represents a vSphere volume attached and mounted on kubelets host machine # noqa: E501 :return: The vsphere_volume of this V1Volume. # noqa: E501 :rtype: V1VsphereVirtualDiskVolumeSource
https://github.com/olitheolix/aiokubernetes/blob/266718b210dff2a9b2212183261ea89adf89115e/aiokubernetes/models/v1_volume.py#L835-L843
import pprint import re from aiokubernetes.models.v1_aws_elastic_block_store_volume_source import V1AWSElasticBlockStoreVolumeSource from aiokubernetes.models.v1_azure_disk_volume_source import V1AzureDiskVolumeSource from aiokubernetes.models.v1_azure_file_volume_source import V1AzureFileVolumeSource from aiokubernetes.models.v1_ceph_fs_volume_source import V1CephFSVolumeSource from aiokubernetes.models.v1_cinder_volume_source import V1CinderVolumeSource from aiokubernetes.models.v1_config_map_volume_source import V1ConfigMapVolumeSource from aiokubernetes.models.v1_downward_api_volume_source import V1DownwardAPIVolumeSource from aiokubernetes.models.v1_empty_dir_volume_source import V1EmptyDirVolumeSource from aiokubernetes.models.v1_fc_volume_source import V1FCVolumeSource from aiokubernetes.models.v1_flex_volume_source import V1FlexVolumeSource from aiokubernetes.models.v1_flocker_volume_source import V1FlockerVolumeSource from aiokubernetes.models.v1_gce_persistent_disk_volume_source import V1GCEPersistentDiskVolumeSource from aiokubernetes.models.v1_git_repo_volume_source import V1GitRepoVolumeSource from aiokubernetes.models.v1_glusterfs_volume_source import V1GlusterfsVolumeSource from aiokubernetes.models.v1_host_path_volume_source import V1HostPathVolumeSource from aiokubernetes.models.v1_iscsi_volume_source import V1ISCSIVolumeSource from aiokubernetes.models.v1_nfs_volume_source import V1NFSVolumeSource from aiokubernetes.models.v1_persistent_volume_claim_volume_source import V1PersistentVolumeClaimVolumeSource from aiokubernetes.models.v1_photon_persistent_disk_volume_source import V1PhotonPersistentDiskVolumeSource from aiokubernetes.models.v1_portworx_volume_source import V1PortworxVolumeSource from aiokubernetes.models.v1_projected_volume_source import V1ProjectedVolumeSource from aiokubernetes.models.v1_quobyte_volume_source import V1QuobyteVolumeSource from aiokubernetes.models.v1_rbd_volume_source import V1RBDVolumeSource from aiokubernetes.models.v1_scale_io_volume_source import V1ScaleIOVolumeSource from aiokubernetes.models.v1_secret_volume_source import V1SecretVolumeSource from aiokubernetes.models.v1_storage_os_volume_source import V1StorageOSVolumeSource from aiokubernetes.models.v1_vsphere_virtual_disk_volume_source import V1VsphereVirtualDiskVolumeSource class V1Volume(object): """ Attributes: swagger_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. 
""" swagger_types = { 'aws_elastic_block_store': 'V1AWSElasticBlockStoreVolumeSource', 'azure_disk': 'V1AzureDiskVolumeSource', 'azure_file': 'V1AzureFileVolumeSource', 'cephfs': 'V1CephFSVolumeSource', 'cinder': 'V1CinderVolumeSource', 'config_map': 'V1ConfigMapVolumeSource', 'downward_api': 'V1DownwardAPIVolumeSource', 'empty_dir': 'V1EmptyDirVolumeSource', 'fc': 'V1FCVolumeSource', 'flex_volume': 'V1FlexVolumeSource', 'flocker': 'V1FlockerVolumeSource', 'gce_persistent_disk': 'V1GCEPersistentDiskVolumeSource', 'git_repo': 'V1GitRepoVolumeSource', 'glusterfs': 'V1GlusterfsVolumeSource', 'host_path': 'V1HostPathVolumeSource', 'iscsi': 'V1ISCSIVolumeSource', 'name': 'str', 'nfs': 'V1NFSVolumeSource', 'persistent_volume_claim': 'V1PersistentVolumeClaimVolumeSource', 'photon_persistent_disk': 'V1PhotonPersistentDiskVolumeSource', 'portworx_volume': 'V1PortworxVolumeSource', 'projected': 'V1ProjectedVolumeSource', 'quobyte': 'V1QuobyteVolumeSource', 'rbd': 'V1RBDVolumeSource', 'scale_io': 'V1ScaleIOVolumeSource', 'secret': 'V1SecretVolumeSource', 'storageos': 'V1StorageOSVolumeSource', 'vsphere_volume': 'V1VsphereVirtualDiskVolumeSource' } attribute_map = { 'aws_elastic_block_store': 'awsElasticBlockStore', 'azure_disk': 'azureDisk', 'azure_file': 'azureFile', 'cephfs': 'cephfs', 'cinder': 'cinder', 'config_map': 'configMap', 'downward_api': 'downwardAPI', 'empty_dir': 'emptyDir', 'fc': 'fc', 'flex_volume': 'flexVolume', 'flocker': 'flocker', 'gce_persistent_disk': 'gcePersistentDisk', 'git_repo': 'gitRepo', 'glusterfs': 'glusterfs', 'host_path': 'hostPath', 'iscsi': 'iscsi', 'name': 'name', 'nfs': 'nfs', 'persistent_volume_claim': 'persistentVolumeClaim', 'photon_persistent_disk': 'photonPersistentDisk', 'portworx_volume': 'portworxVolume', 'projected': 'projected', 'quobyte': 'quobyte', 'rbd': 'rbd', 'scale_io': 'scaleIO', 'secret': 'secret', 'storageos': 'storageos', 'vsphere_volume': 'vsphereVolume' } def __init__(self, aws_elastic_block_store=None, azure_disk=None, azure_file=None, cephfs=None, cinder=None, config_map=None, downward_api=None, empty_dir=None, fc=None, flex_volume=None, flocker=None, gce_persistent_disk=None, git_repo=None, glusterfs=None, host_path=None, iscsi=None, name=None, nfs=None, persistent_volume_claim=None, photon_persistent_disk=None, portworx_volume=None, projected=None, quobyte=None, rbd=None, scale_io=None, secret=None, storageos=None, vsphere_volume=None): self._aws_elastic_block_store = None self._azure_disk = None self._azure_file = None self._cephfs = None self._cinder = None self._config_map = None self._downward_api = None self._empty_dir = None self._fc = None self._flex_volume = None self._flocker = None self._gce_persistent_disk = None self._git_repo = None self._glusterfs = None self._host_path = None self._iscsi = None self._name = None self._nfs = None self._persistent_volume_claim = None self._photon_persistent_disk = None self._portworx_volume = None self._projected = None self._quobyte = None self._rbd = None self._scale_io = None self._secret = None self._storageos = None self._vsphere_volume = None self.discriminator = None if aws_elastic_block_store is not None: self.aws_elastic_block_store = aws_elastic_block_store if azure_disk is not None: self.azure_disk = azure_disk if azure_file is not None: self.azure_file = azure_file if cephfs is not None: self.cephfs = cephfs if cinder is not None: self.cinder = cinder if config_map is not None: self.config_map = config_map if downward_api is not None: self.downward_api = downward_api if empty_dir 
is not None: self.empty_dir = empty_dir if fc is not None: self.fc = fc if flex_volume is not None: self.flex_volume = flex_volume if flocker is not None: self.flocker = flocker if gce_persistent_disk is not None: self.gce_persistent_disk = gce_persistent_disk if git_repo is not None: self.git_repo = git_repo if glusterfs is not None: self.glusterfs = glusterfs if host_path is not None: self.host_path = host_path if iscsi is not None: self.iscsi = iscsi self.name = name if nfs is not None: self.nfs = nfs if persistent_volume_claim is not None: self.persistent_volume_claim = persistent_volume_claim if photon_persistent_disk is not None: self.photon_persistent_disk = photon_persistent_disk if portworx_volume is not None: self.portworx_volume = portworx_volume if projected is not None: self.projected = projected if quobyte is not None: self.quobyte = quobyte if rbd is not None: self.rbd = rbd if scale_io is not None: self.scale_io = scale_io if secret is not None: self.secret = secret if storageos is not None: self.storageos = storageos if vsphere_volume is not None: self.vsphere_volume = vsphere_volume @property def aws_elastic_block_store(self): return self._aws_elastic_block_store @aws_elastic_block_store.setter def aws_elastic_block_store(self, aws_elastic_block_store): self._aws_elastic_block_store = aws_elastic_block_store @property def azure_disk(self): return self._azure_disk @azure_disk.setter def azure_disk(self, azure_disk): self._azure_disk = azure_disk @property def azure_file(self): return self._azure_file @azure_file.setter def azure_file(self, azure_file): self._azure_file = azure_file @property def cephfs(self): return self._cephfs @cephfs.setter def cephfs(self, cephfs): self._cephfs = cephfs @property def cinder(self): return self._cinder @cinder.setter def cinder(self, cinder): self._cinder = cinder @property def config_map(self): return self._config_map @config_map.setter def config_map(self, config_map): self._config_map = config_map @property def downward_api(self): return self._downward_api @downward_api.setter def downward_api(self, downward_api): self._downward_api = downward_api @property def empty_dir(self): return self._empty_dir @empty_dir.setter def empty_dir(self, empty_dir): self._empty_dir = empty_dir @property def fc(self): return self._fc @fc.setter def fc(self, fc): self._fc = fc @property def flex_volume(self): return self._flex_volume @flex_volume.setter def flex_volume(self, flex_volume): self._flex_volume = flex_volume @property def flocker(self): return self._flocker @flocker.setter def flocker(self, flocker): self._flocker = flocker @property def gce_persistent_disk(self): return self._gce_persistent_disk @gce_persistent_disk.setter def gce_persistent_disk(self, gce_persistent_disk): self._gce_persistent_disk = gce_persistent_disk @property def git_repo(self): return self._git_repo @git_repo.setter def git_repo(self, git_repo): self._git_repo = git_repo @property def glusterfs(self): return self._glusterfs @glusterfs.setter def glusterfs(self, glusterfs): self._glusterfs = glusterfs @property def host_path(self): return self._host_path @host_path.setter def host_path(self, host_path): self._host_path = host_path @property def iscsi(self): return self._iscsi @iscsi.setter def iscsi(self, iscsi): self._iscsi = iscsi @property def name(self): return self._name @name.setter def name(self, name): if name is None: raise ValueError("Invalid value for `name`, must not be `None`") self._name = name @property def nfs(self): return self._nfs @nfs.setter def 
nfs(self, nfs): self._nfs = nfs @property def persistent_volume_claim(self): return self._persistent_volume_claim @persistent_volume_claim.setter def persistent_volume_claim(self, persistent_volume_claim): self._persistent_volume_claim = persistent_volume_claim @property def photon_persistent_disk(self): return self._photon_persistent_disk @photon_persistent_disk.setter def photon_persistent_disk(self, photon_persistent_disk): self._photon_persistent_disk = photon_persistent_disk @property def portworx_volume(self): return self._portworx_volume @portworx_volume.setter def portworx_volume(self, portworx_volume): self._portworx_volume = portworx_volume @property def projected(self): return self._projected @projected.setter def projected(self, projected): self._projected = projected @property def quobyte(self): return self._quobyte @quobyte.setter def quobyte(self, quobyte): self._quobyte = quobyte @property def rbd(self): return self._rbd @rbd.setter def rbd(self, rbd): self._rbd = rbd @property def scale_io(self): return self._scale_io @scale_io.setter def scale_io(self, scale_io): self._scale_io = scale_io @property def secret(self): return self._secret @secret.setter def secret(self, secret): self._secret = secret @property def storageos(self): return self._storageos @storageos.setter def storageos(self, storageos): self._storageos = storageos @property
Apache License 2.0
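Illustrative usage sketch for the vsphere_volume property above (a minimal example, not taken from the repository; it assumes V1VsphereVirtualDiskVolumeSource accepts a volume_path keyword, as in the upstream Kubernetes client models, and the path value is made up):

# Sketch only: vsphere_volume is None until a volume source is assigned.
from aiokubernetes.models.v1_volume import V1Volume
from aiokubernetes.models.v1_vsphere_virtual_disk_volume_source import V1VsphereVirtualDiskVolumeSource

vol = V1Volume(name="data")        # name is required (the setter raises on None)
print(vol.vsphere_volume)          # -> None
vol.vsphere_volume = V1VsphereVirtualDiskVolumeSource(volume_path="[ds1] volumes/app.vmdk")
print(vol.vsphere_volume.volume_path)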
pygame/pygameweb
tests/functional/pygameweb/project/test_project_views.py
project2
python
def project2(session, project, user):
    title = 'Some project title 2'
    version = 'some version'
    release_description = 'release description 2'
    (the_project2, release1, tag3, tag4) = a_project(
        session, title, release_description, version, user)
    session.add(release1)
    session.add(tag3)
    session.add(tag4)
    session.add(the_project2)
    return the_project2
adds a second project with a couple of tags.
https://github.com/pygame/pygameweb/blob/144b2483d090c1ecd9482eb7d47454137210ba9d/tests/functional/pygameweb/project/test_project_views.py#L152-L167
from pathlib import Path import pytest import mock @pytest.fixture def project_client(app, session, client): from pygameweb.project.views import add_project_blueprint from pygameweb.user.views import add_user_blueprint from pygameweb.sidebar.views import add_sidebar from pygameweb.thumb.views import add_thumb_blueprint add_sidebar(app) add_user_blueprint(app) add_project_blueprint(app) add_thumb_blueprint(app) return client def a_user(app, session, project_client, name, email, logged_in, disabled, active): from pygameweb.user.models import User, Group from flask_security.utils import encrypt_password group = Group(name='members', title='Member') user = User(name=name, email=email, password=encrypt_password('password'), disabled=disabled, active=active, roles=[group]) session.add(user) session.commit() with project_client.session_transaction() as sess: sess['user_id'] = user.id sess['_fresh'] = True return user @pytest.fixture def user(app, session, project_client): return a_user(app, session, project_client, 'joe', 'asdf@example.com', logged_in=True, disabled=0, active=True) @pytest.fixture def user_banned(app, session, project_client): return a_user(app, session, project_client, 'joebanned', 'asdf2@example.com', logged_in=False, disabled=1, active=False) @pytest.fixture def project(session, user): import datetime from pygameweb.project.models import Project, Release, Projectcomment, Tags the_project = Project( title='Some project title 1', summary='Summary of some project 1.', description='Description of some project.', uri='http://some.example.com/', datetimeon=datetime.datetime(2017, 1, 5), image='1.png', youtube_trailer='https://www.youtube.com/watch?v=8UnvMe1Neok', github_repo='https://github.com/pygame/pygameweb/', patreon='https://www.patreon.com/pygame', user=user ) tag1 = Tags(project=the_project, value='game') tag2 = Tags(project=the_project, value='arcade') session.add(tag1) session.add(tag2) release1 = Release(datetimeon=datetime.datetime(2017, 1, 5), description='Some release.', srcuri='http://example.com/source.tar.gz', winuri='http://example.com/win.exe', macuri='http://example.com/mac.dmg', version='A release title.') release2 = Release(datetimeon=datetime.datetime(2017, 1, 6), description='Some release with new things.', srcuri='http://example.com/source.tar.gz', winuri='http://example.com/win.exe', macuri='http://example.com/mac.dmg', version='A second release title.') the_project.releases.append(release1) the_project.releases.append(release2) comment1 = Projectcomment(user=user, content="Some comment 1.", rating=5) comment2 = Projectcomment(user=user, content="Some comment 2.", rating=3) the_project.comments.append(comment1) the_project.comments.append(comment2) session.add(the_project) session.commit() return the_project def a_project(session, title, release_description, version, user): import datetime from pygameweb.project.models import Project, Tags, Release the_project2 = Project( title=title, summary='Summary of some project 2.', description='Description of some project 2.', uri='http://some.example.com/', datetimeon=datetime.datetime(2017, 1, 8), image='1.png', user=user ) release1 = Release(datetimeon=datetime.datetime(2017, 1, 5), description=release_description, srcuri='http://example.com/source.tar.gz', winuri='http://example.com/win.exe', macuri='http://example.com/mac.dmg', version=version) the_project2.releases.append(release1) tag3 = Tags(project=the_project2, value='2d') tag4 = Tags(project=the_project2, value='arcade') return the_project2, release1, 
tag3, tag4 @pytest.fixture
BSD 2-Clause Simplified License
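A hypothetical pytest test that consumes the project2 fixture above; the assertions use only values set inside a_project in the record's context:

# Sketch only: not part of the repository's test suite.
def test_project2_has_single_release(project2):
    assert project2.title == 'Some project title 2'
    assert len(project2.releases) == 1
    assert project2.releases[0].version == 'some version'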
apple/ccs-twistedextensions
twext/enterprise/adbapi2.py
_CommitAndAbortHooks._commitWithHooks
python
def _commitWithHooks(self, doCommit):
    pre = self._preCommit.runHooks()

    def ok(ignored):
        self._abort.clear()
        return doCommit().addCallback(self._commit.runHooks)

    def failed(why):
        return self.abort().addCallback(lambda ignored: why)

    return pre.addCallbacks(ok, failed)
Run pre-commit hooks, then the real DB commit, and then post-commit hooks.
https://github.com/apple/ccs-twistedextensions/blob/2c4046df88873dcf33fba7840ed90e4238dcbec7/twext/enterprise/adbapi2.py#L639-L652
import sys import weakref import time from cStringIO import StringIO from cPickle import dumps, loads from itertools import count from zope.interface import implements from twisted.internet.defer import inlineCallbacks from twisted.internet.defer import returnValue from twisted.internet.defer import DeferredList from twisted.internet.defer import Deferred from twisted.protocols.amp import Boolean from twisted.python.failure import Failure from twisted.protocols.amp import Argument, String, Command, AMP, Integer from twisted.internet import reactor as _reactor from twisted.application.service import Service from twisted.internet.defer import maybeDeferred from twisted.python.components import proxyForInterface from twext.internet.threadutils import ThreadHolder from twisted.internet.defer import succeed from twext.enterprise.ienterprise import ConnectionError from twext.enterprise.ienterprise import IDerivedParameter from twisted.internet.defer import fail from twext.enterprise.ienterprise import ( AlreadyFinishedError, IAsyncTransaction, ICommandBlock, DatabaseType, POSTGRES_DIALECT, ) from twext.python.log import Logger log = Logger() DEFAULT_PARAM_STYLE = "pyformat" DEFAULT_DIALECT = POSTGRES_DIALECT DEFAULT_DBTYPE = DatabaseType(DEFAULT_DIALECT, DEFAULT_PARAM_STYLE) def _forward(thunk): @property def getter(self): return getattr(self._pool, thunk.func_name) return getter def _destructively(aList): while aList: yield aList.pop(0) def _deriveParameters(cursor, args): derived = None for n, arg in enumerate(args): if IDerivedParameter.providedBy(arg): if derived is None: derived = [] derived.append(arg) args[n] = arg.preQuery(cursor) return derived def _deriveQueryEnded(cursor, derived): for arg in derived: arg.postQuery(cursor) class _ConnectedTxn(object): implements(IAsyncTransaction) noisy = False def __init__(self, pool, threadHolder, connection, cursor, label=None): self._pool = pool self._completed = "idle" self._cursor = cursor self._connection = connection self._holder = threadHolder self._first = True self._label = label def __repr__(self): return "_ConnectedTxn({})".format(self._label) @_forward def dbtype(self): def _reallyExecSQL(self, sql, args=None, raiseOnZeroRowCount=None): stmts = sql.splitlines() if stmts[-1].startswith("call "): return self._reallyCallSQL(stmts[-1], args) wasFirst = self._first self._first = False if args is None: args = [] derived = _deriveParameters(self._cursor, args) try: self._cursor.execute(sql, args) except: if wasFirst: log.failure( "Exception from execute() on first statement in " "transaction. Possibly caused by a database server " "restart. Automatically reconnecting now.", failure=Failure(), ) try: self._connection.close() except: log.failure( "Exception from close() while automatically " "reconnecting. 
(Probably not serious.)", failure=Failure(), ) self._connection = self._pool.connectionFactory() self._cursor = self._connection.cursor() result = self._reallyExecSQL(sql, args, raiseOnZeroRowCount) return result else: raise if derived is not None: _deriveQueryEnded(self._cursor, derived) if self._cursor.description: rows = self._cursor.fetchall() if not rows: if raiseOnZeroRowCount is not None: raise raiseOnZeroRowCount() return rows else: if raiseOnZeroRowCount is not None and self._cursor.rowcount == 0: raise raiseOnZeroRowCount() return [[]] * self._cursor.rowcount def _reallyCallSQL(self, sql, args=None): if not sql.startswith("call ") or not sql.endswith("()"): raise ValueError("Invalid SQL CALL statement: {}".format(sql)) name = sql[5:-2] returnType = args[0] args = args[1:] wasFirst = self._first self._first = False try: if returnType is not None: returnValue = self._cursor.callfunc(name, returnType, args) returnValue = [returnValue, ] else: returnValue = self._cursor.callproc(name, args) returnValue = [returnValue, ] except: if wasFirst: log.failure( "Exception from execute() on first statement in " "transaction. Possibly caused by a database server " "restart. Automatically reconnecting now.", failure=Failure(), ) try: self._connection.close() except: log.failure( "Exception from close() while automatically " "reconnecting. (Probably not serious.)", failure=Failure(), ) self._connection = self._pool.connectionFactory() self._cursor = self._connection.cursor() result = self._reallyCallSQL(sql, args) return result else: raise else: return returnValue def execSQL(self, *args, **kw): if self._completed: raise RuntimeError("Attempt to use {} transaction.".format(self._completed)) result = self._holder.submit( lambda: self._reallyExecSQL(*args, **kw) ) if self.noisy: def reportResult(results): sys.stdout.write("\n".join([ "", "SQL: %r %r" % (args, kw), "Results: %r" % (results,), "", ])) return results result.addBoth(reportResult) return result def _end(self, really, terminate=False): if not self._completed: self._completed = "terminated" if terminate else "ended" def reallySomething(): if self._cursor is None or self._first: return really() result = self._holder.submit(reallySomething) self._pool._repoolAfter(self, result) return result else: raise AlreadyFinishedError(self._completed) def commit(self): return self._end(self._connection.commit) def abort(self): def _report(f): log.failure("txn abort", failure=f) return self._end(self._connection.rollback).addErrback(_report) def terminate(self): def _report(f): log.failure("txn abort", failure=f) return self._end(self._connection.rollback, terminate=True).addErrback(_report) def reset(self): if not self._completed: raise RuntimeError("Attempt to re-set active transaction.") if self._completed != "terminated": self._completed = False self._first = True def _releaseConnection(self): self._completed = "released" self._stopped = True holder = self._holder self._holder = None def _reallyClose(): if self._cursor is None: return self._connection.close() holder.submit(_reallyClose) return holder.stop() class _NoTxn(object): implements(IAsyncTransaction) def __init__(self, pool, reason, label=None): self.dbtype = pool.dbtype self.reason = reason self._label = label def __repr__(self): return "_NoTxn({})".format(self._label) def _everything(self, *a, **kw): return fail(ConnectionError(self.reason)) execSQL = _everything commit = _everything abort = _everything class _WaitingTxn(object): implements(IAsyncTransaction) def __init__(self, pool, 
label=None): self._spool = [] self.dbtype = pool.dbtype self._label = label def __repr__(self): return "_WaitingTxn({})".format(self._label) def _enspool(self, cmd, a=(), kw={}): d = Deferred() self._spool.append((d, cmd, a, kw)) return d def _iterDestruct(self): return _destructively(self._spool) def _unspool(self, other): for (d, cmd, a, kw) in self._iterDestruct(): self._relayCommand(other, d, cmd, a, kw) def _relayCommand(self, other, d, cmd, a, kw): maybeDeferred(getattr(other, cmd), *a, **kw).chainDeferred(d) def execSQL(self, *a, **kw): return self._enspool("execSQL", a, kw) def commit(self): return self._enspool("commit") def abort(self): return succeed(None) class _HookableOperation(object): def __init__(self): self._hooks = [] @inlineCallbacks def runHooks(self, ignored=None): for operation in _destructively(self._hooks): yield operation() self.clear() returnValue(ignored) def addHook(self, operation): if self._hooks is not None: self._hooks.append(operation) def clear(self): self._hooks = None class _CommitAndAbortHooks(object): def __init__(self): self._preCommit = _HookableOperation() self._commit = _HookableOperation() self._abort = _HookableOperation()
Apache License 2.0
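The Deferred plumbing in _commitWithHooks boils down to a simple ordering contract; here is a plain synchronous sketch of that contract (an illustration of the design, not the Twisted implementation):

# Pre-commit hooks run first; if any fails, abort and re-raise.
# Otherwise run the real commit, then the post-commit hooks.
def commit_with_hooks(pre_hooks, do_commit, post_hooks, do_abort):
    try:
        for hook in pre_hooks:
            hook()
    except Exception:
        do_abort()
        raise
    result = do_commit()
    for hook in post_hooks:
        hook()
    return result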
gregorch/ipet
ipet/concepts/Manager.py
Manager.getManageables
python
def getManageables(self, onlyactive=False):
    if onlyactive:
        return list(self.activeset)
    else:
        return list(self.stringrepresentations.values())
Return all (or only the active) manageables.
https://github.com/gregorch/ipet/blob/e4135ff936d3aa447a960d854f9c51554e5ba7dc/ipet/concepts/Manager.py#L104-L111
from .Observer import Observable from .IPETMessageStream import Message class Manager(Observable): def __init__(self, listofmanageables=[], activate=False): self.stringrepresentations = {} self.activeset = set() for manageable in listofmanageables: self.addManageable(manageable) if activate: self.activate(manageable) def addManageable(self, manageable): stringrepresentation = self.getStringRepresentation(manageable) self.stringrepresentations[stringrepresentation] = manageable def getStringRepresentation(self, manageable): if type(manageable) is str: return manageable else: try: return manageable.getName() except AttributeError: return str(manageable) def getManageable(self, stringrepresentation): return self.stringrepresentations.get(stringrepresentation, None) def deleteManageable(self, manageable): for key, val in list(self.stringrepresentations.items()): if val == manageable: oldstringrepresentation = key break del self.stringrepresentations[oldstringrepresentation] self.deactivate([manageable]) def reinsertManageable(self, manageable): active = self.isActive(manageable) self.deleteManageable(manageable) self.addManageable(manageable) if active: self.activate([manageable]) def editObjectAttribute(self, manageable, attributename, newattribute): oldname = self.getStringRepresentation(manageable) manageable.editAttribute(attributename, newattribute) newname = self.getStringRepresentation(manageable) print(newname, newattribute) if oldname != newname: self.chgManageableName(manageable, oldname, newname) self.notify(Message("Changed attribute %s of %s to %s" % (attributename, newname, newattribute), Message.MESSAGETYPE_INFO)) def chgManageableName(self, manageable, oldname, newname): if newname != oldname: if self.getManageable(newname) is not None: raise KeyError("An element of name %s is already listed" % (newname)) del self.stringrepresentations[oldname] self.stringrepresentations[newname] = manageable
MIT License
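Short usage sketch for Manager.getManageables (assumes the ipet package is importable; plain strings are valid manageables per getStringRepresentation):

# Sketch only: nothing is activated, so the active view is empty.
from ipet.concepts.Manager import Manager

manager = Manager(["alpha", "beta"], activate=False)
print(manager.getManageables())                  # -> ['alpha', 'beta']
print(manager.getManageables(onlyactive=True))   # -> []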
hvandenb/splunk-elasticsearch
search-elasticsearch/bin/splunklib/searchcommands/generating_command.py
GeneratingCommand.local
python
def local(self):
    return type(self)._local
Specifies whether this command should only be run on the search head. This setting is used to override Splunk's default policy for running streamable search commands. See the `streaming` configuration setting. Default: :const:`False`
https://github.com/hvandenb/splunk-elasticsearch/blob/6c3e6d0d48a9e20e2d421d8d490ac28e979de045/search-elasticsearch/bin/splunklib/searchcommands/generating_command.py#L121-L132
from __future__ import absolute_import from . search_command import SearchCommand class GeneratingCommand(SearchCommand): def generate(self): raise NotImplementedError('GeneratingCommand.generate(self)') def _execute(self, operation, reader, writer): for record in operation(): writer.writerow(record) return def _prepare(self, argv, input_file): ConfigurationSettings = type(self).ConfigurationSettings argv = argv[2:] return ConfigurationSettings, self.generate, argv, 'ANY' class ConfigurationSettings(SearchCommand.ConfigurationSettings): @property def generating(self): return True @property def generates_timeorder(self): return type(self)._generates_timeorder _generates_timeorder = False @property
Apache License 2.0
neilsaw/prilog_web
common.py
tmp_movie_clear
python
def tmp_movie_clear():
    try:
        fl = glob(ap.stream_dir + "/*")
        if not fl:
            return
        fl.sort(key=lambda x: os.path.getctime(x))
        if check_pass_time(fl[0], 7200):
            clear_path(fl[0])
    except FileNotFoundError:
        pass
Delete a temporary movie file.

Delete the oldest movie file if it was created more than 2 hours ago.
This function is intended to be called on every query.

Args:

Returns:
https://github.com/neilsaw/prilog_web/blob/c16b697102c8fa41c2a2d6ff7b4921cc3253e3e9/common.py#L394-L418
import os import json from glob import glob import urllib.parse import app as ap import state_list as state import datetime CACHE_ELMS = 7 def save_cache(youtube_id, title, time_line, time_line_enemy, time_data, total_damage, debuff_value, status): past_status = cache_status_check(youtube_id) present_status = status if past_status is False: json.dump([title, time_line, time_line_enemy, time_data, total_damage, debuff_value, present_status], open(ap.cache_dir + urllib.parse.quote(youtube_id) + ".json", "w")) else: result, present_status = status_comparison(past_status, status) if result is True: json.dump([title, time_line, time_line_enemy, time_data, total_damage, debuff_value, present_status], open(ap.cache_dir + urllib.parse.quote(youtube_id) + ".json", "w")) return present_status def cache_check(youtube_id): try: cache_path = ap.cache_dir + urllib.parse.quote(youtube_id) + ".json" ret = json.load(open(cache_path)) if len(ret) is CACHE_ELMS: title, time_line, time_line_enemy, time_data, total_damage, debuff_value, past_status = ret if past_status // 100 == 3: if check_pass_time(cache_path, 300): return False else: return ret else: return ret else: clear_path(cache_path) return False except FileNotFoundError: return False def queue_cache_check(youtube_id): try: cache_path = ap.cache_dir + urllib.parse.quote(youtube_id) + ".json" ret = json.load(open(cache_path)) if len(ret) is CACHE_ELMS: return ret else: return False except FileNotFoundError: return False def cache_status_check(youtube_id): try: cache_path = ap.cache_dir + urllib.parse.quote(youtube_id) + ".json" ret = json.load(open(cache_path)) if len(ret) is CACHE_ELMS: past_status = ret[-1] return past_status else: return False except FileNotFoundError: return False def status_comparison(past, present): if past // 100 == 2 or past // 100 == 4: return False, past if present // 100 == 2 or present // 100 == 4: return True, present if not past % 100 // 10 and present % 100 // 10: return False, past elif not past % 100 // 10 and not present % 100 // 10: return True, present - 100 elif not present % 100 // 10: return True, present elif past % 100 // 10 and present % 100 // 10: return True, present + 100 else: return True, 399 def queue_append(path): try: with open(path, mode="w"): pass except FileExistsError: pass return def pending_append(path): try: with open(path, mode="w"): pass except FileExistsError: pass return def is_path_due(path): try: directory = os.path.dirname(path) fl = glob(directory + "/*") if not fl: return False fl.sort(key=lambda x: os.path.getctime(x)) comp = fl[0].replace("\\", "/") if comp == path: return True else: return False except: return False def is_path_exists(path): try: directory = os.path.dirname(path) fl = os.listdir(directory + "/") if not fl: return False else: return True except: return False def is_pending_download(margin): queue_path = ap.dl_pending_dir + "pending" result = False if os.path.exists(queue_path): if check_pass_time(queue_path, margin): clear_path(queue_path) result = True else: result = True return result def watchdog(youtube_id, is_parent, margin, err_type): queue_path = ap.queue_dir + str(youtube_id) pending_path = ap.pending_dir + str(youtube_id) if is_parent: job_path = pending_path else: job_path = queue_path if os.path.exists(job_path): if check_pass_time(job_path, margin): save_cache(youtube_id, "", False, False, False, False, False, err_type) clear_path(job_path) if is_parent: clear_path(queue_path) return def watchdog_download(youtube_id, margin): queue_path = ap.dl_queue_dir + 
str(youtube_id) result = False if os.path.exists(queue_path): if check_pass_time(queue_path, margin): clear_path(queue_path) result = True return result
MIT License
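A generic standalone sketch of the cleanup pattern used by tmp_movie_clear (find the oldest file, delete it if it is older than the 7200-second margin); check_pass_time and clear_path from the record are assumed to compare file creation time against that margin and delete the path:

import os
import time
from glob import glob

def clear_oldest_if_stale(directory, max_age_seconds=7200):
    # Remove the oldest file in `directory` if it is older than max_age_seconds.
    files = glob(os.path.join(directory, "*"))
    if not files:
        return
    files.sort(key=os.path.getctime)
    oldest = files[0]
    if time.time() - os.path.getctime(oldest) > max_age_seconds:
        os.remove(oldest)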
imjoy-team/imjoy-engine
imjoy/s3.py
ClosedRange.__bool__
python
def __bool__(self) -> bool:
    return len(self) > 0
Return the boolean representation of the range.
https://github.com/imjoy-team/imjoy-engine/blob/e529f02ff5ccfb39385a192ef62ee32e1d2bccf6/imjoy/s3.py#L60-L62
import asyncio import json import logging import os import re import sys from datetime import datetime from email.utils import formatdate from pathlib import Path from typing import Any, NamedTuple, Optional import botocore from aiobotocore.session import get_session from botocore.exceptions import ClientError from fastapi import APIRouter, Depends, Request from fastapi.responses import FileResponse, Response from starlette.datastructures import Headers from starlette.types import Receive, Scope, Send from imjoy.core.auth import login_optional from imjoy.minio import MinioClient from imjoy.utils import generate_password, safe_join logging.basicConfig(stream=sys.stdout) logger = logging.getLogger("s3") logger.setLevel(logging.INFO) RANGE_REGEX = re.compile(r"^bytes=(?P<start>\d+)-(?P<end>\d*)$") class OpenRange(NamedTuple): start: int end: Optional[int] = None def clamp(self, start: int, end: int) -> "ClosedRange": begin = max(self.start, start) end = min((x for x in (self.end, end) if x)) begin = min(begin, end) end = max(begin, end) return ClosedRange(begin, end) class ClosedRange(NamedTuple): start: int end: int def __len__(self) -> int: return self.end - self.start + 1
MIT License
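A tiny worked example of the range helpers, assuming the OpenRange and ClosedRange definitions above are in scope:

# An open-ended byte range clamped to a 500-byte object (indices 0..499).
r = OpenRange(start=100, end=None).clamp(0, 499)
assert r == ClosedRange(100, 499)
assert len(r) == 400        # end - start + 1
assert bool(r) is True      # non-empty range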
czhu95/ternarynet
tensorpack/RL/expreplay.py
ExpReplay._populate_exp
python
def _populate_exp(self):
    old_s = self.player.current_state()
    if self.rng.rand() <= self.exploration:
        act = self.rng.choice(range(self.num_actions))
    else:
        ss = [old_s]
        isOver = False
        for k in range(1, self.history_len):
            hist_exp = self.mem[-k]
            if hist_exp.isOver:
                isOver = True
            if isOver:
                ss.append(np.zeros_like(ss[0]))
            else:
                ss.append(hist_exp.state)
        ss.reverse()
        ss = np.concatenate(ss, axis=2)
        q_values = self.predictor([[ss]])[0][0]
        act = np.argmax(q_values)
    reward, isOver = self.player.action(act)
    if self.reward_clip:
        reward = np.clip(reward, self.reward_clip[0], self.reward_clip[1])
    self.mem.append(Experience(old_s, act, reward, isOver))
Populate one transition into the replay memory using an epsilon-greedy policy.
https://github.com/czhu95/ternarynet/blob/1a67251f7f5a1cdf854f87f90f841655c7c9f11c/tensorpack/RL/expreplay.py#L87-L114
import numpy as np from collections import deque, namedtuple import threading from tqdm import tqdm import six from six.moves import queue from ..dataflow import DataFlow from ..utils import * from ..utils.concurrency import LoopThread from ..callbacks.base import Callback __all__ = ['ExpReplay'] Experience = namedtuple('Experience', ['state', 'action', 'reward', 'isOver']) class ExpReplay(DataFlow, Callback): def __init__(self, predictor_io_names, player, batch_size=32, memory_size=1e6, populate_size=None, init_memory_size=50000, exploration=1, end_exploration=0.1, exploration_epoch_anneal=0.002, reward_clip=None, update_frequency=1, history_len=1 ): if populate_size is not None: logger.warn("populate_size in ExpReplay is deprecated in favor of init_memory_size") init_memory_size = populate_size init_memory_size = int(init_memory_size) for k, v in locals().items(): if k != 'self': setattr(self, k, v) self.num_actions = player.get_action_space().num_actions() logger.info("Number of Legal actions: {}".format(self.num_actions)) self.mem = deque(maxlen=memory_size) self.rng = get_rng(self) self._init_memory_flag = threading.Event() self._predictor_io_names = predictor_io_names def _init_memory(self): logger.info("Populating replay memory...") old_exploration = self.exploration self.exploration = 1 for k in range(self.history_len): self._populate_exp() self.exploration = old_exploration with tqdm(total=self.init_memory_size) as pbar: while len(self.mem) < self.init_memory_size: self._populate_exp() pbar.update() self._init_memory_flag.set()
Apache License 2.0
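The core of _populate_exp is epsilon-greedy action selection; a minimal standalone sketch of just that policy (function and argument names are placeholders):

import numpy as np

def epsilon_greedy(q_values, exploration, rng):
    # Explore with probability `exploration`, otherwise exploit the argmax.
    if rng.rand() <= exploration:
        return rng.choice(len(q_values))
    return int(np.argmax(q_values))

# e.g. epsilon_greedy(np.array([0.1, 0.5, 0.2]), 0.1, np.random.RandomState(0))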
mgear-dev/mgear_core
scripts/mgear/core/callbackManager.py
UserTimeChangedManager.userTimeChanged
python
def userTimeChanged(self, *args):
    if om.MConditionMessage.getConditionState("playingBackAuto"):
        return
    self.func(*args)
Check if playback is active; if so, return without calling func.
https://github.com/mgear-dev/mgear_core/blob/bb450fda44ff79c57f5f73d5a58c97a6b5c5d848/scripts/mgear/core/callbackManager.py#L325-L330
import uuid from functools import wraps, partial from maya.api import OpenMaya as om try: RECORDED_CALLBACKS except NameError: RECORDED_CALLBACKS = {} def removeAllSessionCB(): [removeCB(cb) for cb in RECORDED_CALLBACKS.keys()] def removeCBviaMayaID(mayaID, callback_info=RECORDED_CALLBACKS): for callback_name, callback_id in RECORDED_CALLBACKS.iteritems(): if callback_id == mayaID: removeCB(callback_name, callback_info=callback_info) def removeAllCBFromNode(node): m_node = getMObject(node) for mayaID in om.MMessage.nodeCallbacks(m_node): removeCBviaMayaID(mayaID) def removeCB(callback_identifier, callback_info=RECORDED_CALLBACKS): callback_id = callback_info.pop(callback_identifier, callback_identifier) try: om.MMessage.removeCallback(callback_id) except (RuntimeError, ValueError): pass def removeNamespaceCB(namespace): for cb in RECORDED_CALLBACKS.keys(): if cb.startswith(namespace): removeCB(cb) def checkAndRecordCB(callback_name, callback_id, callback_info=RECORDED_CALLBACKS): if callback_name in callback_info: removeCB(callback_name, callback_info=callback_info) callback_info[callback_name] = callback_id def registerSessionCB(func): @wraps(func) def wrap(*args, **kwargs): callback_name, callback_func = args[:2] callback_id = func(*args, **kwargs) checkAndRecordCB(callback_name, callback_id) return callback_id return wrap def testFunc(*args): print("TESTFUNC", args) def getMObject(node): mSel = om.MSelectionList() mSel.add(node) return mSel.getDependNode(0) @registerSessionCB def selectionChangedCB(callback_name, func): callback_id = om.MEventMessage.addEventCallback("SelectionChanged", func) return callback_id @registerSessionCB def attributeChangedCB(callback_name, func, node, attributes): m_node = getMObject(node) attrMan = AttributeChangedManager(m_node, attributes, func) managerFunc = attrMan.attributeChanged callback_id = om.MNodeMessage.addAttributeChangedCallback(m_node, managerFunc) return callback_id @registerSessionCB def newSceneCB(callback_name, func): callBackType = om.MSceneMessage.kSceneUpdate callback_id = om.MSceneMessage.addCallback(callBackType, func) return callback_id @registerSessionCB def timeChangedCB(callback_name, func): callback_id = om.MDGMessage.addTimeChangeCallback(func) return callback_id @registerSessionCB def userTimeChangedCB(callback_name, func): timeManager = UserTimeChangedManager(func) managerFunc = timeManager.userTimeChanged callback_id = om.MDGMessage.addTimeChangeCallback(managerFunc) return callback_id @registerSessionCB def sampleCallback(callback_name, func): callback_id = 2349823749 return callback_id class AttributeChangedManager(object): def __init__(self, m_node, attributes, func): self.m_node = m_node self.attributes = attributes self.func = func def attributeChanged(self, id, plug1, plug2, payload): if id != 2056: return if plug1.partialName() in self.attributes: self.func() class UserTimeChangedManager(object): def __init__(self, func): self.func = func
MIT License